From b0581880547cda082033c721b7abd36aff867764 Mon Sep 17 00:00:00 2001
From: Anton Kaliaev
Date: Sun, 28 Jul 2024 16:05:56 +0400
Subject: [PATCH] feat(confix): copy confix from cosmos sdk (#3036)

# Confix

`Confix` is a configuration management tool that allows you to manage your configuration via CLI.

It is based on the [CometBFT RFC 019](https://github.com/cometbft/cometbft/blob/5013bc3f4a6d64dcc2bf02ccc002ebc9881c62e4/docs/rfc/rfc-019-config-version.md).

## Usage

### Get

Get a configuration value, e.g.:

```shell
cometbft config get mempool.size # gets the value mempool.size
cometbft config get moniker # gets the value moniker
```

### Set

Set a configuration value, e.g.:

```shell
cometbft config set mempool.size 1000 # sets the value mempool.size
cometbft config set moniker "foo-1" # sets the value moniker
```

### Migrate

Migrate a configuration file to a new version:

```shell
cometbft config migrate v0.38 # migrates defaultHome/config/config.toml to the latest v0.38 config
```

### Diff

Get the diff between a given configuration file and the default configuration file, e.g.:

```shell
cometbft config diff v0.38 # gets the diff between defaultHome/config/config.toml and the latest v0.38 config
```

### View

View a configuration file, e.g.:

```shell
cometbft config view # views the current config
```

## Credits

This project is based on the [CometBFT RFC 019](https://github.com/cometbft/cometbft/blob/5013bc3f4a6d64dcc2bf02ccc002ebc9881c62e4/docs/rfc/rfc-019-config-version.md) and CometBFT's own implementation of [confix](https://github.com/cometbft/cometbft/blob/v0.36.x/scripts/confix/confix.go).
Most of the code is copied over from [Cosmos SDK](https://github.com/cosmos/cosmos-sdk/tree/main/tools/confix).

---

#### PR checklist

- [ ] ~~Tests written/updated~~
- [ ] ~~Changelog entry added in `.changelog` (we use [unclog](https://github.com/informalsystems/unclog) to manage our changelog)~~
- [ ] ~~Updated relevant documentation (`docs/` or `spec/`) and code comments~~
- [x] Title follows the [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) spec
---
 .../unreleased/improvements/3036-confix.md | 3 +
 .golangci.yml | 7 +-
 cmd/cometbft/commands/config/common.go | 18 +
 cmd/cometbft/commands/config/config.go | 24 +
 cmd/cometbft/commands/config/diff.go | 55 ++
 cmd/cometbft/commands/config/migrate.go | 69 ++
 cmd/cometbft/commands/config/mutate.go | 144 +++++
 cmd/cometbft/commands/config/view.go | 52 ++
 cmd/cometbft/commands/root.go | 34 +-
 cmd/cometbft/main.go | 2 +
 go.mod | 2 +
 internal/confix/README.md | 54 ++
 internal/confix/data/v0.34.toml | 487 ++++++++++++++
 internal/confix/data/v0.37.toml | 492 ++++++++++++++
 internal/confix/data/v0.38.toml | 482 ++++++++++++++
 internal/confix/data/v1.0.toml | 599 ++++++++++++++++++
 internal/confix/diff.go | 180 ++++++
 internal/confix/doc.go | 4 +
 internal/confix/file.go | 35 +
 internal/confix/log.go | 14 +
 internal/confix/migrations.go | 121 ++++
 internal/confix/upgrade.go | 82 +++
 internal/confix/upgrade_test.go | 34 +
 23 files changed, 2982 insertions(+), 12 deletions(-)
 create mode 100644 .changelog/unreleased/improvements/3036-confix.md
 create mode 100644 cmd/cometbft/commands/config/common.go
 create mode 100644 cmd/cometbft/commands/config/config.go
 create mode 100644 cmd/cometbft/commands/config/diff.go
 create mode 100644 cmd/cometbft/commands/config/migrate.go
 create mode 100644 cmd/cometbft/commands/config/mutate.go
 create mode 100644 cmd/cometbft/commands/config/view.go
 create mode 100644 internal/confix/README.md
 create mode 100644 
internal/confix/data/v0.34.toml create mode 100644 internal/confix/data/v0.37.toml create mode 100644 internal/confix/data/v0.38.toml create mode 100644 internal/confix/data/v1.0.toml create mode 100644 internal/confix/diff.go create mode 100644 internal/confix/doc.go create mode 100644 internal/confix/file.go create mode 100644 internal/confix/log.go create mode 100644 internal/confix/migrations.go create mode 100644 internal/confix/upgrade.go create mode 100644 internal/confix/upgrade_test.go diff --git a/.changelog/unreleased/improvements/3036-confix.md b/.changelog/unreleased/improvements/3036-confix.md new file mode 100644 index 00000000000..8e4a2514ce9 --- /dev/null +++ b/.changelog/unreleased/improvements/3036-confix.md @@ -0,0 +1,3 @@ +- `[cmd/cometbft]` Add `cometbft config` cmd to view, modify and + upgrade configs across different versions + ([\#3036](https://github.com/cometbft/cometbft/pull/3036)) diff --git a/.golangci.yml b/.golangci.yml index c46c67a044e..88d764187d2 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -94,8 +94,13 @@ linters-settings: - "!$test" allow: - $gostd + - github.com/BurntSushi/toml + - github.com/Masterminds/semver/v3 + - github.com/btcsuite/btcd/btcec/v2 - github.com/cometbft - github.com/cosmos + - github.com/creachadair/atomicfile + - github.com/creachadair/tomledit - github.com/btcsuite/btcd/btcec/v2 - github.com/BurntSushi/toml - github.com/dgraph-io/badger/v4 @@ -108,9 +113,9 @@ linters-settings: - github.com/hashicorp/golang-lru/v2 - github.com/lib/pq - github.com/libp2p/go-buffer-pool - - github.com/Masterminds/semver/v3 - github.com/minio/highwayhash - github.com/oasisprotocol/curve25519-voi + - github.com/pelletier/go-toml/v2 - github.com/pkg/errors - github.com/prometheus - github.com/rcrowley/go-metrics diff --git a/cmd/cometbft/commands/config/common.go b/cmd/cometbft/commands/config/common.go new file mode 100644 index 00000000000..48fd09dfe13 --- /dev/null +++ b/cmd/cometbft/commands/config/common.go @@ -0,0 +1,18 @@ +package config + +import ( + "path/filepath" + + "github.com/spf13/cobra" + + "github.com/cometbft/cometbft/cmd/cometbft/commands" + cfg "github.com/cometbft/cometbft/config" +) + +func defaultConfigPath(cmd *cobra.Command) string { + home, err := commands.ConfigHome(cmd) + if err != nil { + return "" + } + return filepath.Join(home, cfg.DefaultConfigDir, cfg.DefaultConfigFileName) +} diff --git a/cmd/cometbft/commands/config/config.go b/cmd/cometbft/commands/config/config.go new file mode 100644 index 00000000000..32765cb2463 --- /dev/null +++ b/cmd/cometbft/commands/config/config.go @@ -0,0 +1,24 @@ +package config + +import ( + "github.com/spf13/cobra" +) + +// Command contains all the confix commands +// These command can be used to interactively update a config value. +func Command() *cobra.Command { + cmd := &cobra.Command{ + Use: "config", + Short: "Utilities for managing configuration", + } + + cmd.AddCommand( + MigrateCommand(), + DiffCommand(), + GetCommand(), + SetCommand(), + ViewCommand(), + ) + + return cmd +} diff --git a/cmd/cometbft/commands/config/diff.go b/cmd/cometbft/commands/config/diff.go new file mode 100644 index 00000000000..17660c73782 --- /dev/null +++ b/cmd/cometbft/commands/config/diff.go @@ -0,0 +1,55 @@ +package config + +import ( + "fmt" + + "github.com/spf13/cobra" + "golang.org/x/exp/maps" + + "github.com/cometbft/cometbft/internal/confix" +) + +// DiffCommand creates a new command for comparing configuration files. 
+func DiffCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "diff [target-version] ", + Short: "Outputs all config values that are different from the default.", + Long: "This command compares the configuration file with the defaults and outputs any differences.", + Args: cobra.MinimumNArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + var configPath string + if len(args) > 1 { + configPath = args[1] + } else { + configPath = defaultConfigPath(cmd) + } + + targetVersion := args[0] + if _, ok := confix.Migrations[targetVersion]; !ok { + return fmt.Errorf("unknown version %q, supported versions are: %q", targetVersion, maps.Keys(confix.Migrations)) + } + + targetVersionFile, err := confix.LoadLocalConfig(targetVersion + ".toml") + if err != nil { + return fmt.Errorf("failed to load internal config: %w", err) + } + + rawFile, err := confix.LoadConfig(configPath) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + + diff := confix.DiffValues(rawFile, targetVersionFile) + if len(diff) == 0 { + fmt.Print("All config values are the same as the defaults.\n") + } + + fmt.Print("The following config values are different from the defaults:\n") + + confix.PrintDiff(cmd.OutOrStdout(), diff) + return nil + }, + } + + return cmd +} diff --git a/cmd/cometbft/commands/config/migrate.go b/cmd/cometbft/commands/config/migrate.go new file mode 100644 index 00000000000..8e88c9fe43e --- /dev/null +++ b/cmd/cometbft/commands/config/migrate.go @@ -0,0 +1,69 @@ +package config + +import ( + "context" + "fmt" + + "github.com/spf13/cobra" + "golang.org/x/exp/maps" + + "github.com/cometbft/cometbft/internal/confix" +) + +var ( + FlagStdOut bool + FlagVerbose bool + FlagSkipValidate bool +) + +func MigrateCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "migrate [target-version] ", + Short: "Migrate configuration file to the specified version", + Long: `Migrate the contents of the configuration to the specified version. +The output is written in-place unless --stdout is provided. 
+In case of any error in updating the file, no output is written.`, + Args: cobra.MinimumNArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + var configPath string + if len(args) > 1 { + configPath = args[1] + } else { + configPath = defaultConfigPath(cmd) + } + + targetVersion := args[0] + plan, ok := confix.Migrations[targetVersion] + if !ok { + return fmt.Errorf("unknown version %q, supported versions are: %q", targetVersion, maps.Keys(confix.Migrations)) + } + + rawFile, err := confix.LoadConfig(configPath) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + + ctx := context.Background() + if FlagVerbose { + ctx = confix.WithLogWriter(ctx, cmd.ErrOrStderr()) + } + + outputPath := configPath + if FlagStdOut { + outputPath = "" + } + + if err := confix.Upgrade(ctx, plan(rawFile, targetVersion), configPath, outputPath, FlagSkipValidate); err != nil { + return fmt.Errorf("failed to migrate config: %w", err) + } + + return nil + }, + } + + cmd.Flags().BoolVar(&FlagStdOut, "stdout", false, "print the updated config to stdout") + cmd.Flags().BoolVar(&FlagVerbose, "verbose", false, "log changes to stderr") + cmd.Flags().BoolVar(&FlagSkipValidate, "skip-validate", false, "skip configuration validation (allows to migrate unknown configurations)") + + return cmd +} diff --git a/cmd/cometbft/commands/config/mutate.go b/cmd/cometbft/commands/config/mutate.go new file mode 100644 index 00000000000..c86d56c44ff --- /dev/null +++ b/cmd/cometbft/commands/config/mutate.go @@ -0,0 +1,144 @@ +package config + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/creachadair/tomledit" + "github.com/creachadair/tomledit/parser" + "github.com/creachadair/tomledit/transform" + "github.com/spf13/cobra" + + "github.com/cometbft/cometbft/internal/confix" +) + +// SetCommand returns a CLI command to interactively update an application config value. +func SetCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "set [config] [key] [value]", + Short: "Set a config value", + Long: "Set a config value. The [config] is an optional absolute path to the config file (default: `~/.cometbft/config/config.toml`)", + Args: cobra.MinimumNArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + var ( + filename, inputValue string + key []string + ) + switch len(args) { + case 2: + { + filename = defaultConfigPath(cmd) + // parse key e.g mempool.size -> [mempool, size] + key = strings.Split(args[0], ".") + inputValue = args[1] + } + case 3: + { + filename, inputValue = args[0], args[2] + key = strings.Split(args[1], ".") + } + default: + return errors.New("expected 2 or 3 arguments") + } + + plan := transform.Plan{ + { + Desc: fmt.Sprintf("update %q=%q in %s", key, inputValue, filename), + T: transform.Func(func(_ context.Context, doc *tomledit.Document) error { + results := doc.Find(key...) 
+ if len(results) == 0 { + return fmt.Errorf("key %q not found", key) + } else if len(results) > 1 { + return fmt.Errorf("key %q is ambiguous", key) + } + + value, err := parser.ParseValue(inputValue) + if err != nil { + value = parser.MustValue(`"` + inputValue + `"`) + } + + if ok := transform.InsertMapping(results[0].Section, &parser.KeyValue{ + Block: results[0].Block, + Name: results[0].Name, + Value: value, + }, true); !ok { + return errors.New("failed to set value") + } + + return nil + }), + }, + } + + outputPath := filename + if FlagStdOut { + outputPath = "" + } + + ctx := cmd.Context() + if FlagVerbose { + ctx = confix.WithLogWriter(ctx, cmd.ErrOrStderr()) + } + + return confix.Upgrade(ctx, plan, filename, outputPath, FlagSkipValidate) + }, + } + + cmd.Flags().BoolVar(&FlagStdOut, "stdout", false, "print the updated config to stdout") + cmd.Flags().BoolVarP(&FlagVerbose, "verbose", "v", false, "log changes to stderr") + cmd.Flags().BoolVarP(&FlagSkipValidate, "skip-validate", "s", false, "skip configuration validation (allows to mutate unknown configurations)") + + return cmd +} + +// GetCommand returns a CLI command to interactively get an application config value. +func GetCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "get [config] [key]", + Short: "Get a config value", + Long: "Get a config value. The [config] is an optional absolute path to the config file (default: `~/.cometbft/config/config.toml`)", + Args: cobra.MinimumNArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + var ( + filename, key string + keys []string + ) + switch len(args) { + case 1: + { + filename = defaultConfigPath(cmd) + // parse key e.g mempool.size -> [mempool, size] + key = args[0] + keys = strings.Split(key, ".") + } + case 2: + { + filename = args[0] + key = args[1] + keys = strings.Split(key, ".") + } + default: + return errors.New("expected 1 or 2 arguments") + } + + doc, err := confix.LoadConfig(filename) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + + results := doc.Find(keys...) + if len(results) == 0 { + return fmt.Errorf("key %q not found", key) + } else if len(results) > 1 { + return fmt.Errorf("key %q is ambiguous", key) + } + + fmt.Printf("%s\n", results[0].Value.String()) + return nil + }, + } + + return cmd +} diff --git a/cmd/cometbft/commands/config/view.go b/cmd/cometbft/commands/config/view.go new file mode 100644 index 00000000000..3940f5e8bb7 --- /dev/null +++ b/cmd/cometbft/commands/config/view.go @@ -0,0 +1,52 @@ +package config + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/pelletier/go-toml/v2" + "github.com/spf13/cobra" +) + +func ViewCommand() *cobra.Command { + flagOutputFormat := "output-format" + + cmd := &cobra.Command{ + Use: "view [config]", + Short: "View the config file", + Long: "View the config file. 
The [config] is an optional absolute path to the config file (default: `~/.cometbft/config/config.toml`)", + RunE: func(cmd *cobra.Command, args []string) error { + var filename string + if len(args) > 0 { + filename = args[0] + } else { + filename = defaultConfigPath(cmd) + } + + file, err := os.ReadFile(filename) + if err != nil { + return err + } + + if format, _ := cmd.Flags().GetString(flagOutputFormat); format == "toml" { + cmd.Println(string(file)) + return nil + } + + var v any + if err := toml.Unmarshal(file, &v); err != nil { + return fmt.Errorf("failed to decode config file: %w", err) + } + + e := json.NewEncoder(cmd.OutOrStdout()) + e.SetIndent("", " ") + return e.Encode(v) + }, + } + + // output flag + cmd.Flags().String(flagOutputFormat, "toml", "Output format (json|toml)") + + return cmd +} diff --git a/cmd/cometbft/commands/root.go b/cmd/cometbft/commands/root.go index d1d3553ac6d..d703cd1bee5 100644 --- a/cmd/cometbft/commands/root.go +++ b/cmd/cometbft/commands/root.go @@ -26,15 +26,7 @@ func registerFlagsRootCmd(cmd *cobra.Command) { cmd.PersistentFlags().String("log_level", config.LogLevel, "log level") } -// ParseConfig retrieves the default environment configuration, -// sets up the CometBFT root and ensures that the root exists. -func ParseConfig(cmd *cobra.Command) (*cfg.Config, error) { - conf := cfg.DefaultConfig() - err := viper.Unmarshal(conf) - if err != nil { - return nil, err - } - +func ConfigHome(cmd *cobra.Command) (string, error) { var home string switch { case os.Getenv("CMTHOME") != "": @@ -42,14 +34,34 @@ func ParseConfig(cmd *cobra.Command) (*cfg.Config, error) { case os.Getenv("TMHOME") != "": // XXX: Deprecated. home = os.Getenv("TMHOME") - logger.Error("Deprecated environment variable TMHOME identified. CMTHOME should be used instead.") default: + var err error + // Default: $HOME/.cometbft home, err = cmd.Flags().GetString(cli.HomeFlag) if err != nil { - return nil, err + return "", err } } + return home, nil +} + +// ParseConfig retrieves the default environment configuration, +// sets up the CometBFT root and ensures that the root exists. +func ParseConfig(cmd *cobra.Command) (*cfg.Config, error) { + conf := cfg.DefaultConfig() + err := viper.Unmarshal(conf) + if err != nil { + return nil, err + } + + if os.Getenv("TMHOME") != "" { + logger.Error("Deprecated environment variable TMHOME identified. 
CMTHOME should be used instead.") + } + home, err := ConfigHome(cmd) + if err != nil { + return nil, err + } conf.RootDir = home conf.SetRoot(conf.RootDir) diff --git a/cmd/cometbft/main.go b/cmd/cometbft/main.go index 0ca1ce8d24d..5892c8251a5 100644 --- a/cmd/cometbft/main.go +++ b/cmd/cometbft/main.go @@ -5,6 +5,7 @@ import ( "path/filepath" cmd "github.com/cometbft/cometbft/cmd/cometbft/commands" + "github.com/cometbft/cometbft/cmd/cometbft/commands/config" "github.com/cometbft/cometbft/cmd/cometbft/commands/debug" cfg "github.com/cometbft/cometbft/config" "github.com/cometbft/cometbft/libs/cli" @@ -29,6 +30,7 @@ func main() { cmd.CompactGoLevelDBCmd, cmd.InspectCmd, debug.DebugCmd, + config.Command(), cli.NewCompletionCmd(rootCmd, true), ) diff --git a/go.mod b/go.mod index ac73d472c70..6d7c6f52948 100644 --- a/go.mod +++ b/go.mod @@ -6,6 +6,8 @@ require ( github.com/BurntSushi/toml v1.4.0 github.com/adlio/schema v1.3.6 github.com/cenkalti/backoff v2.2.1+incompatible // indirect + github.com/creachadair/atomicfile v0.3.4 + github.com/creachadair/tomledit v0.0.26 github.com/fortytw2/leaktest v1.3.0 github.com/go-kit/kit v0.13.0 github.com/go-kit/log v0.2.1 diff --git a/internal/confix/README.md b/internal/confix/README.md new file mode 100644 index 00000000000..131bea414f0 --- /dev/null +++ b/internal/confix/README.md @@ -0,0 +1,54 @@ +# Confix + +`Confix` is a configuration management tool that allows you to manage your configuration via CLI. + +It is based on the [CometBFT RFC 019](https://github.com/cometbft/cometbft/blob/5013bc3f4a6d64dcc2bf02ccc002ebc9881c62e4/docs/rfc/rfc-019-config-version.md). + +## Usage + +### Get + +Get a configuration value, e.g.: + +```shell +cometbft config get mempool.size # gets the value mempool.size +cometbft config get moniker # gets the value moniker +``` + +### Set + +Set a configuration value, e.g.: + +```shell +cometbft config set mempool.size 1000 # sets the value mempool.size +cometbft config set moniker "foo-1" # sets the value moniker +``` +### Migrate + +Migrate a configuration file to a new version: + +```shell +cometbft config migrate v0.38 # migrates defaultHome/config/config.toml to the latest v0.38 config +``` + +### Diff + +Get the diff between a given configuration file and the default configuration +file, e.g.: + +```shell +cometbft config diff v0.38 # gets the diff between defaultHome/config/config.toml and the latest v0.38 config +``` + +### View + +View a configuration file, e.g: + +```shell +cometbft config view # views the current config +``` + +## Credits + +This project is based on the [CometBFT RFC 019](https://github.com/cometbft/cometbft/blob/5013bc3f4a6d64dcc2bf02ccc002ebc9881c62e4/docs/rfc/rfc-019-config-version.md) and their own implementation of [confix](https://github.com/cometbft/cometbft/blob/v0.36.x/scripts/confix/confix.go). +Most of the code is copied over from [Cosmos SDK](https://github.com/cosmos/cosmos-sdk/tree/main/tools/confix). diff --git a/internal/confix/data/v0.34.toml b/internal/confix/data/v0.34.toml new file mode 100644 index 00000000000..35df417ff4e --- /dev/null +++ b/internal/confix/data/v0.34.toml @@ -0,0 +1,487 @@ +# This is a TOML config file. +# For more information, see https://github.com/toml-lang/toml + +# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or +# relative to the home directory (e.g. "data"). The home directory is +# "$HOME/.cometbft" by default, but could be changed via $CMTHOME env variable +# or --home cmd flag. 
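The NOTE above and the `ConfigHome` helper this patch adds to `root.go` resolve the node's home directory with the same precedence: `$CMTHOME` first, then the deprecated `$TMHOME`, then the `--home` flag, whose default is `$HOME/.cometbft`. A minimal standalone sketch of that precedence, with the cobra flag lookup replaced by a plain parameter (the `resolveHome` name and the `main` wrapper are illustrative, not part of the patch):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// resolveHome mirrors the precedence used by ConfigHome in root.go:
// $CMTHOME wins, then the deprecated $TMHOME, then the --home flag value.
// flagHome stands in for the value cobra would return for --home.
func resolveHome(flagHome string) string {
	if home := os.Getenv("CMTHOME"); home != "" {
		return home
	}
	if home := os.Getenv("TMHOME"); home != "" { // deprecated, kept for compatibility
		return home
	}
	return flagHome
}

func main() {
	userHome, _ := os.UserHomeDir()
	home := resolveHome(filepath.Join(userHome, ".cometbft"))
	// defaultConfigPath in common.go joins the resolved home with
	// config/config.toml; this is the file every `cometbft config`
	// subcommand falls back to when no explicit path is given.
	fmt.Println(filepath.Join(home, "config", "config.toml"))
}
```

Passing an explicit `[config]` path argument to `get`, `set`, `view`, `diff`, or `migrate` bypasses this fallback entirely.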
+ +####################################################################### +### Main Base Config Options ### +####################################################################### + +# TCP or UNIX socket address of the ABCI application, +# or the name of an ABCI application compiled in with the CometBFT binary +proxy_app = "tcp://127.0.0.1:26658" + +# A custom human readable name for this node +moniker = "test" + +# If this node is many blocks behind the tip of the chain, FastSync +# allows them to catchup quickly by downloading blocks in parallel +# and verifying their commits +fast_sync = true + +# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb +# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) +# - pure go +# - stable +# * cleveldb (uses levigo wrapper) +# - fast +# - requires gcc +# - use cleveldb build tag (go build -tags cleveldb) +# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) +# - EXPERIMENTAL +# - may be faster is some use-cases (random reads - indexer) +# - use boltdb build tag (go build -tags boltdb) +# * rocksdb (uses github.com/tecbot/gorocksdb) +# - EXPERIMENTAL +# - requires gcc +# - use rocksdb build tag (go build -tags rocksdb) +# * badgerdb (uses github.com/dgraph-io/badger) +# - EXPERIMENTAL +# - use badgerdb build tag (go build -tags badgerdb) +db_backend = "goleveldb" + +# Database directory +db_dir = "data" + +# Output level for logging, including package level options +log_level = "info" + +# Output format: 'plain' (colored text) or 'json' +log_format = "plain" + +##### additional base config options ##### + +# Path to the JSON file containing the initial validator set and other meta data +genesis_file = "config/genesis.json" + +# Path to the JSON file containing the private key to use as a validator in the consensus protocol +priv_validator_key_file = "config/priv_validator_key.json" + +# Path to the JSON file containing the last sign state of a validator +priv_validator_state_file = "data/priv_validator_state.json" + +# TCP or UNIX socket address for CometBFT to listen on for +# connections from an external PrivValidator process +priv_validator_laddr = "" + +# Path to the JSON file containing the private key to use for node authentication in the p2p protocol +node_key_file = "config/node_key.json" + +# Mechanism to connect to the ABCI application: socket | grpc +abci = "socket" + +# If true, query the ABCI app on connecting to a new peer +# so the app can decide if we should keep the connection or not +filter_peers = false + + +####################################################################### +### Advanced Configuration Options ### +####################################################################### + +####################################################### +### RPC Server Configuration Options ### +####################################################### +[rpc] + +# TCP or UNIX socket address for the RPC server to listen on +laddr = "tcp://127.0.0.1:26657" + +# A list of origins a cross-domain request can be executed from +# Default value '[]' disables cors support +# Use '["*"]' to allow any origin +cors_allowed_origins = [] + +# A list of methods the client is allowed to use with cross-domain requests +cors_allowed_methods = ["HEAD", "GET", "POST", ] + +# A list of non simple headers the client is allowed to use with cross-domain requests +cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] + +# TCP or UNIX socket address for the gRPC server 
to listen on +# NOTE: This server only supports /broadcast_tx_commit +grpc_laddr = "" + +# Maximum number of simultaneous connections. +# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 +grpc_max_open_connections = 900 + +# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool +unsafe = false + +# Maximum number of simultaneous connections (including WebSocket). +# Does not include gRPC connections. See grpc_max_open_connections +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 +max_open_connections = 900 + +# Maximum number of unique clientIDs that can /subscribe +# If you're using /broadcast_tx_commit, set to the estimated maximum number +# of broadcast_tx_commit calls per block. +max_subscription_clients = 100 + +# Maximum number of unique queries a given client can /subscribe to +# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to +# the estimated # maximum number of broadcast_tx_commit calls per block. +max_subscriptions_per_client = 5 + +# Experimental parameter to specify the maximum number of events a node will +# buffer, per subscription, before returning an error and closing the +# subscription. Must be set to at least 100, but higher values will accommodate +# higher event throughput rates (and will use more memory). +experimental_subscription_buffer_size = 200 + +# Experimental parameter to specify the maximum number of RPC responses that +# can be buffered per WebSocket client. If clients cannot read from the +# WebSocket endpoint fast enough, they will be disconnected, so increasing this +# parameter may reduce the chances of them being disconnected (but will cause +# the node to use more memory). +# +# Must be at least the same as "experimental_subscription_buffer_size", +# otherwise connections could be dropped unnecessarily. This value should +# ideally be somewhat higher than "experimental_subscription_buffer_size" to +# accommodate non-subscription-related RPC responses. +experimental_websocket_write_buffer_size = 200 + +# If a WebSocket client cannot read fast enough, at present we may +# silently drop events instead of generating an error or disconnecting the +# client. +# +# Enabling this experimental parameter will cause the WebSocket connection to +# be closed instead if it cannot read fast enough, allowing for greater +# predictability in subscription behaviour. +experimental_close_on_slow_client = false + +# How long to wait for a tx to be committed during /broadcast_tx_commit. +# WARNING: Using a value larger than 10s will result in increasing the +# global HTTP write timeout, which applies to all connections and endpoints. +# See https://github.com/tendermint/tendermint/issues/3435 +timeout_broadcast_tx_commit = "10s" + +# Maximum size of request body, in bytes +max_body_bytes = 1000000 + +# Maximum size of request header, in bytes +max_header_bytes = 1048576 + +# The path to a file containing certificate that is used to create the HTTPS server. 
+# Might be either absolute path or path related to CometBFT's config directory. +# If the certificate is signed by a certificate authority, +# the certFile should be the concatenation of the server's certificate, any intermediates, +# and the CA's certificate. +# NOTE: both tls_cert_file and tls_key_file must be present for CometBFT to create HTTPS server. +# Otherwise, HTTP server is run. +tls_cert_file = "" + +# The path to a file containing matching private key that is used to create the HTTPS server. +# Might be either absolute path or path related to CometBFT's config directory. +# NOTE: both tls-cert-file and tls-key-file must be present for CometBFT to create HTTPS server. +# Otherwise, HTTP server is run. +tls_key_file = "" + +# pprof listen address (https://golang.org/pkg/net/http/pprof) +pprof_laddr = "" + +####################################################### +### P2P Configuration Options ### +####################################################### +[p2p] + +# Address to listen for incoming connections +laddr = "tcp://0.0.0.0:26656" + +# Address to advertise to peers for them to dial +# If empty, will use the same port as the laddr, +# and will introspect on the listener or use UPnP +# to figure out the address. ip and port are required +# example: 159.89.10.97:26656 +external_address = "" + +# Comma separated list of seed nodes to connect to +seeds = "" + +# Comma separated list of nodes to keep persistent connections to +persistent_peers = "" + +# UPNP port forwarding +upnp = false + +# Path to address book +addr_book_file = "config/addrbook.json" + +# Set true for strict address routability rules +# Set false for private or local networks +addr_book_strict = true + +# Maximum number of inbound peers +max_num_inbound_peers = 40 + +# Maximum number of outbound peers to connect to, excluding persistent peers +max_num_outbound_peers = 10 + +# List of node IDs, to which a connection will be (re)established ignoring any existing limits +unconditional_peer_ids = "" + +# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) +persistent_peers_max_dial_period = "0s" + +# Time to wait before flushing messages out on the connection +flush_throttle_timeout = "100ms" + +# Maximum size of a message packet payload, in bytes +max_packet_msg_payload_size = 1024 + +# Rate at which packets can be sent, in bytes/second +send_rate = 5120000 + +# Rate at which packets can be received, in bytes/second +recv_rate = 5120000 + +# Set true to enable the peer-exchange reactor +pex = true + +# Seed mode, in which node constantly crawls the network and looks for +# peers. If another node asks it for addresses, it responds and disconnects. +# +# Does not work if the peer-exchange reactor is disabled. +seed_mode = false + +# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) +private_peer_ids = "" + +# Toggle to disable guard against peers connecting from the same ip. +allow_duplicate_ip = false + +# Peer connection configuration. +handshake_timeout = "20s" +dial_timeout = "3s" + +####################################################### +### Mempool Configuration Option ### +####################################################### +[mempool] + +# Mempool version to use: +# 1) "v0" - (default) FIFO mempool. +# 2) "v1" - prioritized mempool. +version = "v0" + +# Recheck (default: true) defines whether CometBFT should recheck the +# validity for all remaining transaction in the mempool after a block. 
+# Since a block affects the application state, some transactions in the +# mempool may become invalid. If this does not apply to your application, +# you can disable rechecking. +recheck = true +broadcast = true +wal_dir = "" + +# Maximum number of transactions in the mempool +size = 5000 + +# Limit the total size of all txs in the mempool. +# This only accounts for raw transactions (e.g. given 1MB transactions and +# max_txs_bytes=5MB, mempool will only accept 5 transactions). +max_txs_bytes = 1073741824 + +# Size of the cache (used to filter transactions we saw earlier) in transactions +cache_size = 10000 + +# Do not remove invalid transactions from the cache (default: false) +# Set to true if it's not possible for any invalid transaction to become valid +# again in the future. +keep-invalid-txs-in-cache = false + +# Maximum size of a single transaction. +# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. +max_tx_bytes = 1048576 + +# Maximum size of a batch of transactions to send to a peer +# Including space needed by encoding (one varint per transaction). +# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 +max_batch_bytes = 0 + +# ttl-duration, if non-zero, defines the maximum amount of time a transaction +# can exist for in the mempool. +# +# Note, if ttl-num-blocks is also defined, a transaction will be removed if it +# has existed in the mempool at least ttl-num-blocks number of blocks or if it's +# insertion time into the mempool is beyond ttl-duration. +ttl-duration = "0s" + +# ttl-num-blocks, if non-zero, defines the maximum number of blocks a transaction +# can exist for in the mempool. +# +# Note, if ttl-duration is also defined, a transaction will be removed if it +# has existed in the mempool at least ttl-num-blocks number of blocks or if +# it's insertion time into the mempool is beyond ttl-duration. +ttl-num-blocks = 0 + +# Experimental parameters to limit gossiping txs to up to the specified number of peers. +# This feature is only available for the default mempool (version config set to "v0"). +# We use two independent upper values for persistent and non-persistent peers. +# Unconditional peers are not affected by this feature. +# If we are connected to more than the specified number of persistent peers, only send txs to +# ExperimentalMaxGossipConnectionsToPersistentPeers of them. If one of those +# persistent peers disconnects, activate another persistent peer. +# Similarly for non-persistent peers, with an upper limit of +# ExperimentalMaxGossipConnectionsToNonPersistentPeers. +# If set to 0, the feature is disabled for the corresponding group of peers, that is, the +# number of active connections to that group of peers is not bounded. +# For non-persistent peers, if enabled, a value of 10 is recommended based on experimental +# performance results using the default P2P configuration. +experimental_max_gossip_connections_to_persistent_peers = 0 +experimental_max_gossip_connections_to_non_persistent_peers = 0 + +####################################################### +### State Sync Configuration Options ### +####################################################### +[statesync] +# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine +# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in +# the network to take and serve state machine snapshots. State sync is not attempted if the node +# has any local state (LastBlockHeight > 0). 
The node will have a truncated block history, +# starting from the height of the snapshot. +enable = false + +# RPC servers (comma-separated) for light client verification of the synced state machine and +# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding +# header hash obtained from a trusted source, and a period during which validators can be trusted. +# +# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 +# weeks) during which they can be financially punished (slashed) for misbehavior. +rpc_servers = "" +trust_height = 0 +trust_hash = "" +trust_period = "168h0m0s" + +# Time to spend discovering snapshots before initiating a restore. +discovery_time = "15s" + +# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). +# Will create a new, randomly named directory within, and remove it when done. +temp_dir = "" + +# The timeout duration before re-requesting a chunk, possibly from a different +# peer (default: 1 minute). +chunk_request_timeout = "10s" + +# The number of concurrent chunk fetchers to run (default: 1). +chunk_fetchers = "4" + +####################################################### +### Fast Sync Configuration Connections ### +####################################################### +[fastsync] + +# Fast Sync version to use: +# 1) "v0" (default) - the legacy fast sync implementation +# 2) "v1" - refactor of v0 version for better testability +# 2) "v2" - complete redesign of v0, optimized for testability & readability +version = "v0" + +####################################################### +### Consensus Configuration Options ### +####################################################### +[consensus] + +wal_file = "data/cs.wal/wal" + +# How long we wait for a proposal block before prevoting nil +timeout_propose = "3s" +# How much timeout_propose increases with each round +timeout_propose_delta = "500ms" +# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) +timeout_prevote = "1s" +# How much the timeout_prevote increases with each round +timeout_prevote_delta = "500ms" +# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) +timeout_precommit = "1s" +# How much the timeout_precommit increases with each round +timeout_precommit_delta = "500ms" +# How long we wait after committing a block, before starting on the new +# height (this gives us a chance to receive some more precommits, even +# though we already have +2/3). +timeout_commit = "1s" + +# How many blocks to look back to check existence of the node's consensus votes before joining consensus +# When non-zero, the node will panic upon restart +# if the same consensus key was used to sign {double_sign_check_height} last blocks. +# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. 
+double_sign_check_height = 0 + +# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) +skip_timeout_commit = false + +# EmptyBlocks mode and possible interval between empty blocks +create_empty_blocks = true +create_empty_blocks_interval = "0s" + +# Reactor sleep duration parameters +peer_gossip_sleep_duration = "100ms" +peer_query_maj23_sleep_duration = "2s" + +####################################################### +### Storage Configuration Options ### +####################################################### +[storage] + +# Set to true to discard ABCI responses from the state store, which can save a +# considerable amount of disk space. Set to false to ensure ABCI responses are +# persisted. ABCI responses are required for /block_results RPC queries, and to +# reindex events in the command-line tool. +discard_abci_responses = false + +####################################################### +### Transaction Indexer Configuration Options ### +####################################################### +[tx_index] + +# What indexer to use for transactions +# +# The application will set which txs to index. In some cases a node operator will be able +# to decide which txs to index based on configuration set in the application. +# +# Options: +# 1) "null" +# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). +# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. +# 3) "psql" - the indexer services backed by PostgreSQL. +# When "kv" or "psql" is chosen "tx.height" and "tx.hash" will always be indexed. +indexer = "kv" + +# The PostgreSQL connection configuration, the connection format: +# postgresql://:@:/? +psql-conn = "" + +####################################################### +### Instrumentation Configuration Options ### +####################################################### +[instrumentation] + +# When true, Prometheus metrics are served under /metrics on +# PrometheusListenAddr. +# Check out the documentation for the list of available metrics. +prometheus = false + +# Address to listen for Prometheus collector(s) connections +prometheus_listen_addr = ":26660" + +# Maximum number of simultaneous connections. +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +max_open_connections = 3 + +# Instrumentation namespace +namespace = "cometbft" diff --git a/internal/confix/data/v0.37.toml b/internal/confix/data/v0.37.toml new file mode 100644 index 00000000000..1591199ee66 --- /dev/null +++ b/internal/confix/data/v0.37.toml @@ -0,0 +1,492 @@ +# This is a TOML config file. +# For more information, see https://github.com/toml-lang/toml + +# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or +# relative to the home directory (e.g. "data"). The home directory is +# "$HOME/.cometbft" by default, but could be changed via $CMTHOME env variable +# or --home cmd flag. 
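The file that ends just above (`v0.34.toml`) and the snapshots that follow (`v0.37.toml` here, then `v0.38.toml` and `v1.0.toml`) are the embedded per-version defaults that `cometbft config diff` and `cometbft config migrate` work against: `diff.go` loads them by file name via `confix.LoadLocalConfig(targetVersion + ".toml")` and reports differences with `DiffValues` and `PrintDiff`. Below is a rough sketch of the same comparison driven directly from Go; it assumes the signatures implied by their use in `diff.go`, and since the package is `internal` it only builds from inside the CometBFT module:

```go
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"

	"github.com/cometbft/cometbft/internal/confix"
)

func main() {
	// Embedded defaults shipped with the binary, addressed by file name
	// exactly as diff.go does: targetVersion + ".toml".
	defaults, err := confix.LoadLocalConfig("v0.38.toml")
	if err != nil {
		log.Fatalf("loading embedded defaults: %v", err)
	}

	// The operator's config, parsed into a tomledit document.
	home, _ := os.UserHomeDir()
	local, err := confix.LoadConfig(filepath.Join(home, ".cometbft", "config", "config.toml"))
	if err != nil {
		log.Fatalf("loading local config: %v", err)
	}

	// DiffValues reports every key whose value differs from the defaults;
	// PrintDiff renders the result the same way `cometbft config diff` does.
	diff := confix.DiffValues(local, defaults)
	if len(diff) == 0 {
		fmt.Println("all config values match the v0.38 defaults")
		return
	}
	confix.PrintDiff(os.Stdout, diff)
}
```

The `migrate` path is the same idea with a transform step in between: the matching entry in `confix.Migrations` turns the loaded document into a `transform.Plan`, which `confix.Upgrade` applies and writes back in place (or to stdout with `--stdout`).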
+ +####################################################################### +### Main Base Config Options ### +####################################################################### + +# TCP or UNIX socket address of the ABCI application, +# or the name of an ABCI application compiled in with the CometBFT binary +proxy_app = "tcp://127.0.0.1:26658" + +# A custom human readable name for this node +moniker = "test" + +# If this node is many blocks behind the tip of the chain, BlockSync +# allows them to catchup quickly by downloading blocks in parallel +# and verifying their commits +# +# Deprecated: this key will be removed and BlockSync will be enabled +# unconditionally in the next major release. +block_sync = true + +# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb +# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) +# - pure go +# - stable +# * cleveldb (uses levigo wrapper) +# - fast +# - requires gcc +# - use cleveldb build tag (go build -tags cleveldb) +# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) +# - EXPERIMENTAL +# - may be faster is some use-cases (random reads - indexer) +# - use boltdb build tag (go build -tags boltdb) +# * rocksdb (uses github.com/tecbot/gorocksdb) +# - EXPERIMENTAL +# - requires gcc +# - use rocksdb build tag (go build -tags rocksdb) +# * badgerdb (uses github.com/dgraph-io/badger) +# - EXPERIMENTAL +# - use badgerdb build tag (go build -tags badgerdb) +db_backend = "goleveldb" + +# Database directory +db_dir = "data" + +# Output level for logging, including package level options +log_level = "info" + +# Output format: 'plain' (colored text) or 'json' +log_format = "plain" + +##### additional base config options ##### + +# Path to the JSON file containing the initial validator set and other meta data +genesis_file = "config/genesis.json" + +# Path to the JSON file containing the private key to use as a validator in the consensus protocol +priv_validator_key_file = "config/priv_validator_key.json" + +# Path to the JSON file containing the last sign state of a validator +priv_validator_state_file = "data/priv_validator_state.json" + +# TCP or UNIX socket address for CometBFT to listen on for +# connections from an external PrivValidator process +priv_validator_laddr = "" + +# Path to the JSON file containing the private key to use for node authentication in the p2p protocol +node_key_file = "config/node_key.json" + +# Mechanism to connect to the ABCI application: socket | grpc +abci = "socket" + +# If true, query the ABCI app on connecting to a new peer +# so the app can decide if we should keep the connection or not +filter_peers = false + + +####################################################################### +### Advanced Configuration Options ### +####################################################################### + +####################################################### +### RPC Server Configuration Options ### +####################################################### +[rpc] + +# TCP or UNIX socket address for the RPC server to listen on +laddr = "tcp://127.0.0.1:26657" + +# A list of origins a cross-domain request can be executed from +# Default value '[]' disables cors support +# Use '["*"]' to allow any origin +cors_allowed_origins = [] + +# A list of methods the client is allowed to use with cross-domain requests +cors_allowed_methods = ["HEAD", "GET", "POST", ] + +# A list of non simple headers the client is allowed to use with cross-domain requests +cors_allowed_headers = 
["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] + +# TCP or UNIX socket address for the gRPC server to listen on +# NOTE: This server only supports /broadcast_tx_commit +grpc_laddr = "" + +# Maximum number of simultaneous connections. +# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 +grpc_max_open_connections = 900 + +# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool +unsafe = false + +# Maximum number of simultaneous connections (including WebSocket). +# Does not include gRPC connections. See grpc_max_open_connections +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 +max_open_connections = 900 + +# Maximum number of unique clientIDs that can /subscribe +# If you're using /broadcast_tx_commit, set to the estimated maximum number +# of broadcast_tx_commit calls per block. +max_subscription_clients = 100 + +# Maximum number of unique queries a given client can /subscribe to +# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to +# the estimated # maximum number of broadcast_tx_commit calls per block. +max_subscriptions_per_client = 5 + +# Experimental parameter to specify the maximum number of events a node will +# buffer, per subscription, before returning an error and closing the +# subscription. Must be set to at least 100, but higher values will accommodate +# higher event throughput rates (and will use more memory). +experimental_subscription_buffer_size = 200 + +# Experimental parameter to specify the maximum number of RPC responses that +# can be buffered per WebSocket client. If clients cannot read from the +# WebSocket endpoint fast enough, they will be disconnected, so increasing this +# parameter may reduce the chances of them being disconnected (but will cause +# the node to use more memory). +# +# Must be at least the same as "experimental_subscription_buffer_size", +# otherwise connections could be dropped unnecessarily. This value should +# ideally be somewhat higher than "experimental_subscription_buffer_size" to +# accommodate non-subscription-related RPC responses. +experimental_websocket_write_buffer_size = 200 + +# If a WebSocket client cannot read fast enough, at present we may +# silently drop events instead of generating an error or disconnecting the +# client. +# +# Enabling this experimental parameter will cause the WebSocket connection to +# be closed instead if it cannot read fast enough, allowing for greater +# predictability in subscription behavior. +experimental_close_on_slow_client = false + +# How long to wait for a tx to be committed during /broadcast_tx_commit. +# WARNING: Using a value larger than 10s will result in increasing the +# global HTTP write timeout, which applies to all connections and endpoints. 
+# See https://github.com/tendermint/tendermint/issues/3435 +timeout_broadcast_tx_commit = "10s" + +# Maximum size of request body, in bytes +max_body_bytes = 1000000 + +# Maximum size of request header, in bytes +max_header_bytes = 1048576 + +# The path to a file containing certificate that is used to create the HTTPS server. +# Might be either absolute path or path related to CometBFT's config directory. +# If the certificate is signed by a certificate authority, +# the certFile should be the concatenation of the server's certificate, any intermediates, +# and the CA's certificate. +# NOTE: both tls_cert_file and tls_key_file must be present for CometBFT to create HTTPS server. +# Otherwise, HTTP server is run. +tls_cert_file = "" + +# The path to a file containing matching private key that is used to create the HTTPS server. +# Might be either absolute path or path related to CometBFT's config directory. +# NOTE: both tls-cert-file and tls-key-file must be present for CometBFT to create HTTPS server. +# Otherwise, HTTP server is run. +tls_key_file = "" + +# pprof listen address (https://golang.org/pkg/net/http/pprof) +pprof_laddr = "" + +####################################################### +### P2P Configuration Options ### +####################################################### +[p2p] + +# Address to listen for incoming connections +laddr = "tcp://0.0.0.0:26656" + +# Address to advertise to peers for them to dial. If empty, will use the same +# port as the laddr, and will introspect on the listener to figure out the +# address. IP and port are required. Example: 159.89.10.97:26656 +external_address = "" + +# Comma separated list of seed nodes to connect to +seeds = "" + +# Comma separated list of nodes to keep persistent connections to +persistent_peers = "" + +# Path to address book +addr_book_file = "config/addrbook.json" + +# Set true for strict address routability rules +# Set false for private or local networks +addr_book_strict = true + +# Maximum number of inbound peers +max_num_inbound_peers = 40 + +# Maximum number of outbound peers to connect to, excluding persistent peers +max_num_outbound_peers = 10 + +# List of node IDs, to which a connection will be (re)established ignoring any existing limits +unconditional_peer_ids = "" + +# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) +persistent_peers_max_dial_period = "0s" + +# Time to wait before flushing messages out on the connection +flush_throttle_timeout = "100ms" + +# Maximum size of a message packet payload, in bytes +max_packet_msg_payload_size = 1024 + +# Rate at which packets can be sent, in bytes/second +send_rate = 5120000 + +# Rate at which packets can be received, in bytes/second +recv_rate = 5120000 + +# Set true to enable the peer-exchange reactor +pex = true + +# Seed mode, in which node constantly crawls the network and looks for +# peers. If another node asks it for addresses, it responds and disconnects. +# +# Does not work if the peer-exchange reactor is disabled. +seed_mode = false + +# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) +private_peer_ids = "" + +# Toggle to disable guard against peers connecting from the same ip. +allow_duplicate_ip = false + +# Peer connection configuration. 
+handshake_timeout = "20s" +dial_timeout = "3s" + +####################################################### +### Mempool Configuration Option ### +####################################################### +[mempool] + +# Mempool version to use: +# 1) "v0" - (default) FIFO mempool. +# 2) "v1" - prioritized mempool (deprecated; will be removed in the next release). +version = "v0" + +# The type of mempool for this node to use. +# +# Possible types: +# - "flood" : concurrent linked list mempool with flooding gossip protocol +# (default) +# - "nop" : nop-mempool (short for no operation; the ABCI app is responsible +# for storing, disseminating and proposing txs). "create_empty_blocks=false" is +# not supported. +type = "flood" + +recheck = true +broadcast = true +wal_dir = "" + +# Maximum number of transactions in the mempool +size = 5000 + +# Limit the total size of all txs in the mempool. +# This only accounts for raw transactions (e.g. given 1MB transactions and +# max_txs_bytes=5MB, mempool will only accept 5 transactions). +max_txs_bytes = 1073741824 + +# Size of the cache (used to filter transactions we saw earlier) in transactions +cache_size = 10000 + +# Do not remove invalid transactions from the cache (default: false) +# Set to true if it's not possible for any invalid transaction to become valid +# again in the future. +keep-invalid-txs-in-cache = false + +# Maximum size of a single transaction. +# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. +max_tx_bytes = 1048576 + +# Maximum size of a batch of transactions to send to a peer +# Including space needed by encoding (one varint per transaction). +# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 +max_batch_bytes = 0 + +# ttl-duration, if non-zero, defines the maximum amount of time a transaction +# can exist for in the mempool. +# +# Note, if ttl-num-blocks is also defined, a transaction will be removed if it +# has existed in the mempool at least ttl-num-blocks number of blocks or if it's +# insertion time into the mempool is beyond ttl-duration. +ttl-duration = "0s" + +# ttl-num-blocks, if non-zero, defines the maximum number of blocks a transaction +# can exist for in the mempool. +# +# Note, if ttl-duration is also defined, a transaction will be removed if it +# has existed in the mempool at least ttl-num-blocks number of blocks or if +# it's insertion time into the mempool is beyond ttl-duration. +ttl-num-blocks = 0 + +# Experimental parameters to limit gossiping txs to up to the specified number of peers. +# This feature is only available for the default mempool (version config set to "v0"). +# We use two independent upper values for persistent and non-persistent peers. +# Unconditional peers are not affected by this feature. +# If we are connected to more than the specified number of persistent peers, only send txs to +# ExperimentalMaxGossipConnectionsToPersistentPeers of them. If one of those +# persistent peers disconnects, activate another persistent peer. +# Similarly for non-persistent peers, with an upper limit of +# ExperimentalMaxGossipConnectionsToNonPersistentPeers. +# If set to 0, the feature is disabled for the corresponding group of peers, that is, the +# number of active connections to that group of peers is not bounded. +# For non-persistent peers, if enabled, a value of 10 is recommended based on experimental +# performance results using the default P2P configuration. 
+experimental_max_gossip_connections_to_persistent_peers = 0 +experimental_max_gossip_connections_to_non_persistent_peers = 0 + +####################################################### +### State Sync Configuration Options ### +####################################################### +[statesync] +# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine +# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in +# the network to take and serve state machine snapshots. State sync is not attempted if the node +# has any local state (LastBlockHeight > 0). The node will have a truncated block history, +# starting from the height of the snapshot. +enable = false + +# RPC servers (comma-separated) for light client verification of the synced state machine and +# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding +# header hash obtained from a trusted source, and a period during which validators can be trusted. +# +# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 +# weeks) during which they can be financially punished (slashed) for misbehavior. +rpc_servers = "" +trust_height = 0 +trust_hash = "" +trust_period = "168h0m0s" + +# Time to spend discovering snapshots before initiating a restore. +discovery_time = "15s" + +# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). +# Will create a new, randomly named directory within, and remove it when done. +temp_dir = "" + +# The timeout duration before re-requesting a chunk, possibly from a different +# peer (default: 1 minute). +chunk_request_timeout = "10s" + +# The number of concurrent chunk fetchers to run (default: 1). +chunk_fetchers = "4" + +####################################################### +### Block Sync Configuration Options ### +####################################################### +[blocksync] + +# Block Sync version to use: +# +# In v0.37, v1 and v2 of the block sync protocols were deprecated. +# Please use v0 instead. +# +# 1) "v0" - the default block sync implementation +version = "v0" + +####################################################### +### Consensus Configuration Options ### +####################################################### +[consensus] + +wal_file = "data/cs.wal/wal" + +# How long we wait for a proposal block before prevoting nil +timeout_propose = "3s" +# How much timeout_propose increases with each round +timeout_propose_delta = "500ms" +# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) +timeout_prevote = "1s" +# How much the timeout_prevote increases with each round +timeout_prevote_delta = "500ms" +# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) +timeout_precommit = "1s" +# How much the timeout_precommit increases with each round +timeout_precommit_delta = "500ms" +# How long we wait after committing a block, before starting on the new +# height (this gives us a chance to receive some more precommits, even +# though we already have +2/3). +timeout_commit = "1s" + +# How many blocks to look back to check existence of the node's consensus votes before joining consensus +# When non-zero, the node will panic upon restart +# if the same consensus key was used to sign {double_sign_check_height} last blocks. 
+# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. +double_sign_check_height = 0 + +# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) +skip_timeout_commit = false + +# EmptyBlocks mode and possible interval between empty blocks +create_empty_blocks = true +create_empty_blocks_interval = "0s" + +# Reactor sleep duration parameters +peer_gossip_sleep_duration = "100ms" +peer_query_maj23_sleep_duration = "2s" + +####################################################### +### Storage Configuration Options ### +####################################################### +[storage] + +# Set to true to discard ABCI responses from the state store, which can save a +# considerable amount of disk space. Set to false to ensure ABCI responses are +# persisted. ABCI responses are required for /block_results RPC queries, and to +# reindex events in the command-line tool. +discard_abci_responses = false + +####################################################### +### Transaction Indexer Configuration Options ### +####################################################### +[tx_index] + +# What indexer to use for transactions +# +# The application will set which txs to index. In some cases a node operator will be able +# to decide which txs to index based on configuration set in the application. +# +# Options: +# 1) "null" +# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). +# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. +# 3) "psql" - the indexer services backed by PostgreSQL. +# When "kv" or "psql" is chosen "tx.height" and "tx.hash" will always be indexed. +indexer = "kv" + +# The PostgreSQL connection configuration, the connection format: +# postgresql://:@:/? +psql-conn = "" + +####################################################### +### Instrumentation Configuration Options ### +####################################################### +[instrumentation] + +# When true, Prometheus metrics are served under /metrics on +# PrometheusListenAddr. +# Check out the documentation for the list of available metrics. +prometheus = false + +# Address to listen for Prometheus collector(s) connections +prometheus_listen_addr = ":26660" + +# Maximum number of simultaneous connections. +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +max_open_connections = 3 + +# Instrumentation namespace +namespace = "cometbft" diff --git a/internal/confix/data/v0.38.toml b/internal/confix/data/v0.38.toml new file mode 100644 index 00000000000..ad2f94846eb --- /dev/null +++ b/internal/confix/data/v0.38.toml @@ -0,0 +1,482 @@ +# This is a TOML config file. +# For more information, see https://github.com/toml-lang/toml + +# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or +# relative to the home directory (e.g. "data"). The home directory is +# "$HOME/.cometbft" by default, but could be changed via $CMTHOME env variable +# or --home cmd flag. + +# The version of the CometBFT binary that created or +# last modified the config file. Do not modify this. 
+version = "0.38.7" + +####################################################################### +### Main Base Config Options ### +####################################################################### + +# TCP or UNIX socket address of the ABCI application, +# or the name of an ABCI application compiled in with the CometBFT binary +proxy_app = "tcp://127.0.0.1:26658" + +# A custom human readable name for this node +moniker = "test" + +# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb +# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) +# - pure go +# - stable +# * cleveldb (uses levigo wrapper) +# - fast +# - requires gcc +# - use cleveldb build tag (go build -tags cleveldb) +# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) +# - EXPERIMENTAL +# - may be faster is some use-cases (random reads - indexer) +# - use boltdb build tag (go build -tags boltdb) +# * rocksdb (uses github.com/tecbot/gorocksdb) +# - EXPERIMENTAL +# - requires gcc +# - use rocksdb build tag (go build -tags rocksdb) +# * badgerdb (uses github.com/dgraph-io/badger) +# - EXPERIMENTAL +# - use badgerdb build tag (go build -tags badgerdb) +db_backend = "goleveldb" + +# Database directory +db_dir = "data" + +# Output level for logging, including package level options +log_level = "info" + +# Output format: 'plain' (colored text) or 'json' +log_format = "plain" + +##### additional base config options ##### + +# Path to the JSON file containing the initial validator set and other meta data +genesis_file = "config/genesis.json" + +# Path to the JSON file containing the private key to use as a validator in the consensus protocol +priv_validator_key_file = "config/priv_validator_key.json" + +# Path to the JSON file containing the last sign state of a validator +priv_validator_state_file = "data/priv_validator_state.json" + +# TCP or UNIX socket address for CometBFT to listen on for +# connections from an external PrivValidator process +priv_validator_laddr = "" + +# Path to the JSON file containing the private key to use for node authentication in the p2p protocol +node_key_file = "config/node_key.json" + +# Mechanism to connect to the ABCI application: socket | grpc +abci = "socket" + +# If true, query the ABCI app on connecting to a new peer +# so the app can decide if we should keep the connection or not +filter_peers = false + + +####################################################################### +### Advanced Configuration Options ### +####################################################################### + +####################################################### +### RPC Server Configuration Options ### +####################################################### +[rpc] + +# TCP or UNIX socket address for the RPC server to listen on +laddr = "tcp://127.0.0.1:26657" + +# A list of origins a cross-domain request can be executed from +# Default value '[]' disables cors support +# Use '["*"]' to allow any origin +cors_allowed_origins = [] + +# A list of methods the client is allowed to use with cross-domain requests +cors_allowed_methods = ["HEAD", "GET", "POST", ] + +# A list of non simple headers the client is allowed to use with cross-domain requests +cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] + +# TCP or UNIX socket address for the gRPC server to listen on +# NOTE: This server only supports /broadcast_tx_commit +grpc_laddr = "" + +# Maximum number of simultaneous connections. 
+# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 +grpc_max_open_connections = 900 + +# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool +unsafe = false + +# Maximum number of simultaneous connections (including WebSocket). +# Does not include gRPC connections. See grpc_max_open_connections +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 +max_open_connections = 900 + +# Maximum number of unique clientIDs that can /subscribe +# If you're using /broadcast_tx_commit, set to the estimated maximum number +# of broadcast_tx_commit calls per block. +max_subscription_clients = 100 + +# Maximum number of unique queries a given client can /subscribe to +# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to +# the estimated # maximum number of broadcast_tx_commit calls per block. +max_subscriptions_per_client = 5 + +# Experimental parameter to specify the maximum number of events a node will +# buffer, per subscription, before returning an error and closing the +# subscription. Must be set to at least 100, but higher values will accommodate +# higher event throughput rates (and will use more memory). +experimental_subscription_buffer_size = 200 + +# Experimental parameter to specify the maximum number of RPC responses that +# can be buffered per WebSocket client. If clients cannot read from the +# WebSocket endpoint fast enough, they will be disconnected, so increasing this +# parameter may reduce the chances of them being disconnected (but will cause +# the node to use more memory). +# +# Must be at least the same as "experimental_subscription_buffer_size", +# otherwise connections could be dropped unnecessarily. This value should +# ideally be somewhat higher than "experimental_subscription_buffer_size" to +# accommodate non-subscription-related RPC responses. +experimental_websocket_write_buffer_size = 200 + +# If a WebSocket client cannot read fast enough, at present we may +# silently drop events instead of generating an error or disconnecting the +# client. +# +# Enabling this experimental parameter will cause the WebSocket connection to +# be closed instead if it cannot read fast enough, allowing for greater +# predictability in subscription behavior. +experimental_close_on_slow_client = false + +# How long to wait for a tx to be committed during /broadcast_tx_commit. +# WARNING: Using a value larger than 10s will result in increasing the +# global HTTP write timeout, which applies to all connections and endpoints. +# See https://github.com/tendermint/tendermint/issues/3435 +timeout_broadcast_tx_commit = "10s" + +# Maximum size of request body, in bytes +max_body_bytes = 1000000 + +# Maximum size of request header, in bytes +max_header_bytes = 1048576 + +# The path to a file containing certificate that is used to create the HTTPS server. +# Might be either absolute path or path related to CometBFT's config directory. 
+# If the certificate is signed by a certificate authority, +# the certFile should be the concatenation of the server's certificate, any intermediates, +# and the CA's certificate. +# NOTE: both tls_cert_file and tls_key_file must be present for CometBFT to create HTTPS server. +# Otherwise, HTTP server is run. +tls_cert_file = "" + +# The path to a file containing matching private key that is used to create the HTTPS server. +# Might be either absolute path or path related to CometBFT's config directory. +# NOTE: both tls-cert-file and tls-key-file must be present for CometBFT to create HTTPS server. +# Otherwise, HTTP server is run. +tls_key_file = "" + +# pprof listen address (https://golang.org/pkg/net/http/pprof) +pprof_laddr = "" + +####################################################### +### P2P Configuration Options ### +####################################################### +[p2p] + +# Address to listen for incoming connections +laddr = "tcp://0.0.0.0:26656" + +# Address to advertise to peers for them to dial. If empty, will use the same +# port as the laddr, and will introspect on the listener to figure out the +# address. IP and port are required. Example: 159.89.10.97:26656 +external_address = "" + +# Comma separated list of seed nodes to connect to +seeds = "" + +# Comma separated list of nodes to keep persistent connections to +persistent_peers = "" + +# Path to address book +addr_book_file = "config/addrbook.json" + +# Set true for strict address routability rules +# Set false for private or local networks +addr_book_strict = true + +# Maximum number of inbound peers +max_num_inbound_peers = 40 + +# Maximum number of outbound peers to connect to, excluding persistent peers +max_num_outbound_peers = 10 + +# List of node IDs, to which a connection will be (re)established ignoring any existing limits +unconditional_peer_ids = "" + +# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) +persistent_peers_max_dial_period = "0s" + +# Time to wait before flushing messages out on the connection +flush_throttle_timeout = "100ms" + +# Maximum size of a message packet payload, in bytes +max_packet_msg_payload_size = 1024 + +# Rate at which packets can be sent, in bytes/second +send_rate = 5120000 + +# Rate at which packets can be received, in bytes/second +recv_rate = 5120000 + +# Set true to enable the peer-exchange reactor +pex = true + +# Seed mode, in which node constantly crawls the network and looks for +# peers. If another node asks it for addresses, it responds and disconnects. +# +# Does not work if the peer-exchange reactor is disabled. +seed_mode = false + +# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) +private_peer_ids = "" + +# Toggle to disable guard against peers connecting from the same ip. +allow_duplicate_ip = false + +# Peer connection configuration. +handshake_timeout = "20s" +dial_timeout = "3s" + +####################################################### +### Mempool Configuration Option ### +####################################################### +[mempool] + +# The type of mempool for this node to use. +# +# Possible types: +# - "flood" : concurrent linked list mempool with flooding gossip protocol +# (default) +# - "nop" : nop-mempool (short for no operation; the ABCI app is responsible +# for storing, disseminating and proposing txs). "create_empty_blocks=false" is +# not supported. 
+type = "flood" + +# Recheck (default: true) defines whether CometBFT should recheck the +# validity for all remaining transaction in the mempool after a block. +# Since a block affects the application state, some transactions in the +# mempool may become invalid. If this does not apply to your application, +# you can disable rechecking. +recheck = true + +# Broadcast (default: true) defines whether the mempool should relay +# transactions to other peers. Setting this to false will stop the mempool +# from relaying transactions to other peers until they are included in a +# block. In other words, if Broadcast is disabled, only the peer you send +# the tx to will see it until it is included in a block. +broadcast = true + +# WalPath (default: "") configures the location of the Write Ahead Log +# (WAL) for the mempool. The WAL is disabled by default. To enable, set +# WalPath to where you want the WAL to be written (e.g. +# "data/mempool.wal"). +wal_dir = "" + +# Maximum number of transactions in the mempool +size = 5000 + +# Limit the total size of all txs in the mempool. +# This only accounts for raw transactions (e.g. given 1MB transactions and +# max_txs_bytes=5MB, mempool will only accept 5 transactions). +max_txs_bytes = 1073741824 + +# Size of the cache (used to filter transactions we saw earlier) in transactions +cache_size = 10000 + +# Do not remove invalid transactions from the cache (default: false) +# Set to true if it's not possible for any invalid transaction to become valid +# again in the future. +keep-invalid-txs-in-cache = false + +# Maximum size of a single transaction. +# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. +max_tx_bytes = 1048576 + +# Maximum size of a batch of transactions to send to a peer +# Including space needed by encoding (one varint per transaction). +# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 +max_batch_bytes = 0 + +# Experimental parameters to limit gossiping txs to up to the specified number of peers. +# We use two independent upper values for persistent and non-persistent peers. +# Unconditional peers are not affected by this feature. +# If we are connected to more than the specified number of persistent peers, only send txs to +# ExperimentalMaxGossipConnectionsToPersistentPeers of them. If one of those +# persistent peers disconnects, activate another persistent peer. +# Similarly for non-persistent peers, with an upper limit of +# ExperimentalMaxGossipConnectionsToNonPersistentPeers. +# If set to 0, the feature is disabled for the corresponding group of peers, that is, the +# number of active connections to that group of peers is not bounded. +# For non-persistent peers, if enabled, a value of 10 is recommended based on experimental +# performance results using the default P2P configuration. +experimental_max_gossip_connections_to_persistent_peers = 0 +experimental_max_gossip_connections_to_non_persistent_peers = 0 + +####################################################### +### State Sync Configuration Options ### +####################################################### +[statesync] +# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine +# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in +# the network to take and serve state machine snapshots. State sync is not attempted if the node +# has any local state (LastBlockHeight > 0). 
The node will have a truncated block history, +# starting from the height of the snapshot. +enable = false + +# RPC servers (comma-separated) for light client verification of the synced state machine and +# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding +# header hash obtained from a trusted source, and a period during which validators can be trusted. +# +# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 +# weeks) during which they can be financially punished (slashed) for misbehavior. +rpc_servers = "" +trust_height = 0 +trust_hash = "" +trust_period = "168h0m0s" + +# Time to spend discovering snapshots before initiating a restore. +discovery_time = "15s" + +# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). +# Will create a new, randomly named directory within, and remove it when done. +temp_dir = "" + +# The timeout duration before re-requesting a chunk, possibly from a different +# peer (default: 1 minute). +chunk_request_timeout = "10s" + +# The number of concurrent chunk fetchers to run (default: 1). +chunk_fetchers = "4" + +####################################################### +### Block Sync Configuration Options ### +####################################################### +[blocksync] + +# Block Sync version to use: +# +# In v0.37, v1 and v2 of the block sync protocols were deprecated. +# Please use v0 instead. +# +# 1) "v0" - the default block sync implementation +version = "v0" + +####################################################### +### Consensus Configuration Options ### +####################################################### +[consensus] + +wal_file = "data/cs.wal/wal" + +# How long we wait for a proposal block before prevoting nil +timeout_propose = "3s" +# How much timeout_propose increases with each round +timeout_propose_delta = "500ms" +# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) +timeout_prevote = "1s" +# How much the timeout_prevote increases with each round +timeout_prevote_delta = "500ms" +# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) +timeout_precommit = "1s" +# How much the timeout_precommit increases with each round +timeout_precommit_delta = "500ms" +# How long we wait after committing a block, before starting on the new +# height (this gives us a chance to receive some more precommits, even +# though we already have +2/3). +timeout_commit = "1s" + +# How many blocks to look back to check existence of the node's consensus votes before joining consensus +# When non-zero, the node will panic upon restart +# if the same consensus key was used to sign {double_sign_check_height} last blocks. +# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. 
+double_sign_check_height = 0 + +# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) +skip_timeout_commit = false + +# EmptyBlocks mode and possible interval between empty blocks +create_empty_blocks = true +create_empty_blocks_interval = "0s" + +# Reactor sleep duration parameters +peer_gossip_sleep_duration = "100ms" +peer_query_maj23_sleep_duration = "2s" + +####################################################### +### Storage Configuration Options ### +####################################################### +[storage] + +# Set to true to discard ABCI responses from the state store, which can save a +# considerable amount of disk space. Set to false to ensure ABCI responses are +# persisted. ABCI responses are required for /block_results RPC queries, and to +# reindex events in the command-line tool. +discard_abci_responses = false + +####################################################### +### Transaction Indexer Configuration Options ### +####################################################### +[tx_index] + +# What indexer to use for transactions +# +# The application will set which txs to index. In some cases a node operator will be able +# to decide which txs to index based on configuration set in the application. +# +# Options: +# 1) "null" +# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). +# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. +# 3) "psql" - the indexer services backed by PostgreSQL. +# When "kv" or "psql" is chosen "tx.height" and "tx.hash" will always be indexed. +indexer = "kv" + +# The PostgreSQL connection configuration, the connection format: +# postgresql://:@:/? +psql-conn = "" + +####################################################### +### Instrumentation Configuration Options ### +####################################################### +[instrumentation] + +# When true, Prometheus metrics are served under /metrics on +# PrometheusListenAddr. +# Check out the documentation for the list of available metrics. +prometheus = false + +# Address to listen for Prometheus collector(s) connections +prometheus_listen_addr = ":26660" + +# Maximum number of simultaneous connections. +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +max_open_connections = 3 + +# Instrumentation namespace +namespace = "cometbft" diff --git a/internal/confix/data/v1.0.toml b/internal/confix/data/v1.0.toml new file mode 100644 index 00000000000..b5f7edb026f --- /dev/null +++ b/internal/confix/data/v1.0.toml @@ -0,0 +1,599 @@ +# This is a TOML config file. +# For more information, see https://github.com/toml-lang/toml + +# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or +# relative to the home directory (e.g. "data"). The home directory is +# "$HOME/.cometbft" by default, but could be changed via $CMTHOME env variable +# or --home cmd flag. + +# The version of the CometBFT binary that created or +# last modified the config file. Do not modify this. 
+version = "1.0.0-alpha.2" + +####################################################################### +### Main Base Config Options ### +####################################################################### + +# TCP or UNIX socket address of the ABCI application, +# or the name of an ABCI application compiled in with the CometBFT binary +proxy_app = "tcp://127.0.0.1:26658" + +# A custom human readable name for this node +moniker = "test" + +# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb | pebbledb +# * goleveldb (github.com/syndtr/goleveldb) +# - UNMAINTAINED +# - stable +# - pure go +# * cleveldb (uses levigo wrapper) +# - DEPRECATED +# - requires gcc +# - use cleveldb build tag (go build -tags cleveldb) +# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) +# - DEPRECATED +# - EXPERIMENTAL +# - stable +# - use boltdb build tag (go build -tags boltdb) +# * rocksdb (uses github.com/linxGnu/grocksdb) +# - EXPERIMENTAL +# - requires gcc +# - use rocksdb build tag (go build -tags rocksdb) +# * badgerdb (uses github.com/dgraph-io/badger) +# - EXPERIMENTAL +# - stable +# - use badgerdb build tag (go build -tags badgerdb) +# * pebbledb (uses github.com/cockroachdb/pebble) +# - EXPERIMENTAL +# - stable +# - pure go +# - use pebbledb build tag (go build -tags pebbledb) +db_backend = "goleveldb" + +# Database directory +db_dir = "data" + +# Output level for logging, including package level options +log_level = "info" + +# Output format: 'plain' (colored text) or 'json' +log_format = "plain" + +##### additional base config options ##### + +# Path to the JSON file containing the initial validator set and other meta data +genesis_file = "config/genesis.json" + +# Path to the JSON file containing the private key to use as a validator in the consensus protocol +priv_validator_key_file = "config/priv_validator_key.json" + +# Path to the JSON file containing the last sign state of a validator +priv_validator_state_file = "data/priv_validator_state.json" + +# TCP or UNIX socket address for CometBFT to listen on for +# connections from an external PrivValidator process +priv_validator_laddr = "" + +# Path to the JSON file containing the private key to use for node authentication in the p2p protocol +node_key_file = "config/node_key.json" + +# Mechanism to connect to the ABCI application: socket | grpc +abci = "socket" + +# If true, query the ABCI app on connecting to a new peer +# so the app can decide if we should keep the connection or not +filter_peers = false + + +####################################################################### +### Advanced Configuration Options ### +####################################################################### + +####################################################### +### RPC Server Configuration Options ### +####################################################### +[rpc] + +# TCP or UNIX socket address for the RPC server to listen on +laddr = "tcp://127.0.0.1:26657" + +# A list of origins a cross-domain request can be executed from +# Default value '[]' disables cors support +# Use '["*"]' to allow any origin +cors_allowed_origins = [] + +# A list of methods the client is allowed to use with cross-domain requests +cors_allowed_methods = ["HEAD", "GET", "POST", ] + +# A list of non simple headers the client is allowed to use with cross-domain requests +cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] + +# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool 
+unsafe = false + +# Maximum number of simultaneous connections (including WebSocket). +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 +max_open_connections = 900 + +# Maximum number of unique clientIDs that can /subscribe. +# If you're using /broadcast_tx_commit, set to the estimated maximum number +# of broadcast_tx_commit calls per block. +max_subscription_clients = 100 + +# Maximum number of unique queries a given client can /subscribe to. +# If you're using /broadcast_tx_commit, set to the estimated maximum number +# of broadcast_tx_commit calls per block. +max_subscriptions_per_client = 5 + +# Experimental parameter to specify the maximum number of events a node will +# buffer, per subscription, before returning an error and closing the +# subscription. Must be set to at least 100, but higher values will accommodate +# higher event throughput rates (and will use more memory). +experimental_subscription_buffer_size = 200 + +# Experimental parameter to specify the maximum number of RPC responses that +# can be buffered per WebSocket client. If clients cannot read from the +# WebSocket endpoint fast enough, they will be disconnected, so increasing this +# parameter may reduce the chances of them being disconnected (but will cause +# the node to use more memory). +# +# Must be at least the same as "experimental_subscription_buffer_size", +# otherwise connections could be dropped unnecessarily. This value should +# ideally be somewhat higher than "experimental_subscription_buffer_size" to +# accommodate non-subscription-related RPC responses. +experimental_websocket_write_buffer_size = 200 + +# If a WebSocket client cannot read fast enough, at present we may +# silently drop events instead of generating an error or disconnecting the +# client. +# +# Enabling this experimental parameter will cause the WebSocket connection to +# be closed instead if it cannot read fast enough, allowing for greater +# predictability in subscription behavior. +experimental_close_on_slow_client = false + +# How long to wait for a tx to be committed during /broadcast_tx_commit. +# WARNING: Using a value larger than 10s will result in increasing the +# global HTTP write timeout, which applies to all connections and endpoints. +# See https://github.com/tendermint/tendermint/issues/3435 +timeout_broadcast_tx_commit = "10s" + +# Maximum number of requests that can be sent in a batch +# If the value is set to '0' (zero-value), then no maximum batch size will be +# enforced for a JSON-RPC batch request. +max_request_batch_size = 10 + +# Maximum size of request body, in bytes +max_body_bytes = 1000000 + +# Maximum size of request header, in bytes +max_header_bytes = 1048576 + +# The path to a file containing certificate that is used to create the HTTPS server. +# Might be either absolute path or path related to CometBFT's config directory. +# If the certificate is signed by a certificate authority, +# the certFile should be the concatenation of the server's certificate, any intermediates, +# and the CA's certificate. +# NOTE: both tls_cert_file and tls_key_file must be present for CometBFT to create HTTPS server. +# Otherwise, HTTP server is run. +tls_cert_file = "" + +# The path to a file containing matching private key that is used to create the HTTPS server. 
+# Might be either absolute path or path related to CometBFT's config directory. +# NOTE: both tls_cert_file and tls_key_file must be present for CometBFT to create HTTPS server. +# Otherwise, HTTP server is run. +tls_key_file = "" + +# pprof listen address (https://golang.org/pkg/net/http/pprof) +pprof_laddr = "" + +####################################################### +### gRPC Server Configuration Options ### +####################################################### + +# +# Note that the gRPC server is exposed unauthenticated. It is critical that +# this server not be exposed directly to the public internet. If this service +# must be accessed via the public internet, please ensure that appropriate +# precautions are taken (e.g. fronting with a reverse proxy like nginx with TLS +# termination and authentication, using DDoS protection services like +# CloudFlare, etc.). +# + +[grpc] + +# TCP or UNIX socket address for the RPC server to listen on. If not specified, +# the gRPC server will be disabled. +laddr = "" + +# +# Each gRPC service can be turned on/off, and in some cases configured, +# individually. If the gRPC server is not enabled, all individual services' +# configurations are ignored. +# + +# The gRPC version service provides version information about the node and the +# protocols it uses. +[grpc.version_service] +enabled = true + +# The gRPC block service returns block information +[grpc.block_service] +enabled = true + +# The gRPC block results service returns block results for a given height. If no height +# is given, it will return the block results from the latest height. +[grpc.block_results_service] +enabled = true + +# +# Configuration for privileged gRPC endpoints, which should **never** be exposed +# to the public internet. +# +[grpc.privileged] +# The host/port on which to expose privileged gRPC endpoints. +laddr = "" + +# +# Configuration specifically for the gRPC pruning service, which is considered a +# privileged service. +# +[grpc.privileged.pruning_service] + +# Only controls whether the pruning service is accessible via the gRPC API - not +# whether a previously set pruning service retain height is honored by the +# node. See the [storage.pruning] section for control over pruning. +# +# Disabled by default. +enabled = false + +####################################################### +### P2P Configuration Options ### +####################################################### +[p2p] + +# Address to listen for incoming connections +laddr = "tcp://0.0.0.0:26656" + +# Address to advertise to peers for them to dial. If empty, will use the same +# port as the laddr, and will introspect on the listener to figure out the +# address. IP and port are required. 
Example: 159.89.10.97:26656 +external_address = "" + +# Comma separated list of seed nodes to connect to +seeds = "" + +# Comma separated list of nodes to keep persistent connections to +persistent_peers = "" + +# Path to address book +addr_book_file = "config/addrbook.json" + +# Set true for strict address routability rules +# Set false for private or local networks +addr_book_strict = true + +# Maximum number of inbound peers +max_num_inbound_peers = 40 + +# Maximum number of outbound peers to connect to, excluding persistent peers +max_num_outbound_peers = 10 + +# List of node IDs, to which a connection will be (re)established ignoring any existing limits +unconditional_peer_ids = "" + +# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) +persistent_peers_max_dial_period = "0s" + +# Time to wait before flushing messages out on the connection +flush_throttle_timeout = "10ms" + +# Maximum size of a message packet payload, in bytes +max_packet_msg_payload_size = 1024 + +# Rate at which packets can be sent, in bytes/second +send_rate = 5120000 + +# Rate at which packets can be received, in bytes/second +recv_rate = 5120000 + +# Set true to enable the peer-exchange reactor +pex = true + +# Seed mode, in which node constantly crawls the network and looks for +# peers. If another node asks it for addresses, it responds and disconnects. +# +# Does not work if the peer-exchange reactor is disabled. +seed_mode = false + +# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) +private_peer_ids = "" + +# Toggle to disable guard against peers connecting from the same ip. +allow_duplicate_ip = false + +# Peer connection configuration. +handshake_timeout = "20s" +dial_timeout = "3s" + +####################################################### +### Mempool Configuration Options ### +####################################################### +[mempool] + +# The type of mempool for this node to use. +# +# Possible types: +# - "flood" : concurrent linked list mempool with flooding gossip protocol +# (default) +# - "nop" : nop-mempool (short for no operation; the ABCI app is responsible +# for storing, disseminating and proposing txs). "create_empty_blocks=false" is +# not supported. +type = "flood" + +# recheck (default: true) defines whether CometBFT should recheck the +# validity for all remaining transaction in the mempool after a block. +# Since a block affects the application state, some transactions in the +# mempool may become invalid. If this does not apply to your application, +# you can disable rechecking. +recheck = true + +# recheck_timeout is the time the application has during the rechecking process +# to return CheckTx responses, once all requests have been sent. Responses that +# arrive after the timeout expires are discarded. It only applies to +# non-local ABCI clients and when recheck is enabled. +recheck_timeout = "1s" + +# broadcast (default: true) defines whether the mempool should relay +# transactions to other peers. Setting this to false will stop the mempool +# from relaying transactions to other peers until they are included in a +# block. In other words, if Broadcast is disabled, only the peer you send +# the tx to will see it until it is included in a block. +broadcast = true + +# wal_dir (default: "") configures the location of the Write Ahead Log +# (WAL) for the mempool. The WAL is disabled by default. To enable, set +# wal_dir to where you want the WAL to be written (e.g. +# "data/mempool.wal"). 
+wal_dir = "" + +# Maximum number of transactions in the mempool +size = 5000 + +# Maximum size in bytes of a single transaction accepted into the mempool. +max_tx_bytes = 1048576 + +# The maximum size in bytes of all transactions stored in the mempool. +# This is the raw, total transaction size. For example, given 1MB +# transactions and a 5MB maximum mempool byte size, the mempool will +# only accept five transactions. +max_txs_bytes = 67108864 + +# Size of the cache (used to filter transactions we saw earlier) in transactions +cache_size = 10000 + +# Do not remove invalid transactions from the cache (default: false) +# Set to true if it's not possible for any invalid transaction to become valid +# again in the future. +keep-invalid-txs-in-cache = false + +# Experimental parameters to limit gossiping txs to up to the specified number of peers. +# We use two independent upper values for persistent and non-persistent peers. +# Unconditional peers are not affected by this feature. +# If we are connected to more than the specified number of persistent peers, only send txs to +# ExperimentalMaxGossipConnectionsToPersistentPeers of them. If one of those +# persistent peers disconnects, activate another persistent peer. +# Similarly for non-persistent peers, with an upper limit of +# ExperimentalMaxGossipConnectionsToNonPersistentPeers. +# If set to 0, the feature is disabled for the corresponding group of peers, that is, the +# number of active connections to that group of peers is not bounded. +# For non-persistent peers, if enabled, a value of 10 is recommended based on experimental +# performance results using the default P2P configuration. +experimental_max_gossip_connections_to_persistent_peers = 0 +experimental_max_gossip_connections_to_non_persistent_peers = 0 + +####################################################### +### State Sync Configuration Options ### +####################################################### +[statesync] +# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine +# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in +# the network to take and serve state machine snapshots. State sync is not attempted if the node +# has any local state (LastBlockHeight > 0). The node will have a truncated block history, +# starting from the height of the snapshot. +enable = false + +# RPC servers (comma-separated) for light client verification of the synced state machine and +# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding +# header hash obtained from a trusted source, and a period during which validators can be trusted. +# +# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 +# weeks) during which they can be financially punished (slashed) for misbehavior. +rpc_servers = "" +trust_height = 0 +trust_hash = "" +trust_period = "168h0m0s" + +# Time to spend discovering snapshots before initiating a restore. +discovery_time = "15s" + +# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). +# Will create a new, randomly named directory within, and remove it when done. +temp_dir = "" + +# The timeout duration before re-requesting a chunk, possibly from a different +# peer (default: 1 minute). +chunk_request_timeout = "10s" + +# The number of concurrent chunk fetchers to run (default: 1). 
+chunk_fetchers = "4" + +####################################################### +### Block Sync Configuration Options ### +####################################################### +[blocksync] + +# Block Sync version to use: +# +# In v0.37, v1 and v2 of the block sync protocols were deprecated. +# Please use v0 instead. +# +# 1) "v0" - the default block sync implementation +version = "v0" + +####################################################### +### Consensus Configuration Options ### +####################################################### +[consensus] + +wal_file = "data/cs.wal/wal" + +# How long we wait for a proposal block before prevoting nil +timeout_propose = "3s" +# How much timeout_propose increases with each round +timeout_propose_delta = "500ms" +# How long we wait after receiving +2/3 prevotes/precommits for “anything” (ie. not a single block or nil) +timeout_vote = "1s" +# How much the timeout_vote increases with each round +timeout_vote_delta = "500ms" +# How long we wait after committing a block, before starting on the new +# height (this gives us a chance to receive some more precommits, even +# though we already have +2/3). +# Set to 0 if you want to make progress as soon as the node has all the precommits. +timeout_commit = "1s" + +# Deprecated: set `timeout_commit` to 0 instead. +skip_timeout_commit = false + +# How many blocks to look back to check existence of the node's consensus votes before joining consensus +# When non-zero, the node will panic upon restart +# if the same consensus key was used to sign {double_sign_check_height} last blocks. +# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. +double_sign_check_height = 0 + +# EmptyBlocks mode and possible interval between empty blocks +create_empty_blocks = true +create_empty_blocks_interval = "0s" + +# Reactor sleep duration parameters +peer_gossip_sleep_duration = "100ms" +peer_gossip_intraloop_sleep_duration = "0s" +peer_query_maj23_sleep_duration = "2s" + +####################################################### +### Storage Configuration Options ### +####################################################### +[storage] + +# Set to true to discard ABCI responses from the state store, which can save a +# considerable amount of disk space. Set to false to ensure ABCI responses are +# persisted. ABCI responses are required for /block_results RPC queries, and to +# reindex events in the command-line tool. +discard_abci_responses = false + +# The representation of keys in the database. +# The current representation of keys in Comet's stores is considered to be v1 +# Users can experiment with a different layout by setting this field to v2. +# Note that this is an experimental feature and switching back from v2 to v1 +# is not supported by CometBFT. +# If the database was initially created with v1, it is necessary to migrate the DB +# before switching to v2. The migration is not done automatically. +# v1 - the legacy layout existing in Comet prior to v1. +# v2 - Order preserving representation ordering entries by height. +experimental_db_key_layout = "v1" + +# If set to true, CometBFT will force compaction to happen for databases that support this feature. +# and save on storage space. Setting this to true is most benefits when used in combination +# with pruning as it will physically delete the entries marked for deletion. +# false by default (forcing compaction is disabled). 
+compact = false + +# To avoid forcing compaction every time, this parameter instructs CometBFT to wait +# the given amount of blocks to be pruned before triggering compaction. +# It should be tuned depending on the number of items. If your retain height is 1 block, +# it is too much of an overhead to try compaction every block. But it should also not be a very +# large multiple of your retain height as it might occur bigger overheads. +compaction_interval = "1000" + +# Hash of the Genesis file (as hex string), passed to CometBFT via the command line. +# If this hash mismatches the hash that CometBFT computes on the genesis file, +# the node is not able to boot. +genesis_hash = "" + +[storage.pruning] + +# The time period between automated background pruning operations. +interval = "10s" + +# +# Storage pruning configuration relating only to the data companion. +# +[storage.pruning.data_companion] + +# Whether automatic pruning respects values set by the data companion. Disabled +# by default. All other parameters in this section are ignored when this is +# disabled. +# +# If disabled, only the application retain height will influence block pruning +# (but not block results pruning). Only enabling this at a later stage will +# potentially mean that blocks below the application-set retain height at the +# time will not be available to the data companion. +enabled = false + +# The initial value for the data companion block retain height if the data +# companion has not yet explicitly set one. If the data companion has already +# set a block retain height, this is ignored. +initial_block_retain_height = 0 + +# The initial value for the data companion block results retain height if the +# data companion has not yet explicitly set one. If the data companion has +# already set a block results retain height, this is ignored. +initial_block_results_retain_height = 0 + +####################################################### +### Transaction Indexer Configuration Options ### +####################################################### +[tx_index] + +# What indexer to use for transactions +# +# The application will set which txs to index. In some cases a node operator will be able +# to decide which txs to index based on configuration set in the application. +# +# Options: +# 1) "null" +# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). +# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. +# 3) "psql" - the indexer services backed by PostgreSQL. +# When "kv" or "psql" is chosen "tx.height" and "tx.hash" will always be indexed. +indexer = "kv" + +# The PostgreSQL connection configuration, the connection format: +# postgresql://:@:/? +psql-conn = "" + +####################################################### +### Instrumentation Configuration Options ### +####################################################### +[instrumentation] + +# When true, Prometheus metrics are served under /metrics on +# PrometheusListenAddr. +# Check out the documentation for the list of available metrics. +prometheus = false + +# Address to listen for Prometheus collector(s) connections +prometheus_listen_addr = ":26660" + +# Maximum number of simultaneous connections. +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. 
+max_open_connections = 3 + +# Instrumentation namespace +namespace = "cometbft" diff --git a/internal/confix/diff.go b/internal/confix/diff.go new file mode 100644 index 00000000000..6d25c8412ba --- /dev/null +++ b/internal/confix/diff.go @@ -0,0 +1,180 @@ +package confix + +import ( + "fmt" + "io" + "sort" + + "github.com/creachadair/tomledit" + "github.com/creachadair/tomledit/parser" + "github.com/creachadair/tomledit/transform" +) + +type DiffType string + +const ( + Section DiffType = "S" + Mapping DiffType = "M" +) + +type KV struct { + Key string + Value string + Block []string // comment block +} + +type Diff struct { + Type DiffType + Deleted bool + + KV KV +} + +// DiffKeys diffs the keyspaces of the TOML documents in files lhs and rhs. +// Comments, order, and values are ignored for comparison purposes. +func DiffKeys(lhs, rhs *tomledit.Document) []Diff { + // diff sections + diff := diffDocs(allKVs(lhs.Global), allKVs(rhs.Global), false) + + lsec, rsec := lhs.Sections, rhs.Sections + transform.SortSectionsByName(lsec) + transform.SortSectionsByName(rsec) + + i, j := 0, 0 + for i < len(lsec) && j < len(rsec) { + switch { + case lsec[i].Name.Before(rsec[j].Name): + diff = append(diff, Diff{Type: Section, Deleted: true, KV: KV{Key: lsec[i].Name.String()}}) + for _, kv := range allKVs(lsec[i]) { + diff = append(diff, Diff{Type: Mapping, Deleted: true, KV: kv}) + } + i++ + case rsec[j].Name.Before(lsec[i].Name): + diff = append(diff, Diff{Type: Section, KV: KV{Key: rsec[j].Name.String()}}) + for _, kv := range allKVs(rsec[j]) { + diff = append(diff, Diff{Type: Mapping, KV: kv}) + } + j++ + default: + diff = append(diff, diffDocs(allKVs(lsec[i]), allKVs(rsec[j]), false)...) + i++ + j++ + } + } + for ; i < len(lsec); i++ { + diff = append(diff, Diff{Type: Section, Deleted: true, KV: KV{Key: lsec[i].Name.String()}}) + for _, kv := range allKVs(lsec[i]) { + diff = append(diff, Diff{Type: Mapping, Deleted: true, KV: kv}) + } + } + for ; j < len(rsec); j++ { + diff = append(diff, Diff{Type: Section, KV: KV{Key: rsec[j].Name.String()}}) + for _, kv := range allKVs(rsec[j]) { + diff = append(diff, Diff{Type: Mapping, KV: kv}) + } + } + + return diff +} + +// DiffValues diffs the keyspaces with different values of the TOML documents in files lhs and rhs. +func DiffValues(lhs, rhs *tomledit.Document) []Diff { + diff := diffDocs(allKVs(lhs.Global), allKVs(rhs.Global), true) + + lsec, rsec := lhs.Sections, rhs.Sections + transform.SortSectionsByName(lsec) + transform.SortSectionsByName(rsec) + + i, j := 0, 0 + for i < len(lsec) && j < len(rsec) { + switch { + case lsec[i].Name.Before(rsec[j].Name): + // skip keys present in lhs but not in rhs + i++ + case rsec[j].Name.Before(lsec[i].Name): + // skip keys present in rhs but not in lhs + j++ + default: + for _, d := range diffDocs(allKVs(lsec[i]), allKVs(rsec[j]), true) { + if !d.Deleted { + diff = append(diff, d) + } + } + i++ + j++ + } + } + + return diff +} + +func allKVs(s *tomledit.Section) []KV { + keys := []KV{} + s.Scan(func(key parser.Key, entry *tomledit.Entry) bool { + keys = append(keys, KV{ + Key: key.String(), + // we get the value of the current configuration (i.e the one we want to compare/migrate) + Value: entry.Value.String(), + Block: entry.Block, + }) + + return true + }) + return keys +} + +// diffDocs get the diff between all keys in lhs and rhs. +// when a key is in both lhs and rhs, it is ignored, unless value is true in which case the value is as well compared. 
+func diffDocs(lhs, rhs []KV, value bool) []Diff { + diff := []Diff{} + + sort.Slice(lhs, func(i, j int) bool { + return lhs[i].Key < lhs[j].Key + }) + sort.Slice(rhs, func(i, j int) bool { + return rhs[i].Key < rhs[j].Key + }) + + i, j := 0, 0 + for i < len(lhs) && j < len(rhs) { + switch { + case lhs[i].Key < rhs[j].Key: + diff = append(diff, Diff{Type: Mapping, Deleted: true, KV: lhs[i]}) + i++ + case lhs[i].Key > rhs[j].Key: + diff = append(diff, Diff{Type: Mapping, KV: rhs[j]}) + j++ + default: + // key exists in both lhs and rhs + // if value is true, compare the values + if value && lhs[i].Value != rhs[j].Value { + diff = append(diff, Diff{Type: Mapping, KV: lhs[i]}) + } + i++ + j++ + } + } + for ; i < len(lhs); i++ { + diff = append(diff, Diff{Type: Mapping, Deleted: true, KV: lhs[i]}) + } + for ; j < len(rhs); j++ { + diff = append(diff, Diff{Type: Mapping, KV: rhs[j]}) + } + + return diff +} + +// PrintDiff output prints one line per key that differs: +// -S name -- section exists in f1 but not f2 +// +S name -- section exists in f2 but not f1 +// -M name -- mapping exists in f1 but not f2 +// +M name -- mapping exists in f2 but not f1. +func PrintDiff(w io.Writer, diffs []Diff) { + for _, diff := range diffs { + if diff.Deleted { + fmt.Fprintln(w, fmt.Sprintf("-%s", diff.Type), fmt.Sprintf("%s=%s", diff.KV.Key, diff.KV.Value)) + } else { + fmt.Fprintln(w, fmt.Sprintf("+%s", diff.Type), fmt.Sprintf("%s=%s", diff.KV.Key, diff.KV.Value)) + } + } +} diff --git a/internal/confix/doc.go b/internal/confix/doc.go new file mode 100644 index 00000000000..83110931f09 --- /dev/null +++ b/internal/confix/doc.go @@ -0,0 +1,4 @@ +// Package confix applies changes to a CometBFT TOML configuration file, to +// update configurations created with an older version of CometBFT to a +// compatible format for a newer version. +package confix diff --git a/internal/confix/file.go b/internal/confix/file.go new file mode 100644 index 00000000000..0d16f0bed2c --- /dev/null +++ b/internal/confix/file.go @@ -0,0 +1,35 @@ +package confix + +import ( + "embed" + "fmt" + "os" + "path/filepath" + + "github.com/creachadair/tomledit" +) + +//go:embed data +var data embed.FS + +// LoadLocalConfig loads and parses the TOML document from confix data. +func LoadLocalConfig(fileName string) (*tomledit.Document, error) { + f, err := data.Open(filepath.Join("data", fileName)) + if err != nil { + return nil, fmt.Errorf("failed to read file: %w. This file should have been included in confix", err) + } + defer f.Close() + + return tomledit.Parse(f) +} + +// LoadConfig loads and parses the TOML document from path. +func LoadConfig(path string) (*tomledit.Document, error) { + f, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("failed to open %q: %w", path, err) + } + defer f.Close() + + return tomledit.Parse(f) +} diff --git a/internal/confix/log.go b/internal/confix/log.go new file mode 100644 index 00000000000..67d9bc47d74 --- /dev/null +++ b/internal/confix/log.go @@ -0,0 +1,14 @@ +package confix + +import ( + "context" + "io" + + "github.com/creachadair/tomledit/transform" +) + +// WithLogWriter returns a child of ctx with a logger attached that sends +// output to w. This is a convenience wrapper for transform.WithLogWriter. 
+func WithLogWriter(ctx context.Context, w io.Writer) context.Context { + return transform.WithLogWriter(ctx, w) +} diff --git a/internal/confix/migrations.go b/internal/confix/migrations.go new file mode 100644 index 00000000000..f9fbec4b597 --- /dev/null +++ b/internal/confix/migrations.go @@ -0,0 +1,121 @@ +package confix + +import ( + "context" + "fmt" + "strings" + + "github.com/creachadair/tomledit" + "github.com/creachadair/tomledit/parser" + "github.com/creachadair/tomledit/transform" + "golang.org/x/text/cases" + "golang.org/x/text/language" +) + +const ( + Config = "config.toml" +) + +// MigrationMap defines a mapping from a version to a transformation plan. +type MigrationMap map[string]func(from *tomledit.Document, to string) transform.Plan + +var Migrations = MigrationMap{ + "v0.34": NoPlan, + "v0.37": PlanBuilder, + "v0.38": PlanBuilder, + "v1.0": PlanBuilder, + // "v0.xx.x": PlanBuilder, // add specific migration in case of configuration changes in minor versions +} + +// PlanBuilder is a function that returns a transformation plan for a given diff between two files. +func PlanBuilder(from *tomledit.Document, to string) transform.Plan { + plan := transform.Plan{} + deletedSections := map[string]bool{} + + target, err := LoadLocalConfig(to + ".toml") + if err != nil { + panic(fmt.Errorf("failed to parse file: %w. This file should have been valid", err)) + } + + diffs := DiffKeys(from, target) + for _, diff := range diffs { + kv := diff.KV + + var step transform.Step + keys := strings.Split(kv.Key, ".") + + if !diff.Deleted { + switch diff.Type { + case Section: + step = transform.Step{ + Desc: fmt.Sprintf("add %s section", kv.Key), + T: transform.Func(func(_ context.Context, doc *tomledit.Document) error { + caser := cases.Title(language.English) + title := fmt.Sprintf("### %s Configuration ###", caser.String(kv.Key)) + doc.Sections = append(doc.Sections, &tomledit.Section{ + Heading: &parser.Heading{ + Block: parser.Comments{ + strings.Repeat("#", len(title)), + title, + strings.Repeat("#", len(title)), + }, + Name: keys, + }, + }) + return nil + }), + } + case Mapping: + if len(keys) == 1 { // top-level key + step = transform.Step{ + Desc: fmt.Sprintf("add %s key", kv.Key), + T: transform.EnsureKey(nil, &parser.KeyValue{ + Block: kv.Block, + Name: parser.Key{keys[0]}, + Value: parser.MustValue(kv.Value), + }), + } + } else if len(keys) > 1 { + step = transform.Step{ + Desc: fmt.Sprintf("add %s key", kv.Key), + T: transform.EnsureKey(keys[0:len(keys)-1], &parser.KeyValue{ + Block: kv.Block, + Name: parser.Key{keys[len(keys)-1]}, + Value: parser.MustValue(kv.Value), + }), + } + } + default: + panic(fmt.Errorf("unknown diff type: %s", diff.Type)) + } + } else { + if diff.Type == Section { + deletedSections[kv.Key] = true + step = transform.Step{ + Desc: fmt.Sprintf("remove %s section", kv.Key), + T: transform.Remove(keys), + } + } else { + // when the whole section is deleted we don't need to remove the keys + if len(keys) > 1 && deletedSections[keys[0]] { + continue + } + + step = transform.Step{ + Desc: fmt.Sprintf("remove %s key", kv.Key), + T: transform.Remove(keys), + } + } + } + + plan = append(plan, step) + } + + return plan +} + +// NoPlan returns a no-op plan. 
+func NoPlan(_ *tomledit.Document, to string) transform.Plan {
+	fmt.Printf("no migration needed to %s\n", to)
+	return transform.Plan{}
+}
diff --git a/internal/confix/upgrade.go b/internal/confix/upgrade.go
new file mode 100644
index 00000000000..aca921d84de
--- /dev/null
+++ b/internal/confix/upgrade.go
@@ -0,0 +1,82 @@
+package confix
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"os"
+
+	"github.com/creachadair/atomicfile"
+	"github.com/creachadair/tomledit"
+	"github.com/creachadair/tomledit/transform"
+	"github.com/spf13/viper"
+
+	"github.com/cometbft/cometbft/config"
+)
+
+// Upgrade reads the configuration file at configPath and applies any
+// transformations necessary to upgrade it to the current version. If this
+// succeeds, the transformed output is written to outputPath. As a special
+// case, if outputPath == "" the output is written to stdout.
+//
+// It is safe if outputPath == configPath. If a regular file outputPath already
+// exists, it is overwritten. In case of error, the output is not written.
+//
+// Upgrade is a convenience wrapper for calls to LoadConfig, Plan.Apply, and
+// CheckValid. If the caller requires more control over the behavior of the
+// upgrade, call those functions directly.
+func Upgrade(ctx context.Context, plan transform.Plan, configPath, outputPath string, skipValidate bool) error {
+	if configPath == "" {
+		return errors.New("empty input configuration path")
+	}
+
+	doc, err := LoadConfig(configPath)
+	if err != nil {
+		return fmt.Errorf("loading config: %w", err)
+	}
+
+	// Apply the migration plan, transforming doc in place.
+	if err := plan.Apply(ctx, doc); err != nil {
+		return fmt.Errorf("updating %q: %w", configPath, err)
+	}
+
+	var buf bytes.Buffer
+	if err := tomledit.Format(&buf, doc); err != nil {
+		return fmt.Errorf("formatting config: %w", err)
+	}
+
+	// Validation of the transformed document can be skipped by the caller.
+	if !skipValidate {
+		// Verify that the config is still valid after applying the plan.
+		if err := CheckValid(buf.Bytes()); err != nil {
+			return fmt.Errorf("updated config is invalid: %w", err)
+		}
+	}
+
+	if outputPath == "" {
+		_, err = os.Stdout.Write(buf.Bytes())
+	} else {
+		err = atomicfile.WriteData(outputPath, buf.Bytes(), 0o600)
+	}
+
+	return err
+}
+
+// CheckValid checks whether the specified config appears to be a valid CometBFT config file.
+// It unmarshals the config into CometBFT's Config struct and reports any error encountered.
+func CheckValid(data []byte) error {
+	v := viper.New()
+	v.SetConfigType("toml")
+
+	if err := v.ReadConfig(bytes.NewReader(data)); err != nil {
+		return fmt.Errorf("reading config: %w", err)
+	}
+
+	var cfg config.Config
+	if err := v.Unmarshal(&cfg); err != nil {
+		return fmt.Errorf("failed to unmarshal as config: %w", err)
+	}
+
+	return nil
+}
diff --git a/internal/confix/upgrade_test.go b/internal/confix/upgrade_test.go
new file mode 100644
index 00000000000..a805f18171a
--- /dev/null
+++ b/internal/confix/upgrade_test.go
@@ -0,0 +1,34 @@
+package confix_test
+
+import (
+	"os"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/cometbft/cometbft/internal/confix"
+)
+
+func mustReadConfig(t *testing.T, path string) []byte {
+	t.Helper()
+	f, err := os.ReadFile(path)
+	if err != nil {
+		t.Fatalf("failed to read config file %q: %v", path, err)
+	}
+
+	return f
+}
+
+func TestCheckValid(t *testing.T) {
+	err := confix.CheckValid(mustReadConfig(t, "data/v0.34.toml"))
+	assert.NoError(t, err)
+
+	err = confix.CheckValid(mustReadConfig(t, "data/v0.37.toml"))
+	assert.NoError(t, err)
+
+	err = confix.CheckValid(mustReadConfig(t, "data/v0.38.toml"))
+	assert.NoError(t, err)
+
+	err = confix.CheckValid(mustReadConfig(t, "data/v1.0.toml"))
+	assert.NoError(t, err)
+}
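
For reference, the new `confix` helpers compose programmatically in roughly the way the `cometbft config migrate` command presumably wires them together. The sketch below is illustrative only: the config path and target version are placeholders, and because the package lives under `internal/`, it can only be imported from within the cometbft module itself.

```go
package main

import (
	"context"
	"log"
	"os"

	// Internal package: importable only from within the cometbft module.
	"github.com/cometbft/cometbft/internal/confix"
)

func main() {
	configPath := "/home/user/.cometbft/config/config.toml" // placeholder path

	// Parse the existing config so a migration plan can be derived from it.
	doc, err := confix.LoadConfig(configPath)
	if err != nil {
		log.Fatalf("loading config: %v", err)
	}

	// Look up the plan builder registered for the target version; PlanBuilder
	// diffs the document against the bundled v1.0 reference file.
	planFn, ok := confix.Migrations["v1.0"]
	if !ok {
		log.Fatal("no migration registered for v1.0")
	}
	plan := planFn(doc, "v1.0")

	// Log each applied step to stderr, rewrite the file in place
	// (outputPath == configPath), and validate the result.
	ctx := confix.WithLogWriter(context.Background(), os.Stderr)
	if err := confix.Upgrade(ctx, plan, configPath, configPath, false); err != nil {
		log.Fatalf("migration failed: %v", err)
	}
}
```

Note that `Upgrade` reloads the file itself before applying the plan and writes the result through `atomicfile`, so passing the same path for input and output performs a safe in-place rewrite.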
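
The diffing helpers can also be used on their own to report how a local config differs from one of the embedded reference files; the output uses the `+S`/`-S`/`+M`/`-M` notation documented on `PrintDiff`. Again a hedged sketch with a placeholder path rather than anything shipped in this patch.

```go
package main

import (
	"log"
	"os"

	"github.com/cometbft/cometbft/internal/confix" // internal package
)

func main() {
	// Placeholder path to the config under inspection.
	doc, err := confix.LoadConfig("/home/user/.cometbft/config/config.toml")
	if err != nil {
		log.Fatalf("loading config: %v", err)
	}

	// Load the v1.0 reference config embedded in the confix package.
	ref, err := confix.LoadLocalConfig("v1.0.toml")
	if err != nil {
		log.Fatalf("loading reference config: %v", err)
	}

	// Sections and keys present on only one side (values ignored).
	confix.PrintDiff(os.Stdout, confix.DiffKeys(doc, ref))

	// Entries whose values differ between the two documents.
	confix.PrintDiff(os.Stdout, confix.DiffValues(doc, ref))
}
```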