diff --git a/stellar/Dockerfile b/stellar/Dockerfile
new file mode 100644
index 0000000..100098e
--- /dev/null
+++ b/stellar/Dockerfile
@@ -0,0 +1,27 @@
+FROM debian:stretch
+
+# git tag from https://github.com/stellar/stellar-core
+ARG STELLAR_CORE_VERSION="v13.2.0"
+ARG STELLAR_CORE_BUILD_DEPS="git build-essential pkg-config autoconf automake libtool bison flex libpq-dev wget pandoc"
+ARG STELLAR_CORE_DEPS="curl jq libpq5"
+
+ADD scripts/* /
+RUN /install.sh
+
+VOLUME /data
+
+# peer port
+EXPOSE 11625
+
+# HTTP port
+EXPOSE 11626
+
+# configuration options, see here for docs:
+# https://github.com/stellar/stellar-core/blob/master/docs/stellar-core_example.cfg
+ENV HTTP_MAX_CLIENT="128" \
+    NETWORK_PASSPHRASE="Public Global Stellar Network ; September 2015" \
+    INITIALIZE_DB=true
+
+ENTRYPOINT ["/docker-entrypoint.sh"]
+
+CMD ["/usr/local/bin/stellar-core", "run", "--conf", "/stellar-core.cfg"]
diff --git a/stellar/README.md b/stellar/README.md
new file mode 100644
index 0000000..5f96bfe
--- /dev/null
+++ b/stellar/README.md
@@ -0,0 +1,11 @@
+# Stellar
+
+## Start the cryptonode
+
+```shell
+docker-compose up -Vd
+```
+
+## Usage
+
+[Stellar Horizon API reference](https://developers.stellar.org/api/introduction/)
diff --git a/stellar/VERSION b/stellar/VERSION
new file mode 100644
index 0000000..67aee23
--- /dev/null
+++ b/stellar/VERSION
@@ -0,0 +1 @@
+13.2.0
diff --git a/stellar/config/stellar-core.cfg b/stellar/config/stellar-core.cfg
new file mode 100644
index 0000000..1d77339
--- /dev/null
+++ b/stellar/config/stellar-core.cfg
@@ -0,0 +1,615 @@
+#
+# This file gives details of the various configuration parameters you can set
+# when running stellar-core. You will need to edit it to fit your own setup.
+#
+# This is a TOML file. See https://github.com/toml-lang/toml for syntax.
+
+
+###########################
+## General admin settings
+
+
+# LOG_FILE_PATH (string) default "stellar-core.log"
+# Path to the file you want stellar-core to write its log to.
+# You can set this to "" for no log file.
+LOG_FILE_PATH=""
+
+# BUCKET_DIR_PATH (string) default "buckets"
+# Specifies the directory where stellar-core should store the bucket list.
+# This will get written to a lot and will grow as the size of the ledger grows.
+BUCKET_DIR_PATH="buckets"
+
+
+# DATABASE (string) default "sqlite3://:memory:"
+# Sets the DB connection string for SOCI.
+# Defaults to an in-memory database.
+# If using sqlite, a string like:
+#
+# "sqlite3://path/to/dbname.db"
+#
+# alternatively, if using postgresql, a string like:
+#
+# "postgresql://dbname=stellar user=xxxx password=yyyy host=10.0.x.y"
+#
+# taking any combination of parameters from:
+#
+# http://www.postgresql.org/docs/devel/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS
+#
+DATABASE="sqlite3://stellar.db"
+
+# Data layer cache configuration
+# - ENTRY_CACHE_SIZE controls the maximum number of LedgerEntry objects
+#   that will be stored in the cache (default 4096)
+# - BEST_OFFERS_CACHE_SIZE controls the maximum number of Asset pairs that
+#   will be stored in the cache, although many LedgerEntry objects may be
+#   associated with a single Asset pair (default 64)
+# - PREFETCH_BATCH_SIZE determines the batch size for bulk loads used for
+#   prefetching
+ENTRY_CACHE_SIZE=4096
+BEST_OFFERS_CACHE_SIZE=64
+PREFETCH_BATCH_SIZE=1000
+
+# HTTP_PORT (integer) default 11626
+# What port stellar-core listens for commands on.
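+# For example, once the node is up you can check its status through this port
+# (assuming the default value below):
+#   curl http://127.0.0.1:11626/info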
+HTTP_PORT=11626
+
+# PUBLIC_HTTP_PORT (true or false) default false
+# If false, you only accept stellar commands from localhost.
+# Do not set this to true and expose the port to the open internet; that would
+# allow random people to run stellar commands (such as `stop`) on your server.
+PUBLIC_HTTP_PORT=false
+
+# Maximum number of simultaneous HTTP clients
+HTTP_MAX_CLIENT=128
+
+# COMMANDS (list of strings) default is empty
+# List of commands to run on startup.
+# Right now only setting log levels really makes sense.
+COMMANDS=[
+"ll?level=info&partition=Herder"
+]
+
+# Convenience mapping of common names to node IDs. The common names can be
+# used in the .cfg as `$common_name`. If set, they will also appear in your
+# logs instead of the less friendly nodeID.
+NODE_NAMES=[
+"GA22N4YGO7IJDRF2SISA5KHULGYYKDXBQGYIWUVNMSNHF5G2DNBKP3M5 eliza",
+"GCDENOCHA6TQL6DFC4FS54HIH7RP7XR7VZCQZFANMGLT2WXJ7D7KGV2P hal9000"
+]
+
+###########################
+## Configure which network this instance should talk to
+
+NETWORK_PASSPHRASE="Public Global Stellar Network ; September 2015"
+
+###########################
+## Overlay configuration
+
+# PEER_PORT (Integer) defaults to 11625
+# The port other instances of stellar-core can connect to you on.
+PEER_PORT=11625
+
+# TARGET_PEER_CONNECTIONS (Integer) default 8
+# This controls how aggressively the server will connect to other peers.
+# It will send outbound connection attempts until it is at this
+# number of outbound peer connections.
+TARGET_PEER_CONNECTIONS=8
+
+# MAX_ADDITIONAL_PEER_CONNECTIONS (Integer) default -1
+# Number of peers allowed to make inbound connections to this instance.
+# Setting this too low will result in peers stranded out of the network.
+# -1: use TARGET_PEER_CONNECTIONS*8 as the value for this field
+MAX_ADDITIONAL_PEER_CONNECTIONS=-1
+
+# MAX_PENDING_CONNECTIONS (Integer) default 500
+# Maximum number of pending (non authenticated) connections to this server.
+# This value is split between inbound and outbound connections in the same
+# proportion as MAX_ADDITIONAL_PEER_CONNECTIONS is to TARGET_PEER_CONNECTIONS.
+# This value may be additionally capped by OS limits on open connections.
+# Additionally, 2 more inbound connections are allowed if coming from
+# preferred peers.
+MAX_PENDING_CONNECTIONS=500
+
+# PEER_AUTHENTICATION_TIMEOUT (Integer) default 2
+# This server will drop a peer that does not authenticate itself within that
+# time.
+PEER_AUTHENTICATION_TIMEOUT=2
+
+# PEER_TIMEOUT (Integer) default 30
+# This server will drop an authenticated peer that does not send or receive
+# anything within that time.
+PEER_TIMEOUT=30
+
+# PEER_STRAGGLER_TIMEOUT (Integer) default 120
+# This server will drop an authenticated peer that does not drain its outgoing
+# queue within that time.
+PEER_STRAGGLER_TIMEOUT=120
+
+# MAX_BATCH_READ_PERIOD_MS (Integer) default 100
+# How long this server can spend processing reads from a peer at once
+MAX_BATCH_READ_PERIOD_MS=100
+
+# MAX_BATCH_READ_COUNT (Integer) default 1024
+# How many messages this server can read at once from a peer
+MAX_BATCH_READ_COUNT=1024
+
+# MAX_BATCH_WRITE_COUNT (Integer) default 1024
+# How many messages this server can send at once to a peer
+MAX_BATCH_WRITE_COUNT=1024
+
+# MAX_BATCH_WRITE_BYTES (Integer) default 1048576 (1 Megabyte)
+# How many bytes this server can send at once to a peer
+MAX_BATCH_WRITE_BYTES=1048576
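+
+# Taken together, the MAX_BATCH_* settings above bound how much reading and
+# writing is done for a single peer in one pass, so one busy peer cannot
+# monopolize the event loop.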
+
+# PREFERRED_PEERS (list of strings) default is empty
+# These are IP:port strings that this server will add to its DB of peers.
+# This server will try to always stay connected to the other peers on this
+# list.
+PREFERRED_PEERS=["127.0.0.1:7000","127.0.0.1:8000"]
+
+# PREFERRED_PEER_KEYS (list of strings) default is empty
+# These are public key identities that this server will treat as preferred
+# when connecting, similar to the PREFERRED_PEERS list.
+# Can use a name already defined in the .cfg
+PREFERRED_PEER_KEYS=[
+"GBKXI3TVIFHD6QDSNMUOTJFDWHDYDVRRPWIHN4IM2YFXIUEWDTY7DSSI",
+"GBDOAYUPGQCPLJCP2HYJQ4W3ADODJFZISHRBQTQB7SFVR4BRUX46RYIP optional_common_name",
+"$eliza"]
+
+# PREFERRED_PEERS_ONLY (boolean) default is false
+# When set to true, this peer will only connect to PREFERRED_PEERS and will
+# only accept connections from PREFERRED_PEERS or PREFERRED_PEER_KEYS
+PREFERRED_PEERS_ONLY=false
+
+# SURVEYOR_KEYS (list of strings) default is empty
+# These are public key identities. If empty, this node will relay/respond to
+# survey messages originating from a node in this node's transitive quorum.
+# If this list is NOT empty, this node will only relay/respond to messages
+# that originate from nodes in this list.
+# Can use a name already defined in the .cfg
+SURVEYOR_KEYS=[
+"GBKXI3TVIFHD6QDSNMUOTJFDWHDYDVRRPWIHN4IM2YFXIUEWDTY7DSSI",
+"$eliza"]
+
+# Percentage, between 0 and 100, of system activity (measured in terms
+# of both event-loop cycles and database time) below which the system
+# will consider itself "loaded" and attempt to shed load. Set this
+# number low and the system will be tolerant of overloading. Set it
+# high and the system will be intolerant. By default it is 0, meaning
+# totally insensitive to overloading.
+MINIMUM_IDLE_PERCENT=0
+
+# KNOWN_PEERS (list of strings) default is empty
+# These are IP:port strings that this server will add to its DB of peers.
+# It will try to connect to these when it is below TARGET_PEER_CONNECTIONS.
+KNOWN_PEERS=[
+"core-testnet1.stellar.org",
+"core-testnet2.stellar.org",
+"core-testnet3.stellar.org"]
+
+# KNOWN_CURSORS (list of strings) default is empty
+# Set of cursors added at each startup with value '1'.
+KNOWN_CURSORS=["HORIZON"]
+
+#######################
+## SCP settings
+
+# NODE_SEED (string) default random, regenerated each run.
+# The seed used for generating the public key this node will
+# be identified with in SCP.
+# Your seed should be unique. Protect your seed. Treat it like a password.
+# If you don't set a NODE_SEED one will be generated for you randomly
+# on each startup.
+#
+# To generate a new, stable seed (and associated public key), run:
+#
+#   stellar-core gen-seed
+#
+# You only need to keep the seed from this; you can always recover the
+# public key from the seed by running:
+#
+#   stellar-core convert-id
+#
+# This example also adds a common name `self` to the NODE_NAMES list with the
+# public key associated with this seed
+NODE_SEED="SBI3CZU7XZEWVXU7OZLW5MMUQAP334JFOPXSLTPOH43IRTEQ2QYXU5RG self"
+
+# NODE_IS_VALIDATOR (boolean) default false.
+# Only nodes that want to participate in SCP should set NODE_IS_VALIDATOR=true.
+# Most instances should operate in observer mode with NODE_IS_VALIDATOR=false.
+# See QUORUM_SET below.
+NODE_IS_VALIDATOR=false
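+
+# For illustration, generating a key pair looks roughly like this (the values
+# shown are placeholders, not real keys):
+#   $ stellar-core gen-seed
+#   Secret seed: SB...   <- keep private; this becomes NODE_SEED
+#   Public: GD...        <- safe to share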
+
+# NODE_HOME_DOMAIN (string) default empty.
+# HOME_DOMAIN for this validator.
+# When set, this validator will be grouped with other validators with the
+# same HOME_DOMAIN (as defined in VALIDATORS/HOME_DOMAINS)
+NODE_HOME_DOMAIN=""
+
+###########################
+# Consensus settings
+
+# FAILURE_SAFETY (integer) default -1
+# Most people should leave this at -1.
+# This is the maximum number of validator failures from your QUORUM_SET that
+# you want to be able to tolerate.
+# Typically, you will need at least 3f+1 nodes in your quorum set.
+# If you don't have enough nodes in your quorum set to tolerate the level you
+# set here, stellar-core won't run as a precaution.
+# A value of -1 indicates to use (n-1)/3 (n being the number of nodes
+# and groups from the top level of your QUORUM_SET)
+# A value of 0 is only allowed if UNSAFE_QUORUM is set
+# Note: The value of 1 below is the maximum number derived from the value of
+# QUORUM_SET in this configuration file
+FAILURE_SAFETY=1
+
+# UNSAFE_QUORUM (true or false) default false
+# Most people should leave this as false.
+# If set to true, allows specifying a potentially unsafe quorum set.
+# Otherwise stellar-core won't start if
+# a threshold % is set too low (threshold below 66% for the top level,
+# 51% for other levels) or
+# FAILURE_SAFETY is at 0 or above the number of failures that can occur.
+# You might want to set this if you are running your own network and
+# aren't concerned with byzantine failures, or if you fully understand how
+# the quorum sets of other nodes relate to yours when it comes to
+# quorum intersection.
+UNSAFE_QUORUM=false
+
+#########################
+## History
+
+
+# CATCHUP_COMPLETE (true or false) defaults to false
+# if true, will catch up to the network "completely" (replaying all history)
+# if false, will look at CATCHUP_RECENT for catchup settings
+CATCHUP_COMPLETE=false
+
+# CATCHUP_RECENT (integer) defaults to 0
+# if CATCHUP_COMPLETE is true this option is ignored
+# if set to 0, will catch up "minimally", using deltas to the most recent
+# snapshot.
+# if set to any other number, will catch up "minimally" to some past snapshot,
+# then replay history from that point to the current snapshot, ensuring that
+# at least CATCHUP_RECENT ledger entries will be present in the database
+# if "some past snapshot" is already present in the database, it just replays
+# all new history
+CATCHUP_RECENT=1024
+
+# WORKER_THREADS (integer) default 10
+# Number of threads available for doing long-duration jobs, like bucket
+# merging and verification.
+WORKER_THREADS=10
+
+# QUORUM_INTERSECTION_CHECKER (boolean) default true
+# Enable/disable computation of quorum intersection monitoring
+QUORUM_INTERSECTION_CHECKER=true
+
+# MAX_CONCURRENT_SUBPROCESSES (integer) default 16
+# History catchup can potentially spawn a bunch of sub-processes.
+# This limits the number that will be active at a time.
+MAX_CONCURRENT_SUBPROCESSES=10
+
+# AUTOMATIC_MAINTENANCE_PERIOD (integer, seconds) default 14400
+# Interval between automatic maintenance executions
+# Set to 0 to disable automatic maintenance
+AUTOMATIC_MAINTENANCE_PERIOD=14400
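+
+# With the values used here and AUTOMATIC_MAINTENANCE_COUNT below, maintenance
+# runs every 4 hours (14400 s) and trims up to 50000 unneeded ledger rows per
+# table on each run.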
+
+# AUTOMATIC_MAINTENANCE_COUNT (integer) default 50000
+# Number of unneeded ledgers in each table that will be removed during one
+# maintenance run.
+# NB: make sure that enough ledgers are deleted to offset the growth of
+# data accumulated by closing ledgers (catchup and normal operation)
+# Set to 0 to disable automatic maintenance
+AUTOMATIC_MAINTENANCE_COUNT=50000
+
+###############################
+## The following options should probably never be set. They are used primarily
+## for testing.
+
+# RUN_STANDALONE (true or false) defaults to false
+# This is a mode for testing. It prevents you from trying to connect
+# to other peers
+RUN_STANDALONE=false
+
+
+# INVARIANT_CHECKS (list of strings) default is empty
+# Setting this will cause the specified invariants to be checked on ledger
+# close and on bucket apply.
+# Strings specified are matched (as regex) against the list of invariants.
+# For example, to enable all invariants use ".*"
+# List of invariants:
+# - "AccountSubEntriesCountIsValid"
+#   Setting this will cause additional work on each operation apply - it
+#   checks if the change in the number of subentries of an account (signers +
+#   offers + data + trustlines) equals the change in the value of
+#   numsubentries stored in the account. This check is only performed for
+#   accounts modified in any way in a given ledger.
+#   The overhead may cause slower systems to not perform as fast as the rest
+#   of the network, so caution is advised when using this.
+# - "BucketListIsConsistentWithDatabase"
+#   Setting this will cause additional work on each bucket apply - it checks
+#   a variety of properties that should be satisfied by an applied bucket;
+#   for detailed information about what is checked see the comment in the
+#   header invariant/BucketListIsConsistentWithDatabase.h.
+#   The overhead may cause a system to catch up more than once before being
+#   in sync with the network.
+# - "CacheIsConsistentWithDatabase"
+#   Setting this will cause additional work on each operation apply - it
+#   checks if the internal cache of ledger entries is consistent with the
+#   content of the database. It is equivalent to PARANOID_MODE from older
+#   versions of stellar-core.
+#   The overhead may cause slower systems to not perform as fast as the rest
+#   of the network, so caution is advised when using this.
+# - "ConservationOfLumens"
+#   Setting this will cause additional work on each operation apply - it
+#   checks that the total number of lumens only changes during inflation.
+#   The overhead may cause slower systems to not perform as fast as the rest
+#   of the network, so caution is advised when using this.
+# - "LedgerEntryIsValid"
+#   Setting this will cause additional work on each operation apply - it
+#   checks a variety of properties that must be true for a LedgerEntry to be
+#   valid.
+#   The overhead may cause slower systems to not perform as fast as the rest
+#   of the network, so caution is advised when using this.
+# - "LiabilitiesMatchOffers"
+#   Setting this will cause additional work on each operation apply - it
+#   checks that accounts, trust lines, and offers satisfy all constraints
+#   associated with liabilities. For additional information, see the comment
+#   in the header invariant/LiabilitiesMatchOffers.h.
+#   The overhead may cause slower systems to not perform as fast as the rest
+#   of the network, so caution is advised when using this.
+INVARIANT_CHECKS = []
+
+
+# MANUAL_CLOSE (true or false) defaults to false
+# Mode for testing. The ledger will only close when stellar-core gets
+# the `manualclose` command
+MANUAL_CLOSE=false
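+# (`manualclose`, like other runtime commands, is issued over the admin HTTP
+# port, e.g. curl http://127.0.0.1:11626/manualclose)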
+
+
+# ARTIFICIALLY_GENERATE_LOAD_FOR_TESTING (true or false) defaults to false
+# Enables synthetic load generation on demand.
+# The load is triggered by the `generateload` runtime command.
+# This option only exists for stress-testing and should not be enabled in
+# production networks.
+ARTIFICIALLY_GENERATE_LOAD_FOR_TESTING=false
+
+
+# ARTIFICIALLY_ACCELERATE_TIME_FOR_TESTING (true or false) defaults to false
+# Reduces ledger close time to 1s and checkpoint frequency to every 8 ledgers.
+# Do not ever set this in production, as it will make your history archives
+# incompatible with those of anyone else.
+ARTIFICIALLY_ACCELERATE_TIME_FOR_TESTING=false
+
+# ARTIFICIALLY_SET_CLOSE_TIME_FOR_TESTING (in seconds), defaults to no override
+# Overrides the close time to the specified value but does not change
+# checkpoint frequency - this may cause network instability.
+# Do not use in production.
+ARTIFICIALLY_SET_CLOSE_TIME_FOR_TESTING=0
+
+# ALLOW_LOCALHOST_FOR_TESTING defaults to false
+# Allows connecting to localhost; this should not be enabled on production
+# systems as it is a security threat.
+ALLOW_LOCALHOST_FOR_TESTING=false
+
+# MAXIMUM_LEDGER_CLOSETIME_DRIFT (in seconds) defaults to 50
+# Maximum drift between the local clock and the network time.
+# When joining the network for the first time, ignore SCP messages that are
+# unlikely to be for the latest ledger.
+MAXIMUM_LEDGER_CLOSETIME_DRIFT=50
+
+# DISABLE_XDR_FSYNC (true or false) defaults to false.
+# If set to true, writing an XDR file (a bucket or a checkpoint) will not
+# be followed by an fsync on the file. This in turn means that XDR files
+# (which hold the canonical state of the ledger) may be corrupted if the
+# operating system suddenly crashes or loses power, causing the node to
+# diverge and get stuck on restart, or potentially even publish bad
+# history. This option only exists as an escape hatch if the local
+# filesystem is so unusably slow that you prefer operating without
+# durability guarantees. Do not set it to true unless you're very certain
+# you want to make that trade.
+DISABLE_XDR_FSYNC=false
+
+# MAX_SLOTS_TO_REMEMBER (in ledgers) defaults to 12
+# Most people should leave this at 12.
+# Number of most recent ledgers to keep in memory. Storing more ledgers allows
+# other nodes to join the network without catching up. This is useful for
+# simulation testing purposes.
+MAX_SLOTS_TO_REMEMBER=12
+
+# METADATA_OUTPUT_STREAM defaults to "", disabling it.
+# A string specifying a stream to write fine-grained metadata to for each
+# ledger close while running. This will be opened at startup and synchronously
+# streamed-to during both catchup and live ledger-closing.
+#
+# Streams may be specified either as a pathname (typically a named FIFO on
+# POSIX or a named pipe on Windows, though plain files also work) or a string
+# of the form "fd:N" for some integer N which, on POSIX, specifies the
+# existing open file descriptor N inherited by the process (for example to
+# write to an anonymous pipe).
+#
+# As a further safety check, this option is mutually exclusive with
+# NODE_IS_VALIDATOR, as its typical use (writing to a pipe with a reader
+# process on the other end) introduces a potentially-unbounded synchronous
+# delay in closing a ledger, and should not be used on a node participating
+# in consensus, only on a passive "watcher" node.
+METADATA_OUTPUT_STREAM=""
+
+#####################
+## Tables must come at the end. (TOML you are almost perfect!)
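+##
+## In TOML, every key that appears after a [table] or [[array-of-tables]]
+## header belongs to that table, so all of the scalar settings above must stay
+## above the first [[HOME_DOMAINS]] / [[VALIDATORS]] / [HISTORY.*] section.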
+
+# HOME_DOMAINS
+# List of properties for home domains
+# HOME_DOMAIN: string (required) home domain identifier
+# QUALITY: string (required) quality of all validators in HOME_DOMAIN
+#   CRITICAL, HIGH, MEDIUM, LOW
+#   HIGH quality validators must have archives and redundancy
+#   CRITICAL quality validators must have archives and redundancy like HIGH,
+#   but also enforces that this HOME_DOMAIN must be included to achieve
+#   consensus
+#
+[[HOME_DOMAINS]]
+HOME_DOMAIN="testnet.stellar.org"
+QUALITY="HIGH"
+
+# [[HOME_DOMAINS]]
+# HOME_DOMAIN="some-other-domain"
+# QUALITY="LOW"
+
+# VALIDATORS
+# List of validators used to automatically generate quorum sets
+#
+# NB: you need to either depend on exactly one entity OR
+# have at least 4 entities to have a "safe" configuration;
+# see the rules below for detail.
+#
+# The quorum set is generated using the following rules:
+# validators with the same home domain (representing an entity) are
+# automatically grouped together; the threshold used assumes a simple
+# majority (2f+1)
+# entities are grouped by QUALITY
+# groups are nested such that the group for the quality that precedes a given
+# group is added as a backup for the higher quality group,
+# i.e. the top-level group contains the HIGH quality entities together with
+# the group that contains the MEDIUM quality entities
+# heterogeneous groups use a threshold assuming byzantine failure (3f+1)
+#
+#
+# Individual validators can be added in standard form
+# NAME: string (required) unique identifier to use to identify a validator
+#   NAME is added as an alias for PUBLIC_KEY
+# QUALITY: string (required*) quality of validator
+#   all validators must have a quality, either directly (as set by this
+#   property) or indirectly via HOME_DOMAINS (see HOME_DOMAINS for more
+#   detail on QUALITY)
+# HOME_DOMAIN: string (required) home domain for validator
+# PUBLIC_KEY: string (required) public key associated with a validator
+# ADDRESS: string (optional) peer:port associated with a validator
+#   ADDRESS will be added to the KNOWN_PEERS list
+# HISTORY: string (optional) history archive GET command associated
+#   with a validator
+#   HISTORY will be added to the list of known archives that can be
+#   downloaded from
+
+# Stellar Testnet validators
+[[VALIDATORS]]
+NAME="sdftest1"
+HOME_DOMAIN="testnet.stellar.org"
+PUBLIC_KEY="GDKXE2OZMJIPOSLNA6N6F2BVCI3O777I2OOC4BV7VOYUEHYX7RTRYA7Y"
+ADDRESS="core-testnet1.stellar.org"
+HISTORY="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_001/{0} -o {1}"
+
+[[VALIDATORS]]
+NAME="sdftest2"
+HOME_DOMAIN="testnet.stellar.org"
+PUBLIC_KEY="GCUCJTIYXSOXKBSNFGNFWW5MUQ54HKRPGJUTQFJ5RQXZXNOLNXYDHRAP"
+ADDRESS="core-testnet2.stellar.org"
+HISTORY="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_002/{0} -o {1}"
+
+[[VALIDATORS]]
+NAME="sdftest3"
+HOME_DOMAIN="testnet.stellar.org"
+PUBLIC_KEY="GC2V2EFSXN6SQTWVYA5EPJPBWWIMSD2XQNKUOHGEKB535AQE2I6IXV2Z"
+ADDRESS="core-testnet3.stellar.org"
+HISTORY="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_003/{0} -o {1}"
+
+# HISTORY
+# Used to specify where to fetch and store the history archives.
+# Fetching and storing history is kept as general as possible.
+# Any place you can save and load static files from should be usable by the
+# stellar-core history system: S3, the file system, HTTP, etc.
+# stellar-core will call any external process you specify and will pass it
+# the name of the file to save or load.
+# Simply use template parameters `{0}` and `{1}` in place of the files being
+# transmitted or retrieved.
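+# (As the `local` example below illustrates: on `get`, {0} is the name of the
+# object inside the archive and {1} is the local path to write it to; on
+# `put` the roles are reversed.)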
+# You can specify multiple places to store and fetch from. stellar-core will
+# use the other fetching locations as backups in case fetching from one fails.
+#
+# Note: for any archive you *put* to, you must run
+# `stellar-core new-hist <name>` once before you start.
+# For example, with this config you would run: stellar-core new-hist local
+
+# this creates a `local` archive on the local drive
+# NB: this is an example; in general you should probably not do this, as
+# archives grow indefinitely
+[HISTORY.local]
+get="cp /tmp/stellar-core/history/vs/{0} {1}"
+put="cp {0} /tmp/stellar-core/history/vs/{1}"
+mkdir="mkdir -p /tmp/stellar-core/history/vs/{0}"
+
+# other examples:
+# [HISTORY.stellar]
+# get="curl http://history.stellar.org/{0} -o {1}"
+# put="aws s3 cp {0} s3://history.stellar.org/{1}"
+
+# [HISTORY.backup]
+# get="curl http://backupstore.blob.core.windows.net/backupstore/{0} -o {1}"
+# put="azure storage blob upload {0} backupstore {1}"
+
+# The history store of the Stellar testnet
+#[HISTORY.h1]
+#get="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_001/{0} -o {1}"
+
+#[HISTORY.h2]
+#get="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_002/{0} -o {1}"
+
+#[HISTORY.h3]
+#get="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_003/{0} -o {1}"
+
+# QUORUM_SET (optional)
+# This is how you specify this server's quorum set manually
+#
+# *** this section is for advanced users and exists mostly for historical
+# reasons ***
+# the preferred way to configure your quorum set is to use [[VALIDATORS]]
+# instead
+#
+# It can be nested up to 2 levels: {A,B,C,{D,E,F},{G,H,{I,J,K,L}}}
+# THRESHOLD_PERCENT is how many have to agree (1-100%) within a given set.
+# Each set is treated as one vote.
+# So for example in the above there are 5 things that can vote:
+# the individual validators A, B, C, and the sets {D,E,F} and
+# {G,H with subset {I,J,K,L}}
+# the sets each have their own threshold.
+# For example {100% G,H with subset {50% I,J,K,L}}
+# means that quorum will be met with G, H and any 2 (50%) of {I, J, K, L}
+#
+# a [QUORUM_SET.path] section is constructed as
+# THRESHOLD_PERCENT: how many have to agree, defaults to 67 (rounds up).
+# VALIDATORS: array of node IDs
+# additional subsets [QUORUM_SET.path.item_number]
+# a QUORUM_SET must not contain duplicate entries;
+# {{A,B},{A,C}} is invalid for example
+# The key for "self" is implicitly added at the top level, so the effective
+# quorum set is [t:2, self, QUORUM_SET]. Note that "self" always agrees
+# with the instance (if QUORUM_SET includes it)
+#
+# The following setup is equivalent to the example given above.
+#
+# Note on naming: you can add common names to the NODE_NAMES list here as
+# shown in the first 3 validators or use common names that have been
+# previously defined.
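+#
+# As a worked example of the rounding: with 4 entries in a set and
+# THRESHOLD_PERCENT=67, ceil(4 * 0.67) = 3 of the 4 must agree before the
+# set counts as a vote.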
+# [QUORUM_SET]
+# THRESHOLD_PERCENT=66
+# VALIDATORS=[
+# "GDQWITFJLZ5HT6JCOXYEVV5VFD6FTLAKJAUDKHAV3HKYGVJWA2DPYSQV A_from_above",
+# "GANLKVE4WOTE75MJS6FQ73CL65TSPYYMFZKC4VDEZ45LGQRCATGAIGIA B_from_above",
+# "GDV46EIEF57TDL4W27UFDAUVPDDCKJNVBYB3WIV2WYUYUG753FCFU6EJ C_from_above"
+# ]
+#
+# [QUORUM_SET.1]
+# THRESHOLD_PERCENT=67
+# VALIDATORS=[
+# "$self", # 'D' from above is this node
+# "GDXJAZZJ3H5MJGR6PDQX3JHRREAVYNCVM7FJYGLZJKEHQV2ZXEUO5SX2 E_from_above",
+# "GB6GK3WWTZYY2JXWM6C5LRKLQ2X7INQ7IYTSECCG3SMZFYOZNEZR4SO5 F_from_above"
+# ]
+#
+# [QUORUM_SET.2]
+# THRESHOLD_PERCENT=100
+# VALIDATORS=[
+# "GCTAIXWDDBM3HBDHGSAOLY223QZHPS2EDROF7YUBB3GNYXLOCPV5PXUK G_from_above",
+# "GCJ6UBAOXNQFN3HGLCVQBWGEZO6IABSMNE2OCQC4FJAZXJA5AIE7WSPW H_from_above"
+# ]
+#
+# [QUORUM_SET.2.1]
+# THRESHOLD_PERCENT=50
+# VALIDATORS=[
+# "GC4X65TQJVI3OWAS4DTA2EN2VNZ5ZRJD646H5WKEJHO5ZHURDRAX2OTH I_from_above",
+# "GAXSWUO4RBELRQT5WMDLIKTRIKC722GGXX2GIGEYQZDQDLOTINQ4DX6F J_from_above",
+# "GAWOEMG7DQDWHCFDTPJEBYWRKUUZTX2M2HLMNABM42G7C7IAPU54GL6X K_from_above",
+# "GDZAJNUUDJFKTZX3YWZSOAS4S4NGCJ5RQAY7JPYBG5CUFL3JZ5C3ECOH L_from_above"
+# ]
+
+
diff --git a/stellar/docker-compose.yml b/stellar/docker-compose.yml
new file mode 100644
index 0000000..4505074
--- /dev/null
+++ b/stellar/docker-compose.yml
@@ -0,0 +1,20 @@
+version: '3.6'
+
+services:
+  # Only used if DATABASE in config/stellar-core.cfg is switched from the
+  # default sqlite3 connection string to a postgresql:// one.
+  stellar-core-postgres:
+    image: postgres:9
+    restart: on-failure
+    volumes:
+      - ./data/postgres:/var/lib/postgresql/data
+    environment:
+      - POSTGRES_DB=stellar-core
+
+  stellar-core-pubnet:
+    # tag kept in sync with VERSION and the Dockerfile's STELLAR_CORE_VERSION
+    image: quay.io/openware/stellar:13.2.0
+    volumes:
+      - ./config/stellar-core.cfg:/stellar-core.cfg
+      - ./data/stellar:/data
+    restart: always
+    ports:
+      - 11625:11625
+      - 11626:11626
diff --git a/stellar/scripts/docker-entrypoint.sh b/stellar/scripts/docker-entrypoint.sh
new file mode 100755
index 0000000..76c6739
--- /dev/null
+++ b/stellar/scripts/docker-entrypoint.sh
@@ -0,0 +1,54 @@
+#!/usr/bin/env bash
+
+set -ue
+
+function stellar_core_init_db() {
+    if [ -z "${INITIALIZE_DB:-}" ] || [ "${INITIALIZE_DB}" != "true" ]; then
+        echo "Not initializing DB (set INITIALIZE_DB=true if you want to initialize it)."
+        return 0
+    fi
+
+    local DB_INITIALIZED="/data/.db-initialized"
+
+    if [ -f "$DB_INITIALIZED" ]; then
+        echo "Core db has already been initialized."
+        return 0
+    fi
+
+    echo "Initializing core db..."
+
+    stellar-core new-db --conf /stellar-core.cfg
+
+    echo "Finished initializing core db"
+
+    touch "$DB_INITIALIZED"
+}
+
+function stellar_core_init_history_archives() {
+    if [ -z "${INITIALIZE_HISTORY_ARCHIVES:-}" ] || [ "${INITIALIZE_HISTORY_ARCHIVES}" != "true" ]; then
+        echo "Not initializing history archives (set INITIALIZE_HISTORY_ARCHIVES=true if you want to initialize them)."
+        return 0
+    fi
+
+    # HISTORY is expected to hold a JSON object mirroring the [HISTORY] tables
+    # in the config; only archives that define a "put" command are initialized.
+    for HISTORY_ARCHIVE in $(echo "$HISTORY" | jq -r 'to_entries[] | select (.value.put?) | .key'); do
+        local HISTORY_ARCHIVE_INITIALIZED="/data/.history-archive-${HISTORY_ARCHIVE}-initialized"
+
+        if [ -f "$HISTORY_ARCHIVE_INITIALIZED" ]; then
+            echo "History archive ${HISTORY_ARCHIVE} has already been initialized."
+            continue
+        fi
+
+        echo "Initializing history archive ${HISTORY_ARCHIVE}..."
+
+        stellar-core new-hist "$HISTORY_ARCHIVE" --conf /stellar-core.cfg
+
+        echo "Finished initializing history archive ${HISTORY_ARCHIVE}."
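+
+        # Record completion so this archive is skipped on subsequent starts.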
+        touch "$HISTORY_ARCHIVE_INITIALIZED"
+    done
+}
+
+stellar_core_init_db
+stellar_core_init_history_archives
+
+exec "$@"
diff --git a/stellar/scripts/install.sh b/stellar/scripts/install.sh
new file mode 100755
index 0000000..9d02d6c
--- /dev/null
+++ b/stellar/scripts/install.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+
+set -ue
+
+# install build deps
+apt-get update
+apt-get install -y $STELLAR_CORE_BUILD_DEPS
+
+# clone, compile, and install stellar core
+git clone --branch $STELLAR_CORE_VERSION --recursive --depth 1 https://github.com/stellar/stellar-core.git
+
+cd stellar-core
+./autogen.sh
+./configure
+make
+make install
+cd ..
+
+# cleanup
+rm -rf stellar-core
+apt-get remove -y $STELLAR_CORE_BUILD_DEPS
+apt-get autoremove -y
+
+# install runtime deps
+apt-get install -y $STELLAR_CORE_DEPS
+
+# cleanup apt cache
+rm -rf /var/lib/apt/lists/*
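+
+# Note: the build toolchain is removed and the apt cache cleared above to keep
+# the final image small; only the runtime dependencies in STELLAR_CORE_DEPS
+# (curl, jq, libpq5) remain in the image.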