diff --git a/.circleci/config.yml b/.circleci/config.yml
index ba2029ffb..8348c2a7b 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -1,4 +1,4 @@
-version: 2.1
+version: 2.1
jobs:
checkDependencies:
@@ -12,7 +12,7 @@ jobs:
checkCandidatesFile:
docker:
- - image: node:18
+ - image: node:22
resource_class: large
steps:
- checkout
@@ -24,7 +24,7 @@ jobs:
checkCoreESLint:
docker:
- - image: node:18
+ - image: node:22
resource_class: large
steps:
- checkout
@@ -36,7 +36,7 @@ jobs:
checkCommonESLint:
docker:
- - image: node:18
+ - image: node:22
resource_class: large
steps:
- checkout
@@ -49,7 +49,7 @@ jobs:
CommonUnitTests:
description: "Common Unit Tests"
docker:
- - image: node:18-bullseye
+ - image: node:22-bullseye
- image: mongo:6.0.9
name: mongodb
resource_class: xlarge
@@ -67,7 +67,7 @@ jobs:
ChaindataIntegrationTests:
description: "Chaindata Integration Tests"
docker:
- - image: node:18-bullseye
+ - image: node:22-bullseye
- image: mongo:6.0.9
name: mongodb
resource_class: xlarge
@@ -85,7 +85,7 @@ jobs:
ApiHandlerIntegrationTests:
description: "ApiHandler Integration Tests"
docker:
- - image: node:18-bullseye
+ - image: node:22-bullseye
- image: mongo:6.0.9
name: mongodb
resource_class: xlarge
@@ -103,7 +103,7 @@ jobs:
NominatorIntegrationTests:
description: "Nominator Integration Tests"
docker:
- - image: node:18-bullseye
+ - image: node:22-bullseye
- image: mongo:6.0.9
name: mongodb
resource_class: xlarge
@@ -121,7 +121,7 @@ jobs:
ScorekeeperIntegrationTests:
description: "Scorekeeper Integration Tests"
docker:
- - image: node:18-bullseye
+ - image: node:22-bullseye
- image: mongo:6.0.9
name: mongodb
@@ -140,7 +140,7 @@ jobs:
TelemetryIntegrationTests:
description: "Telemetry Integration Tests"
docker:
- - image: node:18-bullseye
+ - image: node:22-bullseye
- image: mongo:6.0.9
name: mongodb
diff --git a/.gitignore b/.gitignore
index f3f34e58a..7106ecba9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -28,8 +28,6 @@ dist
**/config/main.json
**/config/secret.json
-redis/
-
docs/.DS_Store
/packages/core/kusama-matrix.txt
/packages/core/polkadot-matrix.txt
@@ -43,4 +41,4 @@ docs/.DS_Store
build/**
dist/**
.next/**
-coverage/**
\ No newline at end of file
+coverage/**
diff --git a/.yarnrc.yml b/.yarnrc.yml
index ef175c47b..7b9cb61c5 100644
--- a/.yarnrc.yml
+++ b/.yarnrc.yml
@@ -11,8 +11,3 @@ plugins:
pnpMode: loose
yarnPath: .yarn/releases/yarn-3.2.2.cjs
-
-packageExtensions:
- esbuild@*:
- dependencies:
- "@esbuild/linux-arm64": "*"
diff --git a/Dockerfile b/Dockerfile
index a712d3c11..6c1723b96 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,17 +1,23 @@
-FROM node:21-slim AS builder
-ARG MATRIX_TOKEN
-ARG PACKAGE
-ENV PACKAGE ${PACKAGE}
+FROM node:22-slim AS docs_builder
WORKDIR /code
COPY docs docs
RUN cd docs && npm install && npm run build
+FROM node:22-slim
+WORKDIR /code
+ARG MATRIX_TOKEN
+ARG PACKAGE
+ENV PACKAGE ${PACKAGE}
+
+COPY --from=docs_builder /code/docs/build/ /code/docs/build/
+
COPY . .
RUN --mount=type=cache,target=/code/.yarn/cache \
--mount=type=cache,target=/turbo_cache \
yarn install --immutable && \
- yarn turbo --cache-dir /turbo_cache
+ yarn turbo --cache-dir /turbo_cache && \
+ yarn workspaces focus --production
CMD yarn run start:js:${PACKAGE}
diff --git a/Dockerfile-dev b/Dockerfile-dev
deleted file mode 100644
index 733599d1f..000000000
--- a/Dockerfile-dev
+++ /dev/null
@@ -1,16 +0,0 @@
-FROM node:18-slim AS builder
-RUN apt-get update && apt-get install -y curl
-ARG MATRIX_TOKEN
-ARG PACKAGE
-ENV PACKAGE ${PACKAGE}
-COPY . /code
-WORKDIR /code
-RUN echo "building ${PACKAGE}... " && \
- yarn install && \
- echo "yarn install done. Building...." && \
- yarn build && \
- echo "building ${PACKAGE} done." && \
- apt-get update && \
- apt-get install -y libssl-dev && \
- apt-get clean
-CMD yarn run start:dev:${PACKAGE}
diff --git a/README.md b/README.md
index 8aa5181ec..a3a9ac495 100644
--- a/README.md
+++ b/README.md
@@ -26,16 +26,13 @@ The nominating backend will routinely change its nominations at every era. The b
> A monorepo containing TypeScript microservices for the Thousand Validators Program.
-The following is a monorepo of packages for the Thousand Validators Program. Each package is a microservice that can be run independently or together with other microservices.
+The following is a monorepo of packages for the Thousand Validators Program.
The monorepo is managed using Yarn workspaces, and contains the following packages:
- [`packages/common`](packages/common): A package containing common code shared across all microservices.
- [`packages/core`](packages/core): A package containing the core logic of the Thousand Validators Program.
- [`packages/gateway`](packages/gateway): A package for an API gateway that exposes the backend with a REST API.
- [`packages/telemetry`](packages/telemetry): A package for a telemetry client that monitors uptime
-- [`packages/worker`](packages/worker): A packages for job queue workers that perform background tasks.
-
-
## Installation & Setup
@@ -43,16 +40,9 @@ The monorepo is managed using Yarn workspaces, and contains the following packag
There's a few ways of running the backend with docker containers, either in kubernetes, or with docker-compose.
-There is the `Current / Monolith` way of running instances, and the `Microservice` way of running instances.
-
-`Current / Monolith` Architecture:
-
-![Current / Monolith Architecture](./docs/architecture/monolith.png)
+Current Architecture:
-
-`Microservice` Architecture:
-
-![Microservice Architecture](./docs/architecture/microservice.png)
+![Current Architecture](./docs/architecture/monolith.png)
The following are different ways of running in either `Current` or `Microservice` architecture with either `Kusama` or `Polkadot`, and either `Development` or `Production`:
@@ -60,18 +50,6 @@ The following are different ways of running in either `Current` or `Microservice
- Running as a monolith with production values
- `Polkadot Current`
- Running as a monolith with production values
-- `Kusama Microservice`
- - Running as microservices with production values
-- `Polkadot Microservice`
- - Running as microservices with production values
-- `Polkadot Current Dev`
- - Running as a monolith with development values
-- `Kusama Current Dev`
- - Running as a monolith with development values
-- `Kusama Microservice Dev`
- - Running as microservices with development values
-- `Polkadot Microservice Dev`
- - Running as microservices with development values
Each package contains a `Dockerfile`, which is used for running in production, and `Dockerfile-dev`, which is used for development. The development images will use run with `nodemon` so that each time files is saved/changed it will rebuild the image and restart the container. Any changes for the regular run `Dockerfile` will need a manual rebuilding of the docker image.
@@ -86,8 +64,7 @@ cd 1k-validators-be
### Installing System Dependencies
Ensure the following are installed on your machine:
-- [Node.js](https://nodejs.org/en/) (v12 or higher)
-- [Yarn](https://yarnpkg.com/) (v1.22 or higher)
+- [Node.js](https://nodejs.org/en/) (v22 or higher)
- [Docker](https://www.docker.com/) (v19 or higher)
@@ -121,30 +98,6 @@ Polkadot Current / Monolith Dev:
yarn docker:polkadot-current-dev:start
```
-Kusama Microservice Production:
-
-```bash
-yarn docker:kusama-microscervice:start
-```
-
-Kusama Microservice Dev:
-
-```bash
-yarn docker:kusama-microservice-dev:start
-```
-
-Polkadot Microservice Production:
-
-```bash
-yarn docker:polkadot-current:start
-```
-
-Polkadot Microservice Dev:
-
-```bash
-yarn docker:polkadot-current-dev:start
-```
-
### Install Yarn Dependencies
```bash
yarn install
@@ -170,50 +123,6 @@ This will create a configuration file for a Kusama instance that mirrors what is
yarn create-config-polkadot-current
```
-Kusama Microservice Config:
-This will create configuration files for a Kusama instance for each microservice that runs with production values. This runs `core`, `gateway`, `telemetry`, and `worker` as separate processes in their own container - each one needs it's own configuration file.
-```bash
-yarn create-config-kusama-microservice
-```
-
-Polkadot Microservice Config:
-This will create configuration files for a Polkadot instance for each microservice that runs with production values. This runs `core`, `gateway`, `telemetry`, and `worker` as separate processes in their own container - each one needs it's own configuration file.
-```bash
-yarn create-config-polkadot-microservice
-```
-
-
-
-### Running the Microservices
-
-#### Running `Kusama Current` or `Polkadot Current`:
-
-Either is from the same `docker-compose.current.yml` file, and runs only the `core` container, `mongo` container, and `mongo-express` container.
-
-Build and run as detached daemon:
-```bash
-docker compose -f docker-compose.current.yml up -d --build
-```
-
-#### Running `Kusama Microservice` or `Polkadot Microservice`:
-
-Either is from the same `docker-compose.microservice.yml` file. This runs `core`, `gateway`, `telemetry`, and `worker` as separate processes in their own container - each one needs it's own configuration file. It additionally runs a `redis`, `mongo`, and `mongo-express` container.
-
-Build and run as detached daemon:
-```bash
-docker compose -f docker-compose.microservice.yml up -d --build
-```
-
-#### Running `Kusama Current Dev`, `Polkadot Current Dev`, `Kusama Microservice Dev`, or `Polkadot Microservice Dev`
-
-Either is from the same `docker-compose.yml` file.
-
-Build and run as detached daemon:
-```bash
-docker compose -f docker-compose.yml up -d --build
-```
-
-
### Viewing Logs
To view the aggregated logs of all the containers:
@@ -297,9 +206,6 @@ You can then query an endpoint like `/candidates` by going to `http://localhost:
To view the Mongo Express GUI to interact with the MongoDB Database, go to `http://localhost:8888/` in your browser. Or run `yarn open:mongo-express` from the root directory.
-#### BullMQ Board (Job Queue GUI)
-
-To view the BullMQ Board GUI to interact with the Job Queue, go to `http://localhost:3301/bull` in your browser if running as microservices. Or run `yarn open:bull` from the root directory.
## :pencil: Contribute
diff --git a/apps/1kv-backend-staging/templates/kusama-otv-backend.yaml b/apps/1kv-backend-staging/templates/kusama-otv-backend.yaml
index d79e4c857..b63c8c060 100644
--- a/apps/1kv-backend-staging/templates/kusama-otv-backend.yaml
+++ b/apps/1kv-backend-staging/templates/kusama-otv-backend.yaml
@@ -13,7 +13,7 @@ spec:
# syncPolicy:
# automated:
# prune: true
- # selfHeal: true
+ # selfHeal: true
source:
repoURL: 'https://github.com/w3f/1k-validators-be.git'
path: charts/otv-backend
@@ -46,18 +46,21 @@ spec:
"apiEndpoints": [
"wss://kusama-rpc-tn.dwellir.com",
"wss://kusama-rpc.dwellir.com",
- "wss://kusama.public.curie.radiumblock.xyz/ws",
+ "wss://kusama.public.curie.radiumblock.co/ws",
"wss://rpc.ibp.network/kusama",
"wss://rpc.dotters.network/kusama",
"wss://ksm-rpc.stakeworld.io"
],
+ "apiPeopleEndpoints": ["wss://kusama-people-rpc.polkadot.io"],
"candidatesUrl": "https://raw.githubusercontent.com/w3f/1k-validators-be/master/candidates/kusama.json"
},
"constraints": {
"skipConnectionTime": true,
"skipIdentity": false,
- "skipClientUpgrade": false,
"skipUnclaimed": true,
+ "clientUpgrade": {
+ "skip": false
+ },
"minSelfStake": 10000000000000,
"commission": 150000000,
"unclaimedEraThreshold": 4,
@@ -65,8 +68,7 @@ spec:
"skip": false,
"sanctionedCountries": ["RU", "IR", "CU", "KP", "SY"],
"sanctionedRegions": ["Crimea", "Autonomous Republic of Crimea", "Republic of Crimea", "Luhansk", "Luhanska Oblast", "Luhanska", "Luganskaya Oblast’", "Luganskaya", "Donetsk", "Donetska Oblast", "Donetskaya Oblast’", "Donetska", "Donetskaya", "Sevastopol City", "Sevastopol", "Gorod Sevastopol"]
- },
- "forceClientVersion": "v1.12.0"
+ }
},
"cron": {
"monitor": "0 */15 * * * *",
diff --git a/apps/1kv-backend-staging/templates/polkadot-otv-backend.yaml b/apps/1kv-backend-staging/templates/polkadot-otv-backend.yaml
index 858722979..df4a60d13 100644
--- a/apps/1kv-backend-staging/templates/polkadot-otv-backend.yaml
+++ b/apps/1kv-backend-staging/templates/polkadot-otv-backend.yaml
@@ -55,8 +55,10 @@ spec:
"constraints": {
"skipConnectionTime": false,
"skipIdentity": false,
- "skipClientUpgrade": false,
"skipUnclaimed": true,
+ "clientUpgrade": {
+ "skip": false
+ },
"minSelfStake": 50000000000000,
"commission": 50000000,
"unclaimedEraThreshold": 1,
@@ -64,8 +66,7 @@ spec:
"skip": false,
"sanctionedCountries": ["RU", "IR", "CU", "KP", "SY"],
"sanctionedRegions": ["Crimea", "Autonomous Republic of Crimea", "Republic of Crimea", "Luhansk", "Luhanska Oblast", "Luhanska", "Luganskaya Oblast’", "Luganskaya", "Donetsk", "Donetska Oblast", "Donetskaya Oblast’", "Donetska", "Donetskaya", "Sevastopol City", "Sevastopol", "Gorod Sevastopol"]
- },
- "forceClientVersion": "v1.12.0"
+ }
},
"cron": {
"monitor": "0 */15 * * * *",
diff --git a/apps/1kv-backend/templates/kusama-otv-backend.yaml b/apps/1kv-backend/templates/kusama-otv-backend.yaml
index e277d8c5e..bd3a384e8 100644
--- a/apps/1kv-backend/templates/kusama-otv-backend.yaml
+++ b/apps/1kv-backend/templates/kusama-otv-backend.yaml
@@ -13,11 +13,11 @@ spec:
# syncPolicy:
# automated:
# prune: true
- # selfHeal: true
+ # selfHeal: true
source:
repoURL: https://w3f.github.io/helm-charts/
chart: otv-backend
- targetRevision: v3.3.0
+ targetRevision: v3.3.1
plugin:
env:
- name: HELM_VALUES
@@ -43,19 +43,22 @@ spec:
"apiEndpoints": [
"wss://kusama-rpc-tn.dwellir.com",
"wss://kusama-rpc.dwellir.com",
- "wss://kusama.public.curie.radiumblock.xyz/ws",
+ "wss://kusama.public.curie.radiumblock.co/ws",
"wss://rpc.ibp.network/kusama",
"wss://rpc.dotters.network/kusama",
"wss://ksm-rpc.stakeworld.io"
],
+ "apiPeopleEndpoints": ["wss://kusama-people-rpc.polkadot.io"],
"candidatesUrl": "https://raw.githubusercontent.com/w3f/1k-validators-be/master/candidates/kusama.json"
},
"constraints": {
"skipConnectionTime": false,
"skipIdentity": false,
"skipStakedDestination": true,
- "skipClientUpgrade": false,
"skipUnclaimed": true,
+ "clientUpgrade": {
+ "skip": false
+ },
"minSelfStake": 10000000000000,
"commission": 150000000,
"unclaimedEraThreshold": 4,
@@ -63,8 +66,7 @@ spec:
"skip": false,
"sanctionedCountries": ["RU", "IR", "CU", "KP", "SY"],
"sanctionedRegions": ["Crimea", "Autonomous Republic of Crimea", "Republic of Crimea", "Luhansk", "Luhanska Oblast", "Luhanska", "Luganskaya Oblast’", "Luganskaya", "Donetsk", "Donetska Oblast", "Donetskaya Oblast’", "Donetska", "Donetskaya", "Sevastopol City", "Sevastopol", "Gorod Sevastopol"]
- },
- "forceClientVersion": "v1.12.0"
+ }
},
"cron": {
"monitor": "0 */15 * * * *",
diff --git a/apps/1kv-backend/templates/polkadot-otv-backend.yaml b/apps/1kv-backend/templates/polkadot-otv-backend.yaml
index 55dd942ef..8f25d88a5 100644
--- a/apps/1kv-backend/templates/polkadot-otv-backend.yaml
+++ b/apps/1kv-backend/templates/polkadot-otv-backend.yaml
@@ -17,7 +17,7 @@ spec:
source:
repoURL: https://w3f.github.io/helm-charts/
chart: otv-backend
- targetRevision: v3.3.0
+ targetRevision: v3.3.1
plugin:
env:
- name: HELM_VALUES
@@ -53,8 +53,10 @@ spec:
"skipConnectionTime": false,
"skipIdentity": false,
"skipStakedDestination": true,
- "skipClientUpgrade": false,
"skipUnclaimed": true,
+ "clientUpgrade": {
+ "skip": false
+ },
"minSelfStake": 50000000000000,
"commission": 50000000,
"unclaimedEraThreshold": 1,
@@ -62,8 +64,7 @@ spec:
"skip": false,
"sanctionedCountries": ["RU", "IR", "CU", "KP", "SY"],
"sanctionedRegions": ["Crimea", "Autonomous Republic of Crimea", "Republic of Crimea", "Luhansk", "Luhanska Oblast", "Luhanska", "Luganskaya Oblast’", "Luganskaya", "Donetsk", "Donetska Oblast", "Donetskaya Oblast’", "Donetska", "Donetskaya", "Sevastopol City", "Sevastopol", "Gorod Sevastopol"]
- },
- "forceClientVersion": "v1.12.0"
+ }
},
"cron": {
"monitor": "0 */15 * * * *",
diff --git a/charts/otv-backend/Chart.yaml b/charts/otv-backend/Chart.yaml
index 63127b1c4..e936430ea 100644
--- a/charts/otv-backend/Chart.yaml
+++ b/charts/otv-backend/Chart.yaml
@@ -1,5 +1,5 @@
description: 1K Validators Backend
name: otv-backend
-version: v3.3.0
-appVersion: v3.3.0
+version: v3.3.1
+appVersion: v3.3.1
apiVersion: v2
diff --git a/charts/otv-backend/values.yaml b/charts/otv-backend/values.yaml
index 861175842..bcda562b3 100644
--- a/charts/otv-backend/values.yaml
+++ b/charts/otv-backend/values.yaml
@@ -21,13 +21,16 @@ dataPath: "/data"
backendPort: 3300
storageSize: 20Gi
-resources:
- limits:
- cpu: 600m
- memory: 1Gi
- requests:
- cpu: 300m
- memory: 400Mi
+# Uncomment the following resource to use
+# the default suggested resources
+# resources:
+# limits:
+# cpu: 600m
+# memory: 1Gi
+# requests:
+# cpu: 300m
+# memory: 400Mi
+resources: {}
secret: |
{
diff --git a/docker-compose.current.yml b/docker-compose.current.yml
deleted file mode 100644
index d692b1a9a..000000000
--- a/docker-compose.current.yml
+++ /dev/null
@@ -1,44 +0,0 @@
-version: '3'
-services:
- 1kv-core:
- build:
- context: .
- args:
- PACKAGE: core
- dockerfile: ./Dockerfile
- ports:
- - "127.0.0.1:3300:3300"
- networks:
- testing_net:
- ipv4_address: 172.28.1.1
-
- mongo:
- image: mongo
- restart: always
- ports:
- - "127.0.0.1:27017:27017"
- networks:
- testing_net:
- ipv4_address: 172.28.1.2
-
- mongo-express:
- image: mongo-express
- environment:
- - ME_CONFIG_MONGODB_SERVER=mongo
- - ME_CONFIG_MONGODB_PORT=27017
- - ME_CONFIG_MONGODB_ENABLE_ADMIN=true
- - ME_CONFIG_MONGODB_AUTH_DATABASE=admin
- depends_on:
- - mongo
- ports:
- - "127.0.0.1:8888:8081"
- networks:
- testing_net:
- ipv4_address: 172.28.1.3
-
-networks:
- testing_net:
- ipam:
- driver: default
- config:
- - subnet: 172.28.0.0/16
diff --git a/docker-compose.microservice.yml b/docker-compose.microservice.yml
deleted file mode 100644
index f6928d75c..000000000
--- a/docker-compose.microservice.yml
+++ /dev/null
@@ -1,168 +0,0 @@
-version: '3.8'
-services:
-
- 1kv-core:
- build:
- context: .
- args:
- PACKAGE: core
- dockerfile: ./Dockerfile
- depends_on:
- - redis
- ports:
- - "127.0.0.1:3300:3300"
- healthcheck:
- test: [ "CMD-SHELL", "curl -f http://localhost:3300/healthcheck" ]
- interval: 60s
- timeout: 60s
- retries: 3
- start_period: 180s
- deploy:
- restart_policy:
- condition: on-failure
- delay: 300s
- max_attempts: 3
- window: 120s
- networks:
- testing_net:
- ipv4_address: 172.28.1.1
-
- 1kv-gateway:
- build:
- context: .
- args:
- PACKAGE: gateway
- dockerfile: ./Dockerfile
- depends_on:
- - redis
- ports:
- - "127.0.0.1:3301:3301"
- healthcheck:
- test: ["CMD", "curl", "-f", "http://localhost:3301/healthcheck"]
- interval: 30s
- timeout: 10s
- retries: 3
- start_period: 60s
- deploy:
- restart_policy:
- condition: on-failure
- delay: 300s
- max_attempts: 3
- window: 120s
- networks:
- testing_net:
- ipv4_address: 172.28.1.2
-
-
- 1kv-telemetry:
- build:
- context: .
- args:
- PACKAGE: telemetry
- dockerfile: ./Dockerfile
- depends_on:
- - redis
- ports:
- - "127.0.0.1:3302:3302"
- healthcheck:
- test: [ "CMD-SHELL", "curl -f http://localhost:3302/healthcheck" ]
- interval: 30s
- timeout: 10s
- retries: 3
- start_period: 30s
- deploy:
- restart_policy:
- condition: on-failure
- delay: 300s
- max_attempts: 3
- window: 120s
- networks:
- testing_net:
- ipv4_address: 172.28.1.3
-
- 1kv-worker:
- build:
- context: .
- args:
- PACKAGE: worker
- dockerfile: ./Dockerfile
- deploy:
- # mode: replicated
- # replicas: 8
- restart_policy:
- condition: on-failure
- delay: 30s
- max_attempts: 3
- window: 120s
- depends_on:
- - redis
- - mongo
- ports:
- - "127.0.0.1:3303:3303"
- restart: on-failure
- healthcheck:
- test: [ "CMD-SHELL", "curl -f http://localhost:3303/healthcheck" ]
- interval: 30s
- timeout: 60s
- retries: 3
- start_period: 180s
- networks:
- testing_net:
-# ipv4_address: 172.28.1.3
-
-
- mongo:
- image: mongo
- restart: always
- ports:
- - "127.0.0.1:27017:27017"
- networks:
- testing_net:
- ipv4_address: 172.28.1.4
- logging:
- driver: "json-file"
- options:
- max-size: "50m"
-
- mongo-express:
- image: mongo-express
- environment:
- - ME_CONFIG_MONGODB_SERVER=mongo
- - ME_CONFIG_MONGODB_PORT=27017
- - ME_CONFIG_MONGODB_ENABLE_ADMIN=true
- - ME_CONFIG_MONGODB_AUTH_DATABASE=admin
- depends_on:
- - mongo
- ports:
- - "127.0.0.1:8888:8081"
- networks:
- testing_net:
- ipv4_address: 172.28.1.5
- logging:
- driver: "json-file"
- options:
- max-size: "50m"
-
- redis:
- image: redis:alpine
- expose:
- - "6379"
- volumes:
- - ./redis:/data
- restart: always
- command: ["redis-server", "--bind", "172.28.1.6", "--port", "6379"]
- logging:
- driver: "json-file"
- options:
- max-size: "50m"
- networks:
- testing_net:
- ipv4_address: 172.28.1.6
-
-
-networks:
- testing_net:
- ipam:
- driver: default
- config:
- - subnet: 172.28.0.0/16
diff --git a/docker-compose.yml b/docker-compose.yml
index f849e6b2e..d692b1a9a 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,218 +1,44 @@
-version: '3.8'
+version: '3'
services:
-
- autoheal:
- restart: always
- image: willfarrell/autoheal
- environment:
- - AUTOHEAL_CONTAINER_LABEL=all
- volumes:
- - /var/run/docker.sock:/var/run/docker.sock
-
- 1kv-core:
- environment:
- - PACKAGE=core
- image: 1kv-core:latest
- build:
- context: .
- args:
- PACKAGE: core
- dockerfile: ./Dockerfile-dev
- restart: on-failure
- depends_on:
- - redis
- - mongo
- volumes:
- - .:/code
- ports:
- - "127.0.0.1:3300:3300"
- healthcheck:
- test: [ "CMD-SHELL", "curl -f http://localhost:3300/healthcheck" ]
- interval: 60s
- timeout: 60s
- retries: 3
- start_period: 180s
- deploy:
- restart_policy:
- condition: on-failure
- delay: 300s
- max_attempts: 3
- window: 120s
- networks:
- testing_net:
- ipv4_address: 172.28.1.7
- logging:
- driver: "json-file"
- options:
- max-size: "50m"
-
- 1kv-gateway:
- environment:
- - PACKAGE=gateway
- image: 1kv-gateway:latest
- build:
- context: .
- args:
- PACKAGE: gateway
- dockerfile: ./Dockerfile-dev
- restart: on-failure
- depends_on:
- - redis
- - mongo
- volumes:
- - .:/code
- ports:
- - "127.0.0.1:3301:3301"
- healthcheck:
- test: ["CMD", "curl", "-f", "http://localhost:3301/healthcheck"]
- interval: 30s
- timeout: 10s
- retries: 3
- start_period: 60s
- deploy:
- restart_policy:
- condition: on-failure
- delay: 300s
- max_attempts: 3
- window: 120s
- networks:
- testing_net:
- ipv4_address: 172.28.1.8
- logging:
- driver: "json-file"
- options:
- max-size: "50m"
-
- 1kv-telemetry:
- environment:
- - PACKAGE=telemetry
- image: 1kv-telemetry:latest
- build:
- context: .
- args:
- PACKAGE: telemetry
- dockerfile: ./Dockerfile-dev
- restart: on-failure
- depends_on:
- - redis
- - mongo
- volumes:
- - .:/code
- ports:
- - "127.0.0.1:3302:3302"
- healthcheck:
- test: [ "CMD-SHELL", "curl -f http://localhost:3302/healthcheck" ]
- interval: 30s
- timeout: 10s
- retries: 3
- start_period: 30s
- deploy:
- restart_policy:
- condition: on-failure
- delay: 300s
- max_attempts: 3
- window: 120s
- networks:
- testing_net:
- ipv4_address: 172.28.1.9
- logging:
- driver: "json-file"
- options:
- max-size: "50m"
-
- 1kv-worker:
- environment:
- - PACKAGE=worker
- image: 1kv-worker:latest
- build:
- context: .
- args:
- PACKAGE: worker
- dockerfile: ./Dockerfile-dev
- volumes:
- - .:/code
- deploy:
-# mode: replicated
-# replicas: 8
- restart_policy:
- condition: on-failure
- delay: 30s
- max_attempts: 3
- window: 120s
- depends_on:
- - redis
- - mongo
- ports:
- - "127.0.0.1:3303:3303"
- restart: on-failure
- healthcheck:
- test: [ "CMD-SHELL", "curl -f http://localhost:3303/healthcheck" ]
- interval: 30s
- timeout: 60s
- retries: 3
- start_period: 180s
- networks:
- testing_net:
-# ipv4_address: 172.28.1.16
- logging:
- driver: "json-file"
- options:
- max-size: "50m"
-
- mongo:
- image: mongo
- restart: always
- ports:
- - "127.0.0.1:27017:27017"
- networks:
- testing_net:
- ipv4_address: 172.28.1.12
- logging:
- driver: "json-file"
- options:
- max-size: "50m"
-
- mongo-express:
- image: mongo-express
- environment:
- - ME_CONFIG_MONGODB_SERVER=mongo
- - ME_CONFIG_MONGODB_PORT=27017
- - ME_CONFIG_MONGODB_ENABLE_ADMIN=true
- - ME_CONFIG_MONGODB_AUTH_DATABASE=admin
- depends_on:
- - mongo
- ports:
- - "127.0.0.1:8888:8081"
- networks:
- testing_net:
- ipv4_address: 172.28.1.14
- logging:
- driver: "json-file"
- options:
- max-size: "50m"
-
- redis:
- image: redis:latest
- ports:
- - "127.0.0.1:6379:6379"
- expose:
- - "6379"
- volumes:
- - ./redis:/data
- restart: always
-# command: ["redis-server", "--bind", "172.28.1.13", "--port", "6379"]
- command: ["redis-server", "--port", "6379"]
- logging:
- driver: "json-file"
- options:
- max-size: "50m"
- networks:
- testing_net:
- ipv4_address: 172.28.1.13
+ 1kv-core:
+ build:
+ context: .
+ args:
+ PACKAGE: core
+ dockerfile: ./Dockerfile
+ ports:
+ - "127.0.0.1:3300:3300"
+ networks:
+ testing_net:
+ ipv4_address: 172.28.1.1
+
+ mongo:
+ image: mongo
+ restart: always
+ ports:
+ - "127.0.0.1:27017:27017"
+ networks:
+ testing_net:
+ ipv4_address: 172.28.1.2
+
+ mongo-express:
+ image: mongo-express
+ environment:
+ - ME_CONFIG_MONGODB_SERVER=mongo
+ - ME_CONFIG_MONGODB_PORT=27017
+ - ME_CONFIG_MONGODB_ENABLE_ADMIN=true
+ - ME_CONFIG_MONGODB_AUTH_DATABASE=admin
+ depends_on:
+ - mongo
+ ports:
+ - "127.0.0.1:8888:8081"
+ networks:
+ testing_net:
+ ipv4_address: 172.28.1.3
networks:
- testing_net:
- ipam:
- driver: default
- config:
- - subnet: 172.28.0.0/16
+ testing_net:
+ ipam:
+ driver: default
+ config:
+ - subnet: 172.28.0.0/16
diff --git a/docs/architecture/microservice.png b/docs/architecture/microservice.png
deleted file mode 100644
index 092be7645..000000000
Binary files a/docs/architecture/microservice.png and /dev/null differ
diff --git a/docs/docs/backend/backend.md b/docs/docs/backend/backend.md
index 2a6715774..e2c3da0be 100644
--- a/docs/docs/backend/backend.md
+++ b/docs/docs/backend/backend.md
@@ -18,15 +18,13 @@ A monorepo containing TypeScript microservices for the .
# Overview
-> A monorepo containing TypeScript microservices
+> A monorepo containing TypeScript packages
The monorepo is managed using Yarn workspaces, and contains the following packages:
-- [`packages/common`](packages/common): A package containing common code shared across all microservices.
+- [`packages/common`](packages/common): A package containing common code shared across all packages.
- [`packages/core`](packages/core): A package containing the core logic of the Thousand Validators Program.
- [`packages/gateway`](packages/gateway): A package for an API gateway that exposes the backend with a REST API.
- [`packages/telemetry`](packages/telemetry): A package for a telemetry client that monitors uptime
-- [`packages/worker`](packages/worker): A packages for job queue workers that perform background tasks.
-
## Installation & Setup
@@ -35,39 +33,10 @@ The monorepo is managed using Yarn workspaces, and contains the following packag
There's a few ways of running the backend with docker containers, either in kubernetes, or with docker-compose.
-There is the `Current / Monolith` way of running instances, and the `Microservice` way of running instances.
-
-`Current / Monolith` Architecture:
-
-![Current / Monolith Architecture](../../architecture/monolith.png)
-
-
-`Microservice` Architecture:
-
-![Microservice Architecture](../../architecture/microservice.png)
+Current Architecture:
-The following are different ways of running in either `Current` or `Microservice` architecture with either `Kusama` or `Polkadot`, and either `Development` or `Production`:
+![Current Architecture](../../architecture/monolith.png)
-- `Kusama Current`
- - Running as a monolith with production values
-- `Polkadot Current`
- - Running as a monolith with production values
-- `Kusama Microservice`
- - Running as microservices with production values
-- `Polkadot Microservice`
- - Running as microservices with production values
-- `Polkadot Current Dev`
- - Running as a monolith with development values
-- `Kusama Current Dev`
- - Running as a monolith with development values
-- `Kusama Microservice Dev`
- - Running as microservices with development values
-- `Polkadot Microservice Dev`
- - Running as microservices with development values
-
-Each package contains a `Dockerfile`, which is used for running in production, and `Dockerfile-dev`, which is used for development. The development images will use run with `nodemon` so that each time files is saved/changed it will rebuild the image and restart the container. Any changes for the regular run `Dockerfile` will need a manual rebuilding of the docker image.
-
-The difference of running as either `Current` or `Microservice` is in which docker containers get run with `docker-compose` (Microservices have services separated out as their own containers, and additionally rely on Redis for messages queues). Outside of this everything else (whether it's run as a Kusama or Polkadot instance) is determined by the JSON configuration files that get generated.
### Cloning the Repository
```bash
@@ -180,32 +149,13 @@ yarn create-config-polkadot-microservice
#### Running `Kusama Current` or `Polkadot Current`:
-Either is from the same `docker-compose.current.yml` file, and runs only the `core` container, `mongo` container, and `mongo-express` container.
-
-Build and run as detached daemon:
-```bash
-docker compose -f docker-compose.current.yml up -d --build
-```
-
-#### Running `Kusama Microservice` or `Polkadot Microservice`:
-
-Either is from the same `docker-compose.microservice.yml` file. This runs `core`, `gateway`, `telemetry`, and `worker` as separate processes in their own container - each one needs it's own configuration file. It additionally runs a `redis`, `mongo`, and `mongo-express` container.
-
-Build and run as detached daemon:
-```bash
-docker compose -f docker-compose.microservice.yml up -d --build
-```
-
-#### Running `Kusama Current Dev`, `Polkadot Current Dev`, `Kusama Microservice Dev`, or `Polkadot Microservice Dev`
-
-Either is from the same `docker-compose.yml` file.
+Either is from the same `docker-compose.yml` file, and runs only the `core` container, `mongo` container, and `mongo-express` container.
Build and run as detached daemon:
```bash
docker compose -f docker-compose.yml up -d --build
```
-
### Viewing Logs
To view the aggregated logs of all the containers:
diff --git a/docs/docs/backend/config.md b/docs/docs/backend/config.md
index bb85480b5..d8203d8bc 100644
--- a/docs/docs/backend/config.md
+++ b/docs/docs/backend/config.md
@@ -42,7 +42,8 @@ An example config may look something like:
- `dryRun`: Boolean (true/false). If set to true, the Nominator accounts that are added will calculate what a nomination would look like (how many and which validators), but not craft or submit any transactions. No nominations will be done when this flag is set. In the `nominate` function of the `Nominator` class, the `dryRun` flag is checked, and if it is set to true, the function will return after logging the validators it would nominate without doing anything. This flag is optional and set to `false` by default.
- `networkPrefix`: Integer. Defines the network prefix. For Kusama, this is `2`, and for Polkadot, this is `0`. It can be set to `3` for running a local test network, although this isn't used much anymore. **This flag is required for `core` and `worker` services.**
-- `apiEndpoints`: Array of strings. Lists the RPC endpoints for the chain. When given a list of multiple, it will pick one at random to create a websocket connection to - this single connection is used throughout the entire service for any queries or submitting transactions. **This is required for `core` and `worker` services.
+- `apiEndpoints`: Array of strings. Lists the RPC endpoints for the chain. When given a list of multiple, it will pick one at random to create a websocket connection to - this single connection is used throughout the entire service for any queries or submitting transactions. **This is required for `core` and `worker` services.**
+- `apiPeopleEndpoints`: Optional array of strings. Lists the RPC endpoints for the People parachain, if it is enabled on the network.
- `bootstrap`: Boolean. An **optional** flag that can be set to `true` to enable the bootstrap process. This can be used when running a instance of the backend and would query the main Kusama or Polkadot instances at the api endpoints specified below to populate the db with non-deterministic values like `rank` or `discoveredAt`. _This isn't currently used anywhere yet_
- `kusamaBootstrapEndpoint`: String. URL for the Kusama bootstrap endpoint. **optional**. _This isn't currently used anywhere yet_.
- `polkadotBootstrapEndpoint`: String. URL for the Polkadot bootstrap endpoint. **optional**. _This isn't currently used anywhere yet_.
@@ -56,9 +57,10 @@ An example config may look something like:
"constraints": {
"skipConnectionTime": true,
"skipIdentity": false,
- "skipClientUpgrade": false,
"skipUnclaimed": false,
- "forceClientVersion": "v0.9.30",
+ "clientUpgrade": {
+ "skip": false
+ },
"minSelfStake": 10000000000000,
"commission": 150000000,
"unclaimedEraThreshold": 4
@@ -69,9 +71,9 @@ The `constraints` section defines validity constraint parameters for validators,
- `skipConnectionTime`: Boolean. Skips checking the 7 day required connection time if set to true. __optional__, defaults to `false`.
- `skipIdentity`: Boolean. Skips the check for a verified identity. __optional__, defaults to `false`.
-- `skipClientUpgrade`: Boolean. Skips client version upgrade check. __optional__, defaults to `false`.
- `skipUnclaimed`: Boolean. Skips the check for unclaimed rewards. __optional__, defaults to `false`.
-- `forceClientVersion`: String. Specific client version to be enforced. __optional__, if this is set, it will allow versions higher than what is specified.
+- `clientUpgrade.skip`: Boolean. Skips client version upgrade check. __optional__, defaults to `false`.
+- `clientUpgrade.forceVersion`: String. Specific client version to be enforced. __optional__, if this is set, it will allow versions greater than or equal to what is specified.
- `minSelfStake`: Integer. Minimum self-stake required. **required**. This number needs to be specified in `Plancks` (1 DOT = 10^10 Plancks, 1 KSM = 10^12 Plancks).
- `commission`: Integer. Max commission rate. **required**. This number needs to be specified in chain units that have 6 decimal places - for example `150000000` corresponds to 15% commission.
- `unclaimedEraThreshold`: Integer. Threshold for unclaimed eras. **required**. A validator having pending rewards for past eras longer than this threshold will be deemed invalid. This gets skipped if `skipUnclaimed` is set to `true`. This number is specified as number of eras, so `4` for example means validators are invalid if they have pending rewards older than 4 eras ago.
@@ -358,24 +360,6 @@ The format
- `forceRound`: Boolean. Upon `scorekeeper` starting, will initiate new nominations immediately, regardless of the time since the last nomination. **required**, defaults to `false`. This can be useful to do nominations when there are issues with proxy transactions getting stuck for example.
- `nominating`: Boolean. Indicates whether the nominator account will create and submit transactions or not. **required**. Nominators will only submit transactions when this is set to `true`, otherwise when a nomination is supposed to occur the process will not do anything when set to `false`.
-## Redis
-
-Configuration for Redis. Redis is used when run as microservices for messages queue passing. When run as a monolith it is not used and not required. When run as microservices, `core`, `gateway`, and `worker` will need to have their own redis parameters specified in their respective config files.
-
-An example config may look something like:
-
-```json
- "redis": {
- "enable": true,
- "host": "redis",
- "port": 6379
- },
-```
-
-- `enable`: Boolean. Enables or disables Redis. **optional**. defaults to `false if not specified
-- `host`: String. Redis host. **required** if run as microservices, **optional** if not.
-- `port`: Integer. Redis port. **required** if run as microservices, **optional** if not.
-
## Server
The `gateway` package uses Koa to serve various db queries from specified endpoints. `gateway` may either be run as a monolith or as a microservice. If run as a microservice, the `gateway` service will need to have its own `server` parameters specified in its config file.
@@ -459,9 +443,10 @@ An example `core` config run as microservices may look something like:
"constraints": {
"skipConnectionTime": true,
"skipIdentity": false,
- "skipClientUpgrade": false,
"skipUnclaimed": false,
- "forceClientVersion": "v0.9.30",
+ "clientUpgrade": {
+ "skip": false
+ },
"minSelfStake": 10000000000000,
"commission": 150000000,
"unclaimedEraThreshold": 4
@@ -505,11 +490,6 @@ An example `core` config run as microservices may look something like:
"forceRound": false,
"nominating": false
},
- "redis": {
- "enable": true,
- "host": "redis",
- "port": 6379
- },
"server": {
"enable": false,
"port": 3300
@@ -541,11 +521,6 @@ An example gateway config run as microservices may look something like:
"uri": "mongodb://mongo:27017"
}
},
- "redis": {
- "enable": true,
- "host": "redis",
- "port": 6379
- },
"server": {
"enable": true,
"port": 3301,
@@ -637,12 +612,7 @@ An example Worker config run as microservices may look something like:
"useOpenGovDelegation": true,
"useRpc": true,
"useClient": true
- },
- "redis": {
- "enable": true,
- "host": "redis",
- "port": 6379
}
}
-```
\ No newline at end of file
+```
diff --git a/helmfile.d/config/kusama/otv-backend-ci.yaml.gotmpl b/helmfile.d/config/kusama/otv-backend-ci.yaml.gotmpl
index 854cb5326..bc2234fca 100644
--- a/helmfile.d/config/kusama/otv-backend-ci.yaml.gotmpl
+++ b/helmfile.d/config/kusama/otv-backend-ci.yaml.gotmpl
@@ -22,20 +22,22 @@ config: |
"apiEndpoints": [
"wss://kusama-rpc-tn.dwellir.com",
"wss://kusama-rpc.dwellir.com",
- "wss://kusama.public.curie.radiumblock.xyz/ws",
+ "wss://kusama.public.curie.radiumblock.co/ws",
"wss://rpc.ibp.network/kusama",
"wss://rpc.dotters.network/kusama",
"wss://ksm-rpc.stakeworld.io"
],
+ "apiPeopleEndpoints": ["wss://kusama-people-rpc.polkadot.io"],
"candidatesUrl": "https://raw.githubusercontent.com/w3f/1k-validators-be/master/candidates/kusama.json"
},
"constraints": {
"skipConnectionTime": false,
"skipIdentity": false,
"skipStakedDestination": true,
- "skipClientUpgrade": false,
"skipUnclaimed": false,
- "forceClientVersion": "v0.9.30",
+ "clientUpgrade": {
+ "skip": false
+ },
"minSelfStake": 10000000000000000000,
"commission": 150000000,
"unclaimedEraThreshold": 4
diff --git a/helmfile.d/config/kusama/otv-backend-local.yaml.gotmpl b/helmfile.d/config/kusama/otv-backend-local.yaml.gotmpl
index cc9e6fa1e..ba8a017db 100644
--- a/helmfile.d/config/kusama/otv-backend-local.yaml.gotmpl
+++ b/helmfile.d/config/kusama/otv-backend-local.yaml.gotmpl
@@ -24,9 +24,10 @@ config: |
"skipConnectionTime": false,
"skipIdentity": false,
"skipStakedDestination": true,
- "skipClientUpgrade": false,
"skipUnclaimed": false,
- "forceClientVersion": "v0.9.30",
+ "clientUpgrade": {
+ "skip": false
+ },
"minSelfStake": 10000000000000000000,
"commission": 150000000,
"unclaimedEraThreshold": 4
diff --git a/helmfile.d/config/polkadot/otv-backend-ci.yaml.gotmpl b/helmfile.d/config/polkadot/otv-backend-ci.yaml.gotmpl
index 27bb1ff75..7e46208fd 100644
--- a/helmfile.d/config/polkadot/otv-backend-ci.yaml.gotmpl
+++ b/helmfile.d/config/polkadot/otv-backend-ci.yaml.gotmpl
@@ -32,9 +32,10 @@ config: |
"skipConnectionTime": false,
"skipIdentity": false,
"skipStakedDestination": false,
- "skipClientUpgrade": false,
"skipUnclaimed": false,
- "forceClientVersion": "v0.9.30",
+ "clientUpgrade": {
+ "skip": false
+ },
"minSelfStake": 50000000000000,
"commission": 50000000,
"unclaimedEraThreshold": 1
diff --git a/helmfile.d/config/polkadot/otv-backend-local.yaml.gotmpl b/helmfile.d/config/polkadot/otv-backend-local.yaml.gotmpl
index f16e1f0c2..38bad4ffa 100644
--- a/helmfile.d/config/polkadot/otv-backend-local.yaml.gotmpl
+++ b/helmfile.d/config/polkadot/otv-backend-local.yaml.gotmpl
@@ -24,9 +24,10 @@ config: |
"skipConnectionTime": false,
"skipIdentity": false,
"skipStakedDestination": false,
- "skipClientUpgrade": false,
"skipUnclaimed": false,
- "forceClientVersion": "v0.9.30",
+ "clientUpgrade": {
+ "skip": false
+ },
"minSelfStake": 50000000000000,
"commission": 50000000,
"unclaimedEraThreshold": 1
diff --git a/package.json b/package.json
index 2b7c7171e..d8afa8b59 100644
--- a/package.json
+++ b/package.json
@@ -6,7 +6,6 @@
"workspaces": [
"packages/scorekeeper-status-ui",
"packages/common",
- "packages/worker",
"packages/gateway",
"packages/telemetry",
"packages/core"
@@ -16,46 +15,21 @@
"scorekeeper-status-ui:build": "cd packages/scorekeeper-status-ui && npm build",
"scorekeeper-status-ui:dev": "yarn workspace @1kv/scorekeeper-status-ui dev",
"open:polkadot-apps": "open-cli https://polkadot.js.org/apps/?rpc=ws%3A%2F%2F127.0.0.1%3A9944#/staking",
- "open:bull": "open-cli http://localhost:3301/bull",
- "open:mongo-express: ": "open-cli http://localhost:8888",
+ "open:mongo-express": "open-cli http://localhost:8888",
"create-config-kusama-current": "yarn workspace @1kv/core run create-config-kusama-current",
"create-config-polkadot-current": "yarn workspace @1kv/core run create-config-polkadot-current",
- "create-config-kusama-current-dev": "yarn workspace @1kv/core run create-config-kusama-current-dev",
- "create-config-polkadot-current-dev": "yarn workspace @1kv/core run create-config-polkadot-current-dev",
- "create-config-kusama-microservice": "yarn workspaces foreach run create-config-kusama-microservice",
- "create-config-polkadot-microservice": "yarn workspaces foreach run create-config-polkadot-microservice",
- "create-config-kusama-microservice-dev": "yarn workspaces foreach run create-config-kusama-microservice-dev",
- "create-config-polkadot-microservice-dev": "yarn workspaces foreach run create-config-polkadot-microservice-dev",
- "docker:kusama-current:start": "yarn run create-config-kusama-current && docker compose -f docker-compose.current.yml up -d --build && yarn run docker:logs",
- "docker:polkadot-current:start": "yarn run create-config-polkadot-current && docker compose -f docker-compose.current.yml up -d --build && yarn run docker:logs",
- "docker:kusama-current-dev:start": "yarn run create-config-kusama-current-dev && docker compose -f docker-compose.yml up -d --build && yarn run docker:logs",
- "docker:polkadot-current-dev:start": "yarn run create-config-polkadot-current-dev && docker compose -f docker-compose.yml up -d --build && yarn run docker:logs",
- "docker:kusama-microservice:start": "yarn run create-config-kusama-microservice && docker compose -f docker-compose.microservice.yml up -d --build && yarn run docker:logs",
- "docker:kusama-microservice-dev:start": "yarn run create-config-kusama-microservice-dev && docker compose -f docker-compose.yml up -d --build && yarn run docker:logs",
- "docker:polkadot-microservice:start": "yarn run create-config-polkadot-microservice && docker compose -f docker-compose.microservice.yml up -d --build && yarn run docker:logs",
- "docker:polkadot-microservice-dev:start": "yarn run create-config-polkadot-microservice-dev && docker compose -f docker-compose.yml up -d --build && yarn run docker:logs",
+ "docker:kusama-current:start": "yarn run create-config-kusama-current && docker compose -f docker-compose.yml up -d --build && yarn run docker:logs",
+ "docker:polkadot-current:start": "yarn run create-config-polkadot-current && docker compose -f docker-compose.yml up -d --build && yarn run docker:logs",
"docker:logs": "docker compose logs -f",
"docker:logs:core": "docker logs 1k-validators-be-1kv-core-1 -f",
- "docker:logs:gateway": "docker logs 1k-validators-be-1kv-gateway-1 -f",
- "docker:logs:telemetry": "docker logs 1k-validators-be-1kv-telemetry-1 -f",
- "docker:logs:worker": "docker logs 1k-validators-be-1kv-worker-1 -f",
"docker:stop": "docker compose down",
"lint": "yarn workspaces foreach run lint",
"lint:fix": "yarn workspaces foreach run lint:fix",
- "clean": "rm -rf redis/ && yarn workspaces foreach run clean",
+ "clean": "yarn workspaces foreach run clean",
"build": "yarn run docs && yarn workspaces foreach -t run build",
- "build:prod": "yarn workspaces foreach -pt run build:prod",
- "build:core": "yarn workspace @1kv/common run build run build && yarn workspace @1kv/gateway run build && yarn workspace @1kv/telemetry run build && yarn workspace @1kv/worker run build && yarn workspace @1kv/core run build",
"build:clean": "yarn workspaces foreach run clean:build",
- "start:dev:gateway": "yarn workspace @1kv/gateway run start:dev",
- "start:js:gateway": "NODE_OPTIONS='--max-old-space-size=10096' yarn workspace @1kv/gateway run js:start",
"start:dev:core": "yarn workspace @1kv/core run start:dev",
"start:js:core": "NODE_OPTIONS='--max-old-space-size=10096' yarn workspace @1kv/core run js:start",
- "start:dev:telemetry": "yarn workspace @1kv/telemetry run start:dev",
- "start:js:telemetry": "NODE_OPTIONS='--max-old-space-size=10096' yarn workspace @1kv/telemetry run js:start",
- "start:dev:worker": "yarn workspace @1kv/worker run start:dev",
- "start:js:worker": "NODE_OPTIONS='--max-old-space-size=10096' yarn workspace @1kv/worker run js:start",
- "test:core": "yarn workspace @1kv/core run test",
"test:common:int": "yarn workspace @1kv/common run test:int",
"test:common:unit": "yarn workspace @1kv/common run test:unit",
"test:common:unit:ci": "circleci local execute CommonUnitTests",
@@ -72,15 +46,19 @@
"turbo": "turbo build"
},
"devDependencies": {
- "@ava/typescript": "^4.1.0",
- "@babel/preset-react": "^7.23.3",
+ "@types/cron": "^2.4.0",
+ "@types/koa": "^2.15.0",
+ "@types/koa-bodyparser": "^4.3.12",
+ "@types/mongoose": "^5.11.97",
+ "@types/node": "^20.11.5",
+ "@types/semver": "^7.5.8",
+ "@types/ws": "^8.5.10",
"@typescript-eslint/eslint-plugin": "^5.59.9",
"@typescript-eslint/parser": "^5.59.9",
+ "@vitest/coverage-istanbul": "^1.3.1",
+ "@vitest/coverage-v8": "^1.3.1",
"@vitest/ui": "^1.3.1",
- "ava": "^6.1.2",
"concurrently": "^8.2.2",
- "esbuild": "^0.20.1",
- "esbuild-plugin-node-polyfill": "^0.0.1",
"eslint": "8.42.0",
"eslint-config-prettier": "^8.8.0",
"eslint-plugin-prettier": "5.0.0",
@@ -88,9 +66,11 @@
"nodemon": "^3.1.0",
"open-cli": "^8.0.0",
"prettier": "^3.2.4",
- "typedoc": "^0.25.12",
- "typedoc-plugin-markdown": "^3.17.1",
+ "ts-node": "^10.9.2",
+ "turbo": "v2.0.3",
+ "typescript": "^5.3.3",
"vite": "^5.1.6",
+ "vite-plugin-dts": "^3.7.3",
"vite-plugin-node": "^3.1.0",
"vite-tsconfig-paths": "^4.3.1",
"vitest": "^1.3.1"
@@ -99,40 +79,21 @@
"@1kv/common": "workspace:*",
"@1kv/gateway": "workspace:*",
"@1kv/telemetry": "workspace:*",
- "@1kv/worker": "workspace:*",
- "@bull-board/api": "^5.15.1",
- "@bull-board/koa": "^5.15.0",
"@koa/router": "^12.0.1",
"@octokit/rest": "^20.0.2",
- "@polkadot/api": "^11.1.1",
+ "@polkadot/api": "^11.2.1",
"@polkadot/keyring": "^12.6.2",
- "@types/cron": "^2.4.0",
- "@types/jest": "^29.5.12",
- "@types/koa": "^2.15.0",
- "@types/koa-bodyparser": "^4.3.12",
- "@types/mongoose": "^5.11.97",
- "@types/node": "^20.11.5",
- "@types/semver": "^7.5.8",
- "@types/ws": "^8.5.10",
- "@vitest/coverage-istanbul": "^1.3.1",
- "@vitest/coverage-v8": "^1.3.1",
"axios": "^1.6.7",
- "bree": "^9.2.2",
- "bs58": "^5.0.0",
- "bullmq": "^5.4.2",
"chalk": "5.3.0",
"coingecko-api-v3": "^0.0.29",
"commander": "^12.0.0",
"cron": "^3.1.6",
- "esbuild-node-builtins": "^0.1.0",
+ "date-fns": "^3.6.0",
"eventemitter3": "^5.0.1",
- "hash.js": "^1.1.7",
- "jest": "^29.7.0",
"koa": "^2.15.0",
"koa-bodyparser": "^4.4.1",
"koa-cash": "^4.1.1",
"koa-mount": "^4.0.0",
- "koa-send": "^5.0.1",
"koa-static": "^5.0.0",
"koa-swagger-decorator": "^1.8.7",
"koa2-cors": "^2.0.6",
@@ -141,24 +102,11 @@
"mongodb": "6.5.0",
"mongodb-memory-server": "^9.1.7",
"mongoose": "^8.2.1",
- "node-fetch": "3.3.2",
"prettier": "^3.2.4",
- "reconnecting-websocket": "^4.4.0",
- "request": "^2.88.2",
"semver": "^7.6.0",
"swagger-jsdoc": "^6.2.8",
"swagger2": "^4.0.3",
"swagger2-koa": "^4.0.0",
- "ts-jest": "^29.1.2",
- "ts-node": "^10.9.2",
- "turbo": "^1.12.5",
- "turborepo": "^0.0.1",
- "typescript": "^5.3.3",
- "vite": "^5.1.6",
- "vite-plugin-dts": "^3.7.3",
- "vite-plugin-node": "^3.1.0",
- "vite-tsconfig-paths": "^4.3.1",
- "vitest": "^1.3.1",
"winston": "^3.12.0",
"ws": "^8.16.0",
"yamljs": "^0.3.0"
diff --git a/packages/common/Dockerfile b/packages/common/Dockerfile
deleted file mode 100644
index 8fd6d5bed..000000000
--- a/packages/common/Dockerfile
+++ /dev/null
@@ -1,7 +0,0 @@
-FROM node:17-slim
-ARG MATRIX_TOKEN
-WORKDIR /code
-COPY . .
-RUN ["npm", "i"]
-RUN ["npm", "run", "build"]
-CMD ["npm", "run", "js:start"]
diff --git a/packages/common/Dockerfile-dev b/packages/common/Dockerfile-dev
deleted file mode 100644
index 7b2ec4f15..000000000
--- a/packages/common/Dockerfile-dev
+++ /dev/null
@@ -1,4 +0,0 @@
-FROM node:18 AS builder
-COPY . /app
-WORKDIR /app
-RUN npm install -g typescript && yarn set version 3.2.2 && yarn install && yarn workspace @1kv/common build
diff --git a/packages/common/esbuild.js b/packages/common/esbuild.js
deleted file mode 100644
index 8bd0743c8..000000000
--- a/packages/common/esbuild.js
+++ /dev/null
@@ -1,98 +0,0 @@
-import esbuild from "esbuild";
-
-const isProduction = process.argv.includes("--prod");
-
-const externalPackages = [
- "@polkadot/api-augment",
- "velocityjs",
- "dustjs-linkedin",
- "atpl",
- "liquor",
- "twig",
- "eco",
- "jazz",
- "jqtpl",
- "hamljs",
- "hamlet",
- "whiskers",
- "haml-coffee",
- "hogan.js",
- "templayed",
- "underscore",
- "walrus",
- "mustache",
- "just",
- "ect",
- "mote",
- "toffee",
- "dot",
- "bracket-template",
- "ractive",
- "htmling",
- "babel-core",
- "plates",
- "vash",
- "slm",
- "marko",
- "teacup/lib/express",
- "coffee-script",
- "squirrelly",
- "twing",
- "matris-js-sdk",
- "@1kv/telemetry",
- "@1kv/gateway",
- "@1kv/common",
- "mongoose",
- "logform",
- "winston",
- "ws",
- "form-data",
- "combined-stream",
- "proxy-from-env",
- "follow-redirects",
- "cron",
- "coingecko-api-v3",
- "matrix-js-sdk",
- "node-mongodb-native",
- "mongoose",
-];
-
-const buildOptions = {
- entryPoints: ["src/index.ts"],
- bundle: true,
- minify: isProduction,
- platform: "node",
- target: "node18",
- external: externalPackages,
- outdir: "build",
- tsconfig: "tsconfig.json",
- // splitting: true,
- format: "esm",
- sourcemap: !isProduction,
- logLevel: "info",
-};
-
-if (process.argv.includes("--watch")) {
- buildOptions.watch = {
- onRebuild(error, result) {
- if (error) console.error("watch build failed:", error);
- else
- console.log(
- "watch build succeeded at",
- new Date().toLocaleTimeString(),
- );
- },
- };
- console.log("watch mode enabled");
-}
-
-if (isProduction) {
- buildOptions.define = {
- "process.env.NODE_ENV": "'production'",
- };
-}
-
-esbuild.build(buildOptions).catch((error) => {
- console.error(error);
- process.exit(1);
-});
diff --git a/packages/common/package.json b/packages/common/package.json
index 0d49037e1..6a9f0d316 100644
--- a/packages/common/package.json
+++ b/packages/common/package.json
@@ -1,26 +1,25 @@
{
"name": "@1kv/common",
- "version": "3.3.0",
+ "version": "3.3.1",
"description": "Services for running the Thousand Validator Program.",
"main": "build/index.js",
"types": "build/index.d.ts",
"scripts": {
"build": "tsc --build tsconfig.json",
- "build:prod": "node esbuild.js --prod",
"docker": "docker-compose rm -f; docker-compose build --no-cache; docker-compose up -d",
"lint": "../../node_modules/.bin/eslint 'src/**/*.{js,ts,tsx}' --quiet",
"lint:fix": "../../node_modules/.bin/eslint 'src/**/*.{js,ts,tsx, json}' --quiet --fix",
"clean": "../../node_modules/.bin/rimraf ./build tsconfig.tsbuildinfo combined.log",
"clean:build": "yarn clean && yarn build",
"writeCandidatesFile": "../../node_modules/.bin/esbuild src/scripts/writeCandidatesFile.ts --platform=node --bundle | node && yarn lint:fix",
- "test:unit": "SKIP_MD5=true LOG_LEVEL=info ../../node_modules/.bin/vitest --config vitest.unit.config.mts --no-file-parallelism --coverage",
- "test:unit:silent": "SKIP_MD5=true LOG_LEVEL=warn ../../node_modules/.bin/vitest --config vitest.unit.config.mts --no-file-parallelism --coverage",
- "test:int": "SKIP_MD5=true LOG_LEVEL=info ../../node_modules/.bin/vitest --config vitest.int.config.mts --no-file-parallelism --coverage",
- "test:int:silent": "SKIP_MD5=true LOG_LEVEL=warn ../../node_modules/.bin/vitest --config vitest.int.config.mts --no-file-parallelism --coverage",
- "test:chaindata:int": " ../../node_modules/.bin/vitest chaindata.int.test.ts -c vitest.int.config.mts --coverage",
- "test:apihandler:int": " ../../node_modules/.bin/vitest ApiHandler.int.test.ts -c vitest.int.config.mts --coverage",
- "test:nominator:int": "../../node_modules/.bin/vitest Nominator.int.test.ts -c vitest.int.config.mts --coverage",
- "test:scorekeeper:int": "../../node_modules/.bin/vitest scorekeeper.int.test.ts NumNominations.int.test.ts -c vitest.int.config.mts --coverage",
+ "test:unit": "SKIP_MD5=true LOG_LEVEL=info ../../node_modules/.bin/vitest --config vitest.unit.config.mts --run --no-file-parallelism --coverage",
+ "test:unit:silent": "SKIP_MD5=true LOG_LEVEL=warn ../../node_modules/.bin/vitest --config vitest.unit.config.mts --run --no-file-parallelism --coverage",
+ "test:int": "SKIP_MD5=true LOG_LEVEL=info ../../node_modules/.bin/vitest --config vitest.int.config.mts --run --no-file-parallelism --coverage",
+ "test:int:silent": "SKIP_MD5=true LOG_LEVEL=warn ../../node_modules/.bin/vitest --config vitest.int.config.mts --run --no-file-parallelism --coverage",
+ "test:chaindata:int": " ../../node_modules/.bin/vitest chaindata.int.test.ts -c vitest.int.config.mts --coverage --run",
+ "test:apihandler:int": " ../../node_modules/.bin/vitest ApiHandler.int.test.ts -c vitest.int.config.mts --coverage --run",
+ "test:nominator:int": "../../node_modules/.bin/vitest Nominator.int.test.ts -c vitest.int.config.mts --coverage --run",
+ "test:scorekeeper:int": "../../node_modules/.bin/vitest scorekeeper.int.test.ts NumNominations.int.test.ts -c vitest.int.config.mts --coverage --run",
"test": "yarn test:unit && yarn test:int",
"ci:checkCandidatesFile": "../../node_modules/.bin/ts-node src/scripts/ci/checkCandidatesFile.ts"
},
diff --git a/packages/common/src/ApiHandler/ApiHandler.ts b/packages/common/src/ApiHandler/ApiHandler.ts
index 123564c56..ac1142f22 100644
--- a/packages/common/src/ApiHandler/ApiHandler.ts
+++ b/packages/common/src/ApiHandler/ApiHandler.ts
@@ -2,7 +2,6 @@ import { ApiPromise, WsProvider } from "@polkadot/api";
import EventEmitter from "eventemitter3";
import logger from "../logger";
-import { sleep } from "../utils/util";
import { API_PROVIDER_TIMEOUT, POLKADOT_API_TIMEOUT } from "../constants";
export const apiLabel = { label: "ApiHandler" };
@@ -12,167 +11,147 @@ export const apiLabel = { label: "ApiHandler" };
* to a different provider if one proves troublesome.
*/
class ApiHandler extends EventEmitter {
- private _wsProvider?: WsProvider;
- private _api: ApiPromise | null = null;
- private readonly _endpoints: string[] = [];
- static isConnected = false;
- private healthCheckInProgress = false;
- private _currentEndpoint?: string;
- public upSince: number = Date.now();
+ private wsProvider?: WsProvider;
+ private api: ApiPromise | null = null;
+ private readonly endpoints: string[] = [];
+
+ // If we're reconnecting right now, awaiting on this promise will block until connection succeeds
+ private connectionAttempt: Promise<void> | null = null;
+
+ public upSince = -1;
+ public isConnected = false;
+
constructor(endpoints: string[]) {
super();
- this._endpoints = endpoints.sort(() => Math.random() - 0.5);
- this.upSince = Date.now();
+ this.endpoints = endpoints.sort(() => Math.random() - 0.5);
}
- async healthCheck(retries = 0): Promise<boolean> {
- if (retries < 10) {
+ /**
+ * This copies connectWithRetry() logic from WsProvider
+ * The issue with original logic is that `autoConnectMs` is set to 0 when disconnect() is called, but we
+ * want to call it from nextEndpoint()
+ *
+ * This function can be called multiple times, and it'll wait on the same promise, without spamming reconnects.
+ * @see https://github.com/polkadot-js/api/blob/2ef84c5dcdbbff8aec9ba01e4f13a50130d1a6f3/packages/rpc-provider/src/ws/index.ts#L239-L271
+ */
+ private async connectWithRetry(): Promise<void> {
+ if (!this.wsProvider) {
+ throw new Error(
+ "connectWithRetry() is called before initializing WsProvider",
+ );
+ }
+
+ if (this.connectionAttempt instanceof Promise) {
+ await this.connectionAttempt;
+ return;
+ }
+
+ this.isConnected = false;
+ this.connectionAttempt = new Promise<void>(async (resolve) => {
try {
- this.healthCheckInProgress = true;
- let chain;
-
- const isConnected = this._wsProvider?.isConnected;
- if (isConnected && !this._api?.isConnected) {
- try {
- chain = await this._api?.rpc.system.chain();
- } catch (e) {
- await sleep(API_PROVIDER_TIMEOUT);
- }
- }
- chain = await this._api?.rpc.system.chain();
-
- if (isConnected && chain) {
- this.healthCheckInProgress = false;
- return true;
- } else {
- await sleep(API_PROVIDER_TIMEOUT);
- logger.info(`api still disconnected, disconnecting.`, apiLabel);
- await this._wsProvider?.disconnect();
- await this.getProvider(this._endpoints);
- await this.getAPI();
- return false;
- }
- } catch (e: unknown) {
- const errorMessage =
- e instanceof Error ? e.message : "An unknown error occurred";
- logger.error(
- `Error in health check for WS Provider for rpc. ${errorMessage}`,
+ await this.wsProvider.connect();
+
+ await new Promise((resolve, reject) => {
+ const unsubConnect = this.wsProvider.on("connected", resolve);
+ const unsubDisconnect = this.wsProvider.on("disconnected", reject);
+ this.connectionAttempt.finally(() => {
+ unsubConnect();
+ unsubDisconnect();
+ });
+ });
+
+ this.connectionAttempt = null;
+ this.upSince = Date.now();
+ this.isConnected = true;
+ logger.info(`Connected to ${this.currentEndpoint()}`, apiLabel);
+ resolve();
+ } catch (err) {
+ logger.warn(
+ `Connection attempt to ${this.currentEndpoint()} failed: ${JSON.stringify(err)}, trying next endpoint`,
apiLabel,
);
- this.healthCheckInProgress = false;
- return false;
+ setTimeout(() => {
+ this.connectionAttempt = null;
+ this.connectWithRetry().then(resolve);
+ }, API_PROVIDER_TIMEOUT);
}
- }
- return false;
- }
+ });
- public currentEndpoint() {
- return this._currentEndpoint;
+ await this.connectionAttempt;
}
- async getProvider(endpoints: string[]): Promise<WsProvider> {
- return await new Promise((resolve, reject) => {
- const wsProvider = new WsProvider(
- endpoints,
- 5000,
- undefined,
- POLKADOT_API_TIMEOUT,
- );
+ /**
+ * In case of errors like RPC rate limit, we might want to force endpoint change
+ * PJS handles endpoint rotation internally, changing the endpoint on every next connection attempt.
+ * We disconnect here and reconnect right away; the `"disconnected"` handler also triggers a reconnect, but concurrent attempts are deduplicated via `connectionAttempt`
+ */
+ async nextEndpoint() {
+ logger.info("Rotating API endpoint", apiLabel);
+ await this.wsProvider.disconnect();
+ await this.connectWithRetry();
+ }
- wsProvider.on("disconnected", async () => {
- try {
- const isHealthy = await this.healthCheck();
- logger.info(
- `[Disconnection] ${this._currentEndpoint}} Health check result: ${isHealthy}`,
- apiLabel,
- );
- resolve(wsProvider);
- } catch (error: any) {
- logger.warn(
- `WS provider for rpc ${endpoints[0]} disconnected!`,
- apiLabel,
- );
- reject(error);
- }
- });
- wsProvider.on("connected", () => {
- logger.info(`WS provider for rpc ${endpoints[0]} connected`, apiLabel);
- this._currentEndpoint = endpoints[0];
- resolve(wsProvider);
- });
- wsProvider.on("error", async () => {
- try {
- const isHealthy = await this.healthCheck();
- logger.info(
- `[Error] ${this._currentEndpoint} Health check result: ${isHealthy}`,
- apiLabel,
- );
- resolve(wsProvider);
- } catch (error: any) {
- logger.error(`Error thrown for rpc ${this._endpoints[0]}`, apiLabel);
- reject(error);
- }
- });
- });
+ currentEndpoint(): string | undefined {
+ return this.wsProvider?.endpoint;
}
- async getAPI(retries = 0): Promise<ApiPromise> {
- if (this._wsProvider && this._api && this._api?.isConnected) {
- return this._api;
+ private async healthCheck(): Promise<void> {
+ if (this.connectionAttempt instanceof Promise) {
+ return;
}
- const endpoints = this._endpoints.sort(() => Math.random() - 0.5);
-
try {
- logger.info(
- `[getAPI]: try ${retries} creating provider with endpoint ${endpoints[0]}`,
+ const api = await this.getApi();
+ await api.rpc.system.chain();
+ } catch (err) {
+ logger.warn(
+ `Healthcheck on ${this.currentEndpoint()} failed: ${JSON.stringify(err)}, trying next endpoint`,
apiLabel,
);
- const provider = await this.getProvider(endpoints);
- this._wsProvider = provider;
- logger.info(
- `[getAPI]: provider created with endpoint: ${endpoints[0]}`,
- apiLabel,
- );
- const api = await ApiPromise.create({
- provider: provider,
- noInitWarn: true,
- });
- await api.isReadyOrError;
- logger.info(`[getApi] Api is ready`, apiLabel);
- return api;
- } catch (e) {
- if (retries < 15) {
- return await this.getAPI(retries + 1);
- } else {
- const provider = await this.getProvider(endpoints);
- return await ApiPromise.create({
- provider: provider,
- noInitWarn: true,
- });
- }
+ await this.nextEndpoint();
}
}
- async setAPI() {
- const api = await this.getAPI(0);
- this._api = api;
- this._registerEventHandlers(this._api);
- return api;
- }
+ /**
+ * This function provides access to PJS api. While the ApiPromise instance never changes,
+ * the function will block if we're reconnecting.
+ * It's intended to be called every time instead of saving ApiPromise instance long-term.
+ */
+ async getApi(): Promise<ApiPromise> {
+ if (!this.wsProvider) {
+ this.wsProvider = new WsProvider(
+ this.endpoints,
+ false, // Do not autoconnect
+ undefined,
+ POLKADOT_API_TIMEOUT,
+ );
+ await this.connectWithRetry();
+ this.wsProvider.on("disconnected", () => {
+ logger.warn(`WsProvider disconnected`, apiLabel);
+ this.connectWithRetry();
+ });
+ }
+ if (!this.api) {
+ this.api = await ApiPromise.create({
+ provider: this.wsProvider,
+ noInitWarn: true,
+ });
+ await this.api.isReady;
+ this.registerEventHandlers(this.api);
- isConnected(): boolean {
- return this._wsProvider?.isConnected || false;
- }
+ // healthcheck queries RPC, thus its interval can't be shorter than RPC timeout
+ setInterval(() => {
+ void this.healthCheck();
+ }, POLKADOT_API_TIMEOUT);
+ }
- getApi(): ApiPromise | null {
- if (!this._api) {
- return null;
- } else {
- return this._api;
+ if (this.connectionAttempt instanceof Promise) {
+ await this.connectionAttempt;
}
+
+ return this.api;
}
- _registerEventHandlers(api: ApiPromise): void {
+ private registerEventHandlers(api: ApiPromise): void {
if (!api) {
logger.warn(`API is null, cannot register event handlers.`, apiLabel);
return;
diff --git a/packages/common/src/ApiHandler/__mocks__/ApiHandler.ts b/packages/common/src/ApiHandler/__mocks__/ApiHandler.ts
index 50365edcf..a411b7636 100644
--- a/packages/common/src/ApiHandler/__mocks__/ApiHandler.ts
+++ b/packages/common/src/ApiHandler/__mocks__/ApiHandler.ts
@@ -1,37 +1,30 @@
import EventEmitter from "eventemitter3";
import { ApiPromise } from "@polkadot/api";
+import { vi } from "vitest";
const createMockApiPromise = (): any => ({
- isConnected: jest.fn().mockReturnValue(true),
+ isConnected: vi.fn().mockReturnValue(true),
query: {
system: {
- events: jest.fn().mockImplementation((callback) => callback([])), // Simplified; adjust as needed
+ events: vi.fn().mockImplementation((callback) => callback([])), // Simplified; adjust as needed
},
},
isReadyOrError: Promise.resolve(),
// Use the function itself to provide a new instance for the 'create' method
- create: jest.fn().mockImplementation(() => createMockApiPromise()),
+ create: vi.fn().mockImplementation(() => createMockApiPromise()),
// Add more mocked methods and properties as needed
});
// A mock class for ApiHandler
class ApiHandlerMock extends EventEmitter {
- private _endpoints: string[];
- private _api: ApiPromise = createMockApiPromise as unknown as ApiPromise;
- private healthCheckInProgress = false;
+ private endpoints: string[];
+ private api: ApiPromise = createMockApiPromise as unknown as ApiPromise;
constructor(endpoints: string[]) {
super();
// Initialize with mock data or behavior as needed
- this._endpoints = endpoints.sort(() => Math.random() - 0.5);
- }
-
- async healthCheck(): Promise<boolean> {
- this.healthCheckInProgress = true;
- // Simulate the health check logic; adjust the logic as needed for your tests
- const isConnected = this._api.isConnected;
- this.healthCheckInProgress = false;
- return isConnected;
+ this.endpoints = endpoints.sort(() => Math.random() - 0.5);
+ this.api = createMockApiPromise();
}
async getProvider(endpoints: string[]): Promise {
@@ -42,25 +35,15 @@ class ApiHandlerMock extends EventEmitter {
return undefined;
}
- async getAPI(retries = 0): Promise {
- // Use the mockApiPromise directly for simplicity
- return this._api;
- }
-
- async setAPI(): Promise {
- // Directly set the mock _api; in a real scenario, you might want to simulate more complex logic
- this._api = await this.getAPI();
- }
-
- isConnected(): boolean {
- return this._api.isConnected;
+ get isConnected(): boolean {
+ return this.api.isConnected;
}
- getApi(): ApiPromise {
- return this._api;
+ async getApi(): Promise {
+ return this.api;
}
- _registerEventHandlers(api: ApiPromise): void {
+ private registerEventHandlers(api: ApiPromise): void {
// Simplify the event handler registration for testing purposes
// In a real scenario, you might want to simulate more complex event handling
api.query.system.events((events) => {
diff --git a/packages/common/src/chaindata/chaindata.ts b/packages/common/src/chaindata/chaindata.ts
index 8ea155a6d..70009da6d 100644
--- a/packages/common/src/chaindata/chaindata.ts
+++ b/packages/common/src/chaindata/chaindata.ts
@@ -1,7 +1,6 @@
-import { ApiPromise, WsProvider } from "@polkadot/api";
import ApiHandler, { apiLabel } from "../ApiHandler/ApiHandler";
import logger from "../logger";
-import { NumberResult } from "../types";
+import { ApiHandlers, NumberResult } from "../types";
import {
getApiAt,
getApiAtBlockHash,
@@ -59,86 +58,26 @@ import {
getNominators,
NominatorInfo,
} from "./queries/Nomination";
-import { CHAINDATA_RETRIES } from "../constants";
import { Identity } from "../db";
import { Block } from "@polkadot/types/interfaces";
import { ApiDecoration } from "@polkadot/api/types";
-type JSON = any;
-
export const chaindataLabel = { label: "Chaindata" };
+export enum HandlerType {
+ RelayHandler,
+ PeopleHandler,
+}
+
export class ChainData {
public handler: ApiHandler;
- public api: ApiPromise | null;
- public apiPeople: ApiPromise | null;
+ public peopleHandler: ApiHandler;
- constructor(handler: ApiHandler) {
- this.handler = handler;
- this.api = handler.getApi();
- this.setApiPeople();
+ constructor(handlers: ApiHandlers) {
+ this.handler = handlers.relay;
+ this.peopleHandler = handlers.people;
}
- setApiPeople = async (): Promise => {
- if (!(await this.api.rpc.system.chain()).toLowerCase().includes("kusama"))
- return;
-
- const provider = new WsProvider("wss://kusama-people-rpc.polkadot.io");
- this.apiPeople = await ApiPromise.create({ provider: provider });
- if (this.apiPeople) {
- this.apiPeople.on("error", (error) => {
- if (
- error.toString().includes("FATAL") ||
- JSON.stringify(error).includes("FATAL")
- ) {
- logger.error("The API had a FATAL error... exiting!");
- process.exit(1);
- }
- });
- }
- await this.apiPeople.isReadyOrError;
-
- const [chain, nodeName, nodeVersion] = await Promise.all([
- this.apiPeople.rpc.system.chain(),
- this.apiPeople.rpc.system.name(),
- this.apiPeople.rpc.system.version(),
- ]);
- logger.info(
- `You are connected to chain ${chain} using ${nodeName} v${nodeVersion}`,
- apiLabel,
- );
- return;
- };
-
- checkApiConnection = async (retries = 0): Promise => {
- // Check if the API is already connected
- if (this.handler.getApi()?.isConnected) {
- return true; // API is connected
- }
-
- // If not connected and retries are available
- if (retries < CHAINDATA_RETRIES) {
- await this.delay(1000); // Wait before retrying
- return await this.checkApiConnection(retries + 1); // Recursive call with incremented retries
- }
-
- // If no retries left, perform health check
- logger.warn("Performing health check on api...", chaindataLabel);
- const api = this.handler.getApi();
- if (api) {
- await api.disconnect(); // Ensure disconnect is called on an existing API instance
- }
- const healthy = await this.handler.healthCheck();
-
- if (healthy) {
- this.api = this.handler.getApi();
- return true; // Health check passed, API is healthy
- }
-
- // Exceeded retries without connecting and health check failed
- return false;
- };
-
// Helper function to introduce delay
delay = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms));
@@ -386,15 +325,25 @@ export class ChainData {
};
}
-export const handleError = async (chaindata, e, functionName: string) => {
+export const handleError = async (
+ chaindata,
+ e,
+ functionName: string,
+ handlerType: HandlerType,
+) => {
const errorMessage = e instanceof Error ? e.message : String(e);
+
+ const handler =
+ handlerType === HandlerType.PeopleHandler
+ ? chaindata.peopleHandler
+ : chaindata.handler;
if (errorMessage.includes("RPC rate limit exceeded")) {
logger.warn(
- `RPC rate limit exceeded from ${chaindata.handler.currentEndpoint()}. Switching to a different endpoint.`,
+ `RPC rate limit exceeded from ${handler.currentEndpoint()}. Switching to a different endpoint.`,
apiLabel,
);
try {
- await chaindata.handler.setAPI();
+ await handler.nextEndpoint();
} catch (error) {
logger.error(
`Error while switching to a different endpoint: ${error}`,
diff --git a/packages/common/src/chaindata/queries/ChainMeta.ts b/packages/common/src/chaindata/queries/ChainMeta.ts
index 5bda59330..3509529c7 100644
--- a/packages/common/src/chaindata/queries/ChainMeta.ts
+++ b/packages/common/src/chaindata/queries/ChainMeta.ts
@@ -4,7 +4,7 @@
* @function ChainMeta
*/
-import { ChainData, handleError } from "../chaindata";
+import { ChainData, handleError, HandlerType } from "../chaindata";
import { ApiDecoration } from "@polkadot/api/types";
import { Block } from "@polkadot/types/interfaces";
@@ -13,17 +13,14 @@ export const getChainType = async (
chaindata: ChainData,
): Promise => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return null;
- }
-
- const chainType = await chaindata?.api?.rpc.system.chain();
+ const api = await chaindata.handler.getApi();
+ const chainType = await api.rpc.system.chain();
if (chainType) {
return chainType.toString();
}
return null;
} catch (e) {
- await handleError(chaindata, e, "getChainType");
+ await handleError(chaindata, e, "getChainType", HandlerType.RelayHandler);
return null;
}
};
@@ -32,19 +29,14 @@ export const getDenom = async (
chaindata: ChainData,
): Promise => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return null;
- }
+ const api = await chaindata.handler.getApi();
- const chainType = await chaindata?.api?.rpc.system.chain();
- if (!chainType) {
- return null;
- }
- const denom =
- chainType.toString() == "Polkadot" ? 10000000000 : 1000000000000;
- return denom;
+ const chainProps = await api.registry.getChainProperties();
+ const decimals = chainProps.tokenDecimals.toJSON()[0];
+
+ return 10 ** decimals;
} catch (e) {
- await handleError(chaindata, e, "getDenom");
+ await handleError(chaindata, e, "getDenom", HandlerType.RelayHandler);
return null;
}
};
@@ -54,18 +46,15 @@ export const getApiAt = async (
blockNumber: number,
): Promise | null> => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return null;
- }
-
const hash = await chaindata.getBlockHash(blockNumber);
if (hash) {
- return (await chaindata?.api?.at(hash)) ?? null;
+ const api = await chaindata.handler.getApi();
+ return (await api.at(hash)) ?? null;
} else {
return null;
}
} catch (e) {
- await handleError(chaindata, e, "getApiAt");
+ await handleError(chaindata, e, "getApiAt", HandlerType.RelayHandler);
return null;
}
};
@@ -74,10 +63,7 @@ export const getApiAtBlockHash = async (
blockHash: string,
): Promise | null> => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return null;
- }
- const api = chaindata?.api;
+ const api = await chaindata.handler.getApi();
if (api) {
const apiResult = await api.at(blockHash);
return apiResult ?? null;
@@ -85,7 +71,12 @@ export const getApiAtBlockHash = async (
return null;
}
} catch (e) {
- await handleError(chaindata, e, "getApiAtBlockHash");
+ await handleError(
+ chaindata,
+ e,
+ "getApiAtBlockHash",
+ HandlerType.RelayHandler,
+ );
return null;
}
};
@@ -95,17 +86,14 @@ export const getBlockHash = async (
blockNumber: number,
): Promise => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return null;
- }
-
- const hash = await chaindata?.api?.rpc.chain.getBlockHash(blockNumber);
+ const api = await chaindata.handler.getApi();
+ const hash = await api.rpc.chain.getBlockHash(blockNumber);
if (hash) {
return hash.toString();
}
return null;
} catch (e) {
- await handleError(chaindata, e, "getBlockHash");
+ await handleError(chaindata, e, "getBlockHash", HandlerType.RelayHandler);
return null;
}
};
@@ -115,18 +103,16 @@ export const getBlock = async (
blockNumber: number,
): Promise => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return null;
- }
const hash = await chaindata.getBlockHash(blockNumber);
if (hash) {
- const signedBlock = await chaindata?.api?.rpc.chain.getBlock(hash);
+ const api = await chaindata.handler.getApi();
+ const signedBlock = await api.rpc.chain.getBlock(hash);
return signedBlock?.block ?? null;
} else {
return null;
}
} catch (e) {
- await handleError(chaindata, e, "getBlock");
+ await handleError(chaindata, e, "getBlock", HandlerType.RelayHandler);
return null;
}
};
@@ -135,13 +121,11 @@ export const getLatestBlock = async (
chaindata: ChainData,
): Promise => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return null;
- }
- const block = await chaindata?.api?.rpc.chain.getBlock();
+ const api = await chaindata.handler.getApi();
+ const block = await api.rpc.chain.getBlock();
return block?.block.header.number.toNumber() ?? null;
} catch (e) {
- await handleError(chaindata, e, "getLatestBlock");
+ await handleError(chaindata, e, "getLatestBlock", HandlerType.RelayHandler);
return null;
}
};
@@ -149,16 +133,19 @@ export const getLatestBlockHash = async (
chaindata: ChainData,
): Promise => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return null;
- }
- const hash = await chaindata?.api?.rpc.chain.getBlockHash();
+ const api = await chaindata.handler.getApi();
+ const hash = await api.rpc.chain.getBlockHash();
if (hash) {
return hash.toString();
}
return null;
} catch (e) {
- await handleError(chaindata, e, "getLatestBlockHash");
+ await handleError(
+ chaindata,
+ e,
+ "getLatestBlockHash",
+ HandlerType.RelayHandler,
+ );
return null;
}
};
diff --git a/packages/common/src/chaindata/queries/Era.ts b/packages/common/src/chaindata/queries/Era.ts
index 277d41c10..2a5be8030 100644
--- a/packages/common/src/chaindata/queries/Era.ts
+++ b/packages/common/src/chaindata/queries/Era.ts
@@ -1,4 +1,4 @@
-import ChainData, { handleError } from "../chaindata";
+import ChainData, { handleError, HandlerType } from "../chaindata";
import logger from "../../logger";
import { NumberResult, StringResult } from "../../types";
import {
@@ -13,14 +13,10 @@ export const getEraAt = async (
apiAt: ApiDecoration<"promise">,
): Promise => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return null;
- }
-
return ((await apiAt.query.staking.activeEra()).toJSON() as any)
.index as number;
} catch (e) {
- await handleError(chaindata, e, "getEraAt");
+ await handleError(chaindata, e, "getEraAt", HandlerType.RelayHandler);
return null;
}
};
@@ -34,14 +30,12 @@ export interface EraPointsInfo {
eraPoints: number;
}>;
}
+
export const getTotalEraPoints = async (
chaindata: ChainData,
era: number,
): Promise => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return {} as EraPointsInfo;
- }
const chainType = await chaindata.getChainType();
if (!chainType) {
return {} as EraPointsInfo;
@@ -52,7 +46,8 @@ export const getTotalEraPoints = async (
);
if (blockHash) {
- const apiAt = await chaindata?.api?.at(blockHash);
+ const api = await chaindata.handler.getApi();
+ const apiAt = await api.at(blockHash);
if (!apiAt) {
return {} as EraPointsInfo;
@@ -77,7 +72,12 @@ export const getTotalEraPoints = async (
}
return {} as EraPointsInfo;
} catch (e) {
- await handleError(chaindata, e, "getTotalEraPoints");
+ await handleError(
+ chaindata,
+ e,
+ "getTotalEraPoints",
+ HandlerType.RelayHandler,
+ );
return {} as EraPointsInfo;
}
};
@@ -88,10 +88,6 @@ export const getErasMinStakeAt = async (
era: number,
): Promise => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return null;
- }
-
const denom: number | null = await chaindata.getDenom();
if (denom === null) {
return null;
@@ -114,7 +110,12 @@ export const getErasMinStakeAt = async (
return minStake[0]?.total;
}
} catch (e) {
- await handleError(chaindata, e, "getErasMinStakeAt");
+ await handleError(
+ chaindata,
+ e,
+ "getErasMinStakeAt",
+ HandlerType.RelayHandler,
+ );
return null;
}
};
@@ -123,10 +124,8 @@ export const getActiveEraIndex = async (
chaindata: ChainData,
): Promise => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return [0, "API not connected"];
- }
- const activeEra = await chaindata?.api?.query.staking.activeEra();
+ const api = await chaindata.handler.getApi();
+ const activeEra = await api.query.staking.activeEra();
if (!activeEra || activeEra.isNone) {
logger.info(`NO ACTIVE ERA:`);
return [
@@ -137,7 +136,12 @@ export const getActiveEraIndex = async (
const activeEraNumber = activeEra.unwrap().index.toNumber();
return [activeEraNumber, null];
} catch (e) {
- await handleError(chaindata, e, "getActiveEraIndex");
+ await handleError(
+ chaindata,
+ e,
+ "getActiveEraIndex",
+ HandlerType.RelayHandler,
+ );
return [0, JSON.stringify(e)];
}
};
@@ -146,13 +150,11 @@ export const getCurrentEra = async (
chaindata: ChainData,
): Promise => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return null;
- }
- const currentEra = await chaindata?.api?.query.staking.currentEra();
+ const api = await chaindata.handler.getApi();
+ const currentEra = await api.query.staking.currentEra();
return Number(currentEra);
} catch (e) {
- await handleError(chaindata, e, "getCurrentEra");
+ await handleError(chaindata, e, "getCurrentEra", HandlerType.RelayHandler);
return null;
}
};
@@ -163,9 +165,6 @@ export const findEraBlockHash = async (
chainType: string,
): Promise => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return ["", "API not connected."];
- }
const eraBlockLength =
chainType == "Kusama"
? KUSAMA_APPROX_ERA_LENGTH_IN_BLOCKS
@@ -173,8 +172,6 @@ export const findEraBlockHash = async (
? POLKADOT_APPROX_ERA_LENGTH_IN_BLOCKS
: TESTNET_APPROX_ERA_LENGTH_IN_BLOCKS;
- await chaindata.checkApiConnection();
-
const [activeEraIndex, err] = await chaindata.getActiveEraIndex();
if (err) {
return ["", err];
@@ -184,7 +181,8 @@ export const findEraBlockHash = async (
return ["", "Era has not happened."];
}
- const latestBlock = await chaindata?.api?.rpc.chain.getBlock();
+ const api = await chaindata.handler.getApi();
+ const latestBlock = await api.rpc.chain.getBlock();
if (!latestBlock) {
return ["", "Latest block is null"];
}
@@ -202,7 +200,7 @@ export const findEraBlockHash = async (
if (!blockHash) {
return ["", "Block hash is null"];
}
- const apiAt = await chaindata?.api?.at(blockHash);
+ const apiAt = await api.at(blockHash);
if (!apiAt) {
return ["", "API at block hash is null"];
}
@@ -227,7 +225,12 @@ export const findEraBlockHash = async (
}
return ["", "Not Found!"];
} catch (e) {
- await handleError(chaindata, e, "findEraBlockHash");
+ await handleError(
+ chaindata,
+ e,
+ "findEraBlockHash",
+ HandlerType.RelayHandler,
+ );
return ["", JSON.stringify(e)];
}
};
@@ -238,9 +241,6 @@ export const findEraBlockNumber = async (
chainType: string,
): Promise => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return [0, "API not connected."];
- }
const eraBlockLength =
chainType == "Kusama"
? KUSAMA_APPROX_ERA_LENGTH_IN_BLOCKS
@@ -248,8 +248,6 @@ export const findEraBlockNumber = async (
? POLKADOT_APPROX_ERA_LENGTH_IN_BLOCKS
: TESTNET_APPROX_ERA_LENGTH_IN_BLOCKS;
- await chaindata.checkApiConnection();
-
const [activeEraIndex, err] = await chaindata.getActiveEraIndex();
if (err) {
return [0, err];
@@ -281,9 +279,10 @@ export const findEraBlockNumber = async (
if (!blockHash) {
return [0, "Block hash is null"];
}
+
+ const api = await chaindata.handler.getApi();
const testEra =
- (await chaindata?.api?.query.staking.activeEra.at(blockHash)) ||
- undefined; // Handle possible undefined
+ (await api.query.staking.activeEra.at(blockHash)) || undefined; // Handle possible undefined
if (!testEra || testEra.isNone) {
logger.info(`Test era is none`);
return [0, "Test era is none"];
@@ -303,7 +302,12 @@ export const findEraBlockNumber = async (
}
return [0, "Not Found!"];
} catch (e) {
- await handleError(chaindata, e, "findEraBlockNumber");
+ await handleError(
+ chaindata,
+ e,
+ "findEraBlockNumber",
+ HandlerType.RelayHandler,
+ );
return [0, JSON.stringify(e)];
}
};
diff --git a/packages/common/src/chaindata/queries/Identity.ts b/packages/common/src/chaindata/queries/Identity.ts
index d6d90d3b5..8e8a2c2ff 100644
--- a/packages/common/src/chaindata/queries/Identity.ts
+++ b/packages/common/src/chaindata/queries/Identity.ts
@@ -1,4 +1,4 @@
-import Chaindata, { handleError } from "../chaindata";
+import Chaindata, { handleError, HandlerType } from "../chaindata";
import { Identity } from "../../types";
export const hasIdentity = async (
@@ -6,7 +6,7 @@ export const hasIdentity = async (
account: string,
): Promise<[boolean, boolean]> => {
try {
- const api = chaindata.apiPeople ? chaindata.apiPeople : chaindata.api;
+ const api = await chaindata.peopleHandler.getApi();
if (!api?.isConnected) {
return [false, false];
@@ -33,7 +33,7 @@ export const hasIdentity = async (
return [identity ? identity.isSome : false, verified];
} catch (e) {
- if (!chaindata.apiPeople) await handleError(chaindata, e, "hasIdentity");
+ await handleError(chaindata, e, "hasIdentity", HandlerType.PeopleHandler);
return [false, true];
}
};
@@ -43,7 +43,8 @@ export const getFormattedIdentity = async (
addr: string,
): Promise => {
try {
- const api = chaindata.apiPeople ? chaindata.apiPeople : chaindata.api;
+ const api = await chaindata.peopleHandler.getApi();
+
if (!api?.isConnected) {
return null;
}
@@ -155,8 +156,12 @@ export const getFormattedIdentity = async (
return identity;
} catch (e) {
- if (!chaindata.apiPeople)
- await handleError(chaindata, e, "getFormattedIdentity");
+ await handleError(
+ chaindata,
+ e,
+ "getFormattedIdentity",
+ HandlerType.PeopleHandler,
+ );
return null;
}
};
diff --git a/packages/common/src/chaindata/queries/Nomination.ts b/packages/common/src/chaindata/queries/Nomination.ts
index 241636fd5..0f1756b1d 100644
--- a/packages/common/src/chaindata/queries/Nomination.ts
+++ b/packages/common/src/chaindata/queries/Nomination.ts
@@ -1,13 +1,11 @@
-import Chaindata, { handleError } from "../chaindata";
+import Chaindata, { handleError, HandlerType } from "../chaindata";
export const getNominatorAddresses = async (
chaindata: Chaindata,
): Promise => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return [];
- }
- const nominators = await chaindata.api?.query.staking.nominators.entries();
+ const api = await chaindata.handler.getApi();
+ const nominators = await api.query.staking.nominators.entries();
const nominatorMap = nominators
?.map((nominator) => {
const [key, targets] = nominator;
@@ -22,7 +20,12 @@ export const getNominatorAddresses = async (
.filter((address) => address !== undefined) as string[];
return nominatorMap;
} catch (e) {
- await handleError(chaindata, e, "getNominatorAddresses");
+ await handleError(
+ chaindata,
+ e,
+ "getNominatorAddresses",
+ HandlerType.RelayHandler,
+ );
return [];
}
};
@@ -37,11 +40,8 @@ export const getNominators = async (
chaindata: Chaindata,
): Promise => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return [];
- }
- const nominatorEntries =
- await chaindata.api?.query.staking.nominators.entries();
+ const api = await chaindata.handler.getApi();
+ const nominatorEntries = await api.query.staking.nominators.entries();
if (!nominatorEntries) {
return [];
}
@@ -66,7 +66,7 @@ export const getNominators = async (
}),
);
} catch (e) {
- await handleError(chaindata, e, "getNominators");
+ await handleError(chaindata, e, "getNominators", HandlerType.RelayHandler);
return [];
}
};
@@ -77,14 +77,16 @@ export const getNominatorLastNominationEra = async (
address: string,
): Promise => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return null;
- }
- const lastNominationEra =
- await chaindata.api?.query.staking.nominators(address);
+ const api = await chaindata.handler.getApi();
+ const lastNominationEra = await api.query.staking.nominators(address);
return lastNominationEra?.unwrapOrDefault().submittedIn.toNumber() || null;
} catch (e) {
- await handleError(chaindata, e, "getNominatorLastNominationEra");
+ await handleError(
+ chaindata,
+ e,
+ "getNominatorLastNominationEra",
+ HandlerType.RelayHandler,
+ );
return null;
}
};
@@ -95,13 +97,16 @@ export const getNominatorCurrentTargets = async (
address: string,
): Promise => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return null;
- }
- const targets = await chaindata.api?.query.staking.nominators(address);
+ const api = await chaindata.handler.getApi();
+ const targets = await api.query.staking.nominators(address);
return targets?.unwrapOrDefault().targets.toJSON() as string[];
} catch (e) {
- await handleError(chaindata, e, "getNominatorCurrentTargets");
+ await handleError(
+ chaindata,
+ e,
+ "getNominatorCurrentTargets",
+ HandlerType.RelayHandler,
+ );
return null;
}
};
diff --git a/packages/common/src/chaindata/queries/Proxy.ts b/packages/common/src/chaindata/queries/Proxy.ts
index 2c46ed2ce..0766ca507 100644
--- a/packages/common/src/chaindata/queries/Proxy.ts
+++ b/packages/common/src/chaindata/queries/Proxy.ts
@@ -1,4 +1,4 @@
-import { ChainData, handleError } from "../chaindata";
+import { ChainData, handleError, HandlerType } from "../chaindata";
export interface ProxyAnnouncement {
real: string;
@@ -11,11 +11,8 @@ export const getProxyAnnouncements = async (
address: string,
): Promise => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return [];
- }
- const announcements =
- await chaindata.api?.query.proxy.announcements(address);
+ const api = await chaindata.handler.getApi();
+ const announcements = await api.query.proxy.announcements(address);
if (!announcements) {
return [];
}
@@ -36,7 +33,12 @@ export const getProxyAnnouncements = async (
return [];
}
} catch (e) {
- await handleError(chaindata, e, "getProxyAnnouncements");
+ await handleError(
+ chaindata,
+ e,
+ "getProxyAnnouncements",
+ HandlerType.RelayHandler,
+ );
return [];
}
};
diff --git a/packages/common/src/chaindata/queries/Session.ts b/packages/common/src/chaindata/queries/Session.ts
index 299fb9643..70134c94f 100644
--- a/packages/common/src/chaindata/queries/Session.ts
+++ b/packages/common/src/chaindata/queries/Session.ts
@@ -4,25 +4,22 @@
* @function Sessio
*/
-import { ChainData, handleError } from "../chaindata";
+import { ChainData, handleError, HandlerType } from "../chaindata";
import { ApiDecoration } from "@polkadot/api/types";
export const getSession = async (
chaindata: ChainData,
): Promise => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return null;
- }
-
- const currentIndex = await chaindata?.api?.query.session.currentIndex();
+ const api = await chaindata.handler.getApi();
+ const currentIndex = await api.query.session.currentIndex();
if (currentIndex !== undefined) {
return Number(currentIndex.toString());
} else {
return null;
}
} catch (e) {
- await handleError(chaindata, e, "getSession");
+ await handleError(chaindata, e, "getSession", HandlerType.RelayHandler);
return null;
}
};
@@ -32,14 +29,10 @@ export const getSessionAt = async (
apiAt: ApiDecoration<"promise">,
): Promise => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return null;
- }
-
const session = (await apiAt.query.session.currentIndex()).toString();
return parseInt(session.replace(/,/g, ""));
} catch (e) {
- await handleError(chaindata, e, "getSessionAt");
+ await handleError(chaindata, e, "getSessionAt", HandlerType.RelayHandler);
return null;
}
};
@@ -49,9 +42,6 @@ export const getSessionAtEra = async (
era: number,
): Promise => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return null;
- }
const chainType = await chaindata.getChainType();
if (chainType) {
const [blockHash, err] = await chaindata.findEraBlockHash(era, chainType);
@@ -64,7 +54,12 @@ export const getSessionAtEra = async (
}
return null;
} catch (e) {
- await handleError(chaindata, e, "getSessionAtEra");
+ await handleError(
+ chaindata,
+ e,
+ "getSessionAtEra",
+ HandlerType.RelayHandler,
+ );
return null;
}
};
diff --git a/packages/common/src/chaindata/queries/ValidatorPref.ts b/packages/common/src/chaindata/queries/ValidatorPref.ts
index ec716b6da..f0ca6ece4 100644
--- a/packages/common/src/chaindata/queries/ValidatorPref.ts
+++ b/packages/common/src/chaindata/queries/ValidatorPref.ts
@@ -1,5 +1,9 @@
import { NumberResult } from "../../types";
-import ChainData, { chaindataLabel, handleError } from "../chaindata";
+import ChainData, {
+ chaindataLabel,
+ handleError,
+ HandlerType,
+} from "../chaindata";
import logger from "../../logger";
import { ApiDecoration } from "@polkadot/api/types";
@@ -8,16 +12,14 @@ export const getCommission = async (
validator: string,
): Promise => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return [0, "API not connected."];
- }
- const prefs = await chaindata?.api?.query.staking.validators(validator);
+ const api = await chaindata.handler.getApi();
+ const prefs = await api.query.staking.validators(validator);
if (!prefs) {
return [0, "No preferences found."];
}
return [prefs.commission.toNumber(), null];
} catch (e) {
- await handleError(chaindata, e, "getCommission");
+ await handleError(chaindata, e, "getCommission", HandlerType.RelayHandler);
return [0, JSON.stringify(e)];
}
};
@@ -29,16 +31,18 @@ export const getCommissionInEra = async (
validator: string,
): Promise => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return null;
- }
const prefs = await apiAt?.query?.staking.erasValidatorPrefs(
eraIndex,
validator,
);
return prefs?.commission?.toNumber();
} catch (e) {
- await handleError(chaindata, e, "getCommissionInEra");
+ await handleError(
+ chaindata,
+ e,
+ "getCommissionInEra",
+ HandlerType.RelayHandler,
+ );
return null;
}
};
@@ -48,13 +52,11 @@ export const getBlocked = async (
validator: string,
): Promise => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return false;
- }
- const rawPrefs = await chaindata?.api?.query.staking.validators(validator);
+ const api = await chaindata.handler.getApi();
+ const rawPrefs = await api.query.staking.validators(validator);
return rawPrefs?.blocked?.toString() === "true";
} catch (e) {
- await handleError(chaindata, e, "getBlocked");
+ await handleError(chaindata, e, "getBlocked", HandlerType.RelayHandler);
return false;
}
};
@@ -66,17 +68,15 @@ export const isBonded = async (
stash: string,
): Promise => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return false;
- }
- const bonded = await chaindata?.api?.query.staking.bonded(stash);
+ const api = await chaindata.handler.getApi();
+ const bonded = await api.query.staking.bonded(stash);
if (bonded) {
return bonded.isSome;
} else {
return false;
}
} catch (e) {
- await handleError(chaindata, e, "isBonded");
+ await handleError(chaindata, e, "isBonded", HandlerType.RelayHandler);
return false;
}
};
@@ -87,15 +87,13 @@ export const getDenomBondedAmount = async (
stash: string,
): Promise => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return [0, "API not connected."];
- }
- const bondedAddress = await chaindata?.api?.query.staking.bonded(stash);
+ const api = await chaindata.handler.getApi();
+ const bondedAddress = await api.query.staking.bonded(stash);
if (!bondedAddress || bondedAddress.isNone) {
return [0, "Not bonded to any account."];
}
- const ledger: any = await chaindata?.api?.query.staking.ledger(
+ const ledger: any = await api.query.staking.ledger(
bondedAddress.toString(),
);
if (!ledger || ledger.isNone) {
@@ -110,7 +108,12 @@ export const getDenomBondedAmount = async (
return [0, null];
}
} catch (e) {
- await handleError(chaindata, e, "getDenomBondedAmount");
+ await handleError(
+ chaindata,
+ e,
+ "getDenomBondedAmount",
+ HandlerType.RelayHandler,
+ );
return [0, JSON.stringify(e)];
}
};
@@ -120,15 +123,13 @@ export const getBondedAmount = async (
stash: string,
): Promise => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return [0, "API not connected."];
- }
- const bondedAddress = await chaindata?.api?.query.staking.bonded(stash);
+ const api = await chaindata.handler.getApi();
+ const bondedAddress = await api.query.staking.bonded(stash);
if (!bondedAddress || bondedAddress.isNone) {
return [0, "Not bonded to any account."];
}
- const ledger: any = await chaindata?.api?.query.staking.ledger(
+ const ledger: any = await api.query.staking.ledger(
bondedAddress.toString(),
);
if (!ledger || ledger.isNone) {
@@ -147,16 +148,19 @@ export const getControllerFromStash = async (
stash: string,
): Promise => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return null;
- }
- const controller = await chaindata?.api?.query.staking.bonded(stash);
+ const api = await chaindata.handler.getApi();
+ const controller = await api.query.staking.bonded(stash);
if (!controller) {
return null;
}
return controller.toString();
} catch (e) {
- await handleError(chaindata, e, "getControllerFromStash");
+ await handleError(
+ chaindata,
+ e,
+ "getControllerFromStash",
+ HandlerType.RelayHandler,
+ );
return null;
}
};
@@ -166,18 +170,20 @@ export const getRewardDestination = async (
stash: string,
): Promise => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return null;
- }
- const rewardDestination: any =
- await chaindata.api?.query.staking.payee(stash);
+ const api = await chaindata.handler.getApi();
+ const rewardDestination: any = await api.query.staking.payee(stash);
if (rewardDestination?.toJSON()?.account) {
return rewardDestination?.toJSON()?.account;
} else {
return rewardDestination?.toString();
}
} catch (e) {
- await handleError(chaindata, e, "getRewardDestination");
+ await handleError(
+ chaindata,
+ e,
+ "getRewardDestination",
+ HandlerType.RelayHandler,
+ );
return null;
}
};
@@ -188,9 +194,6 @@ export const getRewardDestinationAt = async (
stash: string,
): Promise => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return null;
- }
const rewardDestination: any = await apiAt.query.staking.payee(stash);
if (rewardDestination.toJSON().account) {
return rewardDestination.toJSON().account;
@@ -212,10 +215,8 @@ export const getQueuedKeys = async (
chaindata: ChainData,
): Promise => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return [];
- }
- const queuedKeys = await chaindata.api?.query.session.queuedKeys();
+ const api = await chaindata.handler.getApi();
+ const queuedKeys = await api.query.session.queuedKeys();
if (!queuedKeys) {
return [];
}
@@ -227,7 +228,7 @@ export const getQueuedKeys = async (
});
return keys;
} catch (e) {
- await handleError(chaindata, e, "getQueuedKeys");
+ await handleError(chaindata, e, "getQueuedKeys", HandlerType.RelayHandler);
return [];
}
};
@@ -249,10 +250,8 @@ export const getNextKeys = async (
stash: string,
): Promise => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return null;
- }
- const nextKeysRaw = await chaindata.api?.query.session.nextKeys(stash);
+ const api = await chaindata.handler.getApi();
+ const nextKeysRaw = await api.query.session.nextKeys(stash);
if (!nextKeysRaw) {
return null;
}
@@ -267,7 +266,7 @@ export const getNextKeys = async (
}
}
} catch (e) {
- await handleError(chaindata, e, "getNextKeys");
+ await handleError(chaindata, e, "getNextKeys", HandlerType.RelayHandler);
}
return null;
};
@@ -281,10 +280,8 @@ export const getBalance = async (
address: string,
): Promise => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return null;
- }
- const accountData = await chaindata.api?.query.system.account(address);
+ const api = await chaindata.handler.getApi();
+ const accountData = await api.query.system.account(address);
if (!accountData) {
return null;
}
@@ -315,14 +312,9 @@ export const getExposure = async (
validator: string,
): Promise => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return null;
- }
+ const api = await chaindata.handler.getApi();
const denom = await chaindata.getDenom();
- const eraStakers = await chaindata.api?.query.staking.erasStakers(
- eraIndex,
- validator,
- );
+ const eraStakers = await api.query.staking.erasStakers(eraIndex, validator);
if (eraStakers && denom) {
const total = parseFloat(eraStakers.total.toString()) / denom;
const own = parseFloat(eraStakers.own.toString()) / denom;
@@ -346,7 +338,7 @@ export const getExposure = async (
return null;
} catch (e) {
- await handleError(chaindata, e, "getExposure");
+ await handleError(chaindata, e, "getExposure", HandlerType.RelayHandler);
return null;
}
};
@@ -358,9 +350,6 @@ export const getExposureAt = async (
validator: string,
): Promise => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return null;
- }
const denom = await chaindata.getDenom();
const eraStakers = await apiAt.query.staking.erasStakers(
eraIndex,
@@ -384,7 +373,7 @@ export const getExposureAt = async (
}
return null;
} catch (e) {
- await handleError(chaindata, e, "getExposureAt");
+ await handleError(chaindata, e, "getExposureAt", HandlerType.RelayHandler);
return null;
}
};
diff --git a/packages/common/src/chaindata/queries/Validators.ts b/packages/common/src/chaindata/queries/Validators.ts
index 31c46bb1a..a3cb2be69 100644
--- a/packages/common/src/chaindata/queries/Validators.ts
+++ b/packages/common/src/chaindata/queries/Validators.ts
@@ -1,4 +1,4 @@
-import Chaindata, { handleError } from "../chaindata";
+import Chaindata, { handleError, HandlerType } from "../chaindata";
import logger from "../../logger";
export const getActiveValidatorsInPeriod = async (
@@ -8,9 +8,6 @@ export const getActiveValidatorsInPeriod = async (
chainType: string,
): Promise<[string[] | null, string | null]> => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return [null, null];
- }
const allValidators: Set<string> = new Set();
let testEra = startEra;
while (testEra <= endEra) {
@@ -22,8 +19,8 @@ export const getActiveValidatorsInPeriod = async (
return [null, err];
}
- const validators =
- await chaindata.api?.query.session.validators.at(blockHash);
+ const api = await chaindata.handler.getApi();
+ const validators = await api.query.session.validators.at(blockHash);
if (!validators) {
return [null, "Error getting validators"];
}
@@ -39,7 +36,12 @@ export const getActiveValidatorsInPeriod = async (
return [Array.from(allValidators), null];
} catch (e) {
- await handleError(chaindata, e, "getActiveValidatorsInPeriod");
+ await handleError(
+ chaindata,
+ e,
+ "getActiveValidatorsInPeriod",
+ HandlerType.RelayHandler,
+ );
return [[], JSON.stringify(e)];
}
};
@@ -48,17 +50,19 @@ export const currentValidators = async (
chaindata: Chaindata,
): Promise<string[]> => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return [];
- }
-
- const validators = await chaindata.api?.query.session.validators();
+ const api = await chaindata.handler.getApi();
+ const validators = await api.query.session.validators();
if (!validators) {
return [];
}
return validators.toJSON() as string[];
} catch (e) {
- await handleError(chaindata, e, "currentValidators");
+ await handleError(
+ chaindata,
+ e,
+ "currentValidators",
+ HandlerType.RelayHandler,
+ );
return [];
}
};
@@ -67,10 +71,8 @@ export const getValidators = async (
chaindata: Chaindata,
): Promise<string[]> => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return [];
- }
- const keys = await chaindata.api?.query.staking.validators.keys();
+ const api = await chaindata.handler.getApi();
+ const keys = await api.query.staking.validators.keys();
if (!keys) {
return [];
}
@@ -78,7 +80,7 @@ export const getValidators = async (
return validators;
} catch (e) {
- await handleError(chaindata, e, "getValidators");
+ await handleError(chaindata, e, "getValidators", HandlerType.RelayHandler);
return [];
}
};
@@ -88,12 +90,14 @@ export const getValidatorsAt = async (
apiAt: any,
): Promise<string[]> => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return [];
- }
return (await apiAt.query.session.validators()).toJSON();
} catch (e) {
- await handleError(chaindata, e, "getValidatorsAt");
+ await handleError(
+ chaindata,
+ e,
+ "getValidatorsAt",
+ HandlerType.RelayHandler,
+ );
return [];
}
};
@@ -103,20 +107,23 @@ export const getValidatorsAtEra = async (
era: number,
): Promise<string[]> => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return [];
- }
const chainType = await chaindata.getChainType();
if (chainType) {
const [blockHash, err] = await chaindata.findEraBlockHash(era, chainType);
if (blockHash) {
- const apiAt = await chaindata.api?.at(blockHash);
+ const api = await chaindata.handler.getApi();
+ const apiAt = await api.at(blockHash);
return getValidatorsAt(chaindata, apiAt);
}
}
return [];
} catch (e) {
- await handleError(chaindata, e, "getValidatorsAtEra");
+ await handleError(
+ chaindata,
+ e,
+ "getValidatorsAtEra",
+ HandlerType.RelayHandler,
+ );
return [];
}
};
@@ -125,12 +132,10 @@ export const getAssociatedValidatorAddresses = async (
chaindata: Chaindata,
): Promise<string[]> => {
try {
- if (!(await chaindata.checkApiConnection())) {
- return [];
- }
const addresses: string[] = [];
- const keys = await chaindata.api?.query.staking.validators.keys();
+ const api = await chaindata.handler.getApi();
+ const keys = await api.query.staking.validators.keys();
if (!keys) {
return [];
}
@@ -149,7 +154,12 @@ export const getAssociatedValidatorAddresses = async (
return addresses;
} catch (e) {
- await handleError(chaindata, e, "getAssociatedValidatorAddresses");
+ await handleError(
+ chaindata,
+ e,
+ "getAssociatedValidatorAddresses",
+ HandlerType.RelayHandler,
+ );
return [];
}
};
diff --git a/packages/common/src/config.ts b/packages/common/src/config.ts
index 015a700e4..b34c2e813 100644
--- a/packages/common/src/config.ts
+++ b/packages/common/src/config.ts
@@ -24,10 +24,13 @@ export type ConfigSchema = {
constraints: {
skipConnectionTime: boolean;
skipIdentity: boolean;
- skipClientUpgrade: boolean;
skipUnclaimed: boolean;
skipClaiming: boolean;
- forceClientVersion: string;
+ clientUpgrade: {
+ skip: boolean;
+ releaseTagFormat?: string;
+ forcedVersion?: string;
+ };
minSelfStake: number;
commission: number;
unclaimedEraThreshold: number;
@@ -84,6 +87,7 @@ export type ConfigSchema = {
dryRun: boolean;
networkPrefix: 0 | 2 | 3;
apiEndpoints: string[];
+ apiPeopleEndpoints?: string[];
bootstrap: boolean;
kusamaBootstrapEndpoint: string;
polkadotBootstrapEndpoint: string;
@@ -96,11 +100,6 @@ export type ConfigSchema = {
room: string;
userId: string;
};
- redis: {
- enable: boolean;
- host: string;
- port: number;
- };
proxy: {
timeDelayBlocks: number;
blacklistedAnnouncements: string[];
diff --git a/packages/common/src/constants.ts b/packages/common/src/constants.ts
index bfbffd995..f26f70c14 100644
--- a/packages/common/src/constants.ts
+++ b/packages/common/src/constants.ts
@@ -47,10 +47,6 @@ export const BALANCE_BUFFER_AMOUNT = 20;
// Timeout threshold for polkadot js api - 360 seconds
export const POLKADOT_API_TIMEOUT = 1000000;
-export const CHAINDATA_RETRIES = 20;
-
-export const CHAINDATA_SLEEP = 300;
-
export const API_PROVIDER_TIMEOUT = 4000;
// The number of eras a nominator should wait until making a next nomination
@@ -60,12 +56,14 @@ export const NOMINATOR_SHOULD_NOMINATE_ERAS_THRESHOLD = 1;
export const KusamaEndpoints = [
"wss://kusama-rpc-tn.dwellir.com",
"wss://kusama-rpc.dwellir.com",
- "wss://kusama.public.curie.radiumblock.xyz/ws",
+ "wss://kusama.public.curie.radiumblock.co/ws",
"wss://rpc.ibp.network/kusama",
"wss://rpc.dotters.network/kusama",
"wss://ksm-rpc.stakeworld.io",
];
+export const KusamaPeopleEndpoints = ["wss://kusama-people-rpc.polkadot.io"];
+
/// List of Polkadot endpoints we can switch between.
export const PolkadotEndpoints = [
"wss://rpc.polkadot.io",
@@ -87,6 +85,8 @@ export const defaultWsOptions = {
debug: true,
};
+export const defaultReleaseTagFormat = "polkadot-v\\d+\\.\\d+\\.\\d+"; // => /polkadot-v\d+\.\d+\.\d+/
+
// 2 days in milliseconds
export const STALE_TELEMETRY_THRESHOLD = 172800000;
diff --git a/packages/common/src/constraints/ValidityChecks.ts b/packages/common/src/constraints/ValidityChecks.ts
index 212bec714..a707bb4a2 100644
--- a/packages/common/src/constraints/ValidityChecks.ts
+++ b/packages/common/src/constraints/ValidityChecks.ts
@@ -16,6 +16,7 @@ import {
setUnclaimedInvalidity,
setValidateIntentionValidity,
setSanctionedGeoAreaValidity,
+ ReleaseSchema,
} from "../db";
import { ChainData, Config, Constants, queries, Util } from "../index";
import axios from "axios";
@@ -64,87 +65,107 @@ export const checkValidateIntention = async (
}
};
-// checks that the validator is on the latest client version
-export const checkLatestClientVersion = async (
+const _getLatestRelease = async (
config: Config.ConfigSchema,
+): Promise<ReleaseSchema | null> => {
+ if (config.constraints.clientUpgrade.forcedVersion) {
+ //i.e. useful in case of downgrade necessary
+ return {
+ name: config.constraints.clientUpgrade.forcedVersion,
+ publishedAt: 0,
+ };
+ }
+
+ // Get the latest release from the db or github
+ let latestRelease = await getLatestRelease();
+ if (!latestRelease) {
+ logger.info(
+ `No latest release found, fetching from GitHub`,
+ constraintsLabel,
+ );
+ // fetch from github and set in the db
+ await getLatestTaggedRelease(
+ config.constraints.clientUpgrade.releaseTagFormat,
+ );
+ // get the record from the db
+ latestRelease = await getLatestRelease();
+ logger.info(
+ `Latest release fetched from GitHub: ${latestRelease}`,
+ constraintsLabel,
+ );
+ }
+
+ return latestRelease;
+};
+
+const _checkLatestClientVersion = async (
+ config: Config.ConfigSchema,
+ latestRelease: ReleaseSchema,
candidate: Candidate,
): Promise<boolean> => {
- try {
- const skipClientUpgrade = config.constraints?.skipClientUpgrade || false;
- if (skipClientUpgrade || candidate?.implementation === "Kagome Node") {
- // Skip the check if the node is a Kagome Client or if skipping client upgrade is enabled
- await setLatestClientReleaseValidity(candidate, true);
- return true;
- }
-
- // The latest release that is manually set in the config (set if there's reasons for people to downgrade)
- const forceLatestRelease = config.constraints.forceClientVersion;
+ if (
+ config.constraints.clientUpgrade.skip ||
+ candidate?.implementation === "Kagome Node"
+ ) {
+ // Skip the check if the node is a Kagome Client or if skipping client upgrade is enabled
+ await setLatestClientReleaseValidity(candidate, true);
+ return true;
+ }
- // Get the latest release from the db or github
- let latestRelease = await getLatestRelease();
- if (!latestRelease) {
- logger.info(
- `No latest release found, fetching from GitHub`,
- constraintsLabel,
- );
- // fetch from github and set in the db
- await getLatestTaggedRelease();
- // get the record from the db
- latestRelease = await getLatestRelease();
- logger.info(
- `Latest release fetched from GitHub: ${latestRelease}`,
- constraintsLabel,
- );
- }
+ if (!latestRelease || !latestRelease.name || !latestRelease.publishedAt) {
+ logger.error(
+ `Latest release isn't properly set, defaulting the validity to true... `,
+ );
+ await setLatestClientReleaseValidity(candidate, true);
+ return true;
+ }
- // Ensure latestRelease contains a valid name
- if (!latestRelease || !latestRelease.name) {
- logger.error(
- `Latest release name is null or undefined: ${latestRelease}`,
- constraintsLabel,
- );
- return false;
- }
+ // Check if there is a latest release and if the current time is past the grace window
+ const isPastGraceWindow =
+ Date.now() > latestRelease.publishedAt + Constants.FORTY_EIGHT_HOURS;
- // Check if there is a latest release and if the current time is past the grace window
- const isPastGraceWindow =
- Date.now() > latestRelease.publishedAt + Constants.FORTY_EIGHT_HOURS;
+ if (isPastGraceWindow) {
+ const nodeVersion = semver.coerce(candidate.version);
+ const latestVersion = semver.clean(latestRelease.name);
- if (isPastGraceWindow) {
- const nodeVersion = semver.coerce(candidate.version);
- const latestVersion = forceLatestRelease
- ? semver.clean(forceLatestRelease)
- : semver.clean(latestRelease.name);
+ logger.info(
+ `Past grace window of latest release, checking latest client version: ${nodeVersion} >= ${latestVersion}`,
+ constraintsLabel,
+ );
- logger.info(
- `Past grace window of latest release, checking latest client version: ${nodeVersion} >= ${latestVersion}`,
- constraintsLabel,
- );
+ // If cannot parse the version, set the release as invalid
+ if (!nodeVersion || !latestVersion) {
+ await setLatestClientReleaseValidity(candidate, false);
+ return false;
+ }
- // If cannot parse the version, set the release as invalid
- if (!nodeVersion || !latestVersion) {
- await setLatestClientReleaseValidity(candidate, false);
- return false;
- }
+ const isUpgraded = semver.gte(nodeVersion, latestVersion);
- const isUpgraded = semver.gte(nodeVersion, latestVersion);
+ // If they are not upgraded, set the validity as invalid
+ if (!isUpgraded) {
+ await setLatestClientReleaseValidity(candidate, false);
+ return false;
+ }
- // If they are not upgraded, set the validity as invalid
- if (!isUpgraded) {
- await setLatestClientReleaseValidity(candidate, false);
- return false;
- }
+ // If the current version is the latest release, set the release as valid
+ await setLatestClientReleaseValidity(candidate, true);
+ return true;
+ } else {
+ logger.info(`Still in grace window of latest release`, constraintsLabel);
- // If the current version is the latest release, set the release as valid
- await setLatestClientReleaseValidity(candidate, true);
- return true;
- } else {
- logger.info(`Still in grace window of latest release`, constraintsLabel);
+ await setLatestClientReleaseValidity(candidate, true);
+ return true;
+ }
+};
- // If not past the grace window, set the release as invalid
- await setLatestClientReleaseValidity(candidate, true);
- return true;
- }
+// checks that the validator is on the latest client version
+export const checkLatestClientVersion = async (
+ config: Config.ConfigSchema,
+ candidate: Candidate,
+): Promise<boolean> => {
+ try {
+ const latestRelease = await _getLatestRelease(config);
+ return await _checkLatestClientVersion(config, latestRelease, candidate);
} catch (e) {
logger.error(
`Error checking latest client version: ${e}`,
@@ -381,7 +402,7 @@ export const checkKusamaRank = async (
}
if (Number(res.data.rank) < Constants.KUSAMA_RANK_VALID_THRESHOLD) {
- const invalidityReason = `${candidate.name} has a Kusama stash with lower than 25 rank in the Kusama OTV programme: ${res.data.rank}.`;
+ const invalidityReason = `${candidate.name} has a Kusama stash with lower than 100 rank in the Kusama 1KV programme: ${res.data.rank}.`;
await setKusamaRankInvalidity(candidate, false, invalidityReason);
return false;
}
diff --git a/packages/common/src/constraints/constraints.ts b/packages/common/src/constraints/constraints.ts
index a321c1fd3..3f2800fff 100644
--- a/packages/common/src/constraints/constraints.ts
+++ b/packages/common/src/constraints/constraints.ts
@@ -1,5 +1,4 @@
import { ChainData, Config, Constants } from "../index";
-import ApiHandler from "../ApiHandler/ApiHandler";
import { setScoreMetadata } from "./ScoreMetadata";
import { checkAllCandidates, checkCandidate } from "./CheckCandidates";
import {
@@ -46,8 +45,8 @@ export class OTV implements Constraints {
CLIENT_WEIGHT: Constants.CLIENT_WEIGHT,
};
- constructor(handler: ApiHandler, config: Config.ConfigSchema) {
- this.chaindata = new ChainData(handler);
+ constructor(chaindata: ChainData, config: Config.ConfigSchema) {
+ this.chaindata = chaindata;
this.config = config;
// Constraints
diff --git a/packages/common/src/db/models.ts b/packages/common/src/db/models.ts
index dcd03ea5c..2555de3df 100644
--- a/packages/common/src/db/models.ts
+++ b/packages/common/src/db/models.ts
@@ -943,6 +943,11 @@ export const ValidatorScoreMetadataModel = mongoose.model(
ValidatorScoreMetadataSchema,
);
+export interface ReleaseSchema {
+ name: string;
+ publishedAt: number;
+}
+
export const ReleaseSchema = new Schema({
name: String,
publishedAt: Number,
diff --git a/packages/common/src/db/queries/Release.ts b/packages/common/src/db/queries/Release.ts
index a074f4762..b90fa1fed 100644
--- a/packages/common/src/db/queries/Release.ts
+++ b/packages/common/src/db/queries/Release.ts
@@ -1,5 +1,5 @@
import logger from "../../logger";
-import { ReleaseModel } from "../models";
+import { ReleaseModel, ReleaseSchema } from "../models";
export const setRelease = async (
name: string,
@@ -16,11 +16,11 @@ export const setRelease = async (
return data;
};
-export const getLatestRelease = async (): Promise => {
+export const getLatestRelease = async (): Promise<ReleaseSchema | null> => {
try {
const latestRelease = await ReleaseModel.findOne({})
.sort("-publishedAt")
- .lean()
+ .lean<ReleaseSchema>()
.limit(1);
return latestRelease;
} catch (error) {
diff --git a/packages/common/src/index.ts b/packages/common/src/index.ts
index 030eb8ccc..fb7678ebb 100644
--- a/packages/common/src/index.ts
+++ b/packages/common/src/index.ts
@@ -13,7 +13,6 @@ import * as Score from "./constraints/score";
import * as Models from "./db/models";
import ScoreKeeper from "./scorekeeper/scorekeeper";
import * as Jobs from "./scorekeeper/jobs/specificJobs";
-import Monitor from "./monitor";
import MatrixBot from "./matrix";
export {
@@ -32,5 +31,4 @@ export {
ScoreKeeper,
Jobs,
MatrixBot,
- Monitor,
};
diff --git a/packages/common/src/monitor.ts b/packages/common/src/monitor.ts
deleted file mode 100644
index 84cd13d74..000000000
--- a/packages/common/src/monitor.ts
+++ /dev/null
@@ -1,124 +0,0 @@
-import { Octokit } from "@octokit/rest";
-import semver from "semver";
-
-import { logger, queries } from "./index";
-
-type TaggedRelease = {
- name: string;
- publishedAt: number;
-};
-
-export default class Monitor {
- public grace: number;
- public latestTaggedRelease: TaggedRelease | null = null;
-
- private ghApi: any;
-
- constructor(grace: number) {
- this.grace = grace;
- this.ghApi = new Octokit();
- }
-
- public async getLatestTaggedRelease(): Promise<TaggedRelease | null> {
- logger.info("(Monitor::getLatestTaggedRelease) Fetching latest release");
- let latestRelease;
-
- try {
- latestRelease = await this.ghApi.repos.getLatestRelease({
- owner: "paritytech",
- repo: "polkadot-sdk",
- });
- } catch (e) {
- logger.info(JSON.stringify(e));
- logger.info(
- "{Monitor::getLatestTaggedRelease} Could not get latest release.",
- );
- }
-
- if (!latestRelease) return null;
- const { tag_name, published_at } = latestRelease.data;
- const publishedAt = new Date(published_at).getTime();
-
- // Extract version number from the tag name
- const versionMatch = tag_name.match(/v?(\d+\.\d+\.\d+)/);
- if (!versionMatch) {
- logger.warn(`Unable to extract version from tag name: ${tag_name}`);
- return null;
- }
- const version = versionMatch[1]; // Extracted version number
-
- await queries.setRelease(version, publishedAt);
-
- if (
- this.latestTaggedRelease &&
- version === this.latestTaggedRelease!.name
- ) {
- logger.info("(Monitor::getLatestTaggedRelease) No new release found");
- return null;
- }
-
- this.latestTaggedRelease = {
- name: version,
- publishedAt,
- };
-
- logger.info(
- `(Monitor::getLatestTaggedRelease) Latest release updated: ${version} | Published at: ${publishedAt}`,
- );
-
- return this.latestTaggedRelease;
- }
-
- /// Ensures that nodes have upgraded within a `grace` period.
- public async ensureUpgrades(): Promise<void> {
- // If there is no tagged release stored in state, fetch it now.
- if (!this.latestTaggedRelease) {
- await this.getLatestTaggedRelease();
- }
-
- const now = new Date().getTime();
- const nodes = await queries.allCandidates();
-
- for (const node of nodes) {
- const { name, version, updated } = node;
-
- const nodeVersion = semver.coerce(version);
- const latestVersion = semver.clean(
- this.latestTaggedRelease?.name?.split(`-`)[0] || "",
- );
- if (latestVersion && nodeVersion) {
- logger.debug(
- `(Monitor::ensureUpgrades) ${name} | version: ${nodeVersion} latest: ${latestVersion}`,
- );
-
- if (!nodeVersion) {
- if (updated) {
- await queries.reportNotUpdated(name);
- }
- continue;
- }
-
- const isUpgraded = semver.gte(nodeVersion, latestVersion);
-
- if (isUpgraded) {
- if (!updated) {
- await queries.reportUpdated(name);
- }
- continue;
- }
-
- const published = this.latestTaggedRelease?.publishedAt || 0;
- if (now < published + this.grace) {
- // Still in grace, but check if the node is only one patch version away.
- const incremented = semver.inc(nodeVersion, "patch") || "";
- if (semver.gte(incremented, latestVersion)) {
- await queries.reportUpdated(name);
- continue;
- }
- }
-
- await queries.reportNotUpdated(name);
- }
- }
- }
-}
diff --git a/packages/common/src/nominator/NominatorTx.ts b/packages/common/src/nominator/NominatorTx.ts
index 5df229a98..db1c6abc2 100644
--- a/packages/common/src/nominator/NominatorTx.ts
+++ b/packages/common/src/nominator/NominatorTx.ts
@@ -14,7 +14,6 @@ export const sendProxyDelayTx = async (
nominator: Nominator,
targets: string[],
chaindata: ChainData,
- api: ApiPromise,
): Promise => {
try {
logger.info(
@@ -28,6 +27,9 @@ export const sendProxyDelayTx = async (
stale: false,
});
+ // TODO: chain interaction should be performed exclusively in ChainData
+ const api = await chaindata.handler.getApi();
+
const innerTx = api?.tx.staking.nominate(targets);
const currentBlock = await chaindata.getLatestBlock();
@@ -98,7 +100,6 @@ export const sendProxyTx = async (
nominator: Nominator,
targets: string[],
chaindata: ChainData,
- api: ApiPromise,
bot?: MatrixBot,
): Promise => {
try {
@@ -108,6 +109,9 @@ export const sendProxyTx = async (
nominatorLabel,
);
+ // TODO: chain interaction should be performed exclusively in ChainData
+ const api = await chaindata.handler.getApi();
+
const innerTx = api?.tx.staking.nominate(targets);
const callHash = innerTx.method.hash.toString();
diff --git a/packages/common/src/nominator/__mocks__/nominator.ts b/packages/common/src/nominator/__mocks__/nominator.ts
index ed8e876b4..d9589f8c0 100644
--- a/packages/common/src/nominator/__mocks__/nominator.ts
+++ b/packages/common/src/nominator/__mocks__/nominator.ts
@@ -1,7 +1,7 @@
import Keyring from "@polkadot/keyring";
import { KeyringPair } from "@polkadot/keyring/types";
import ApiHandler from "../../ApiHandler/ApiHandler";
-import { Types } from "../../index";
+import { ChainData, Types } from "../../index";
type Stash = string; // Simplified for example purposes
@@ -17,7 +17,7 @@ class NominatorMock {
private _bondedAddress: string;
private bot: any;
- private handler: ApiHandler;
+ public chaindata: ChainData;
private signer: KeyringPair;
// Use proxy of controller instead of controller directly.
@@ -34,12 +34,12 @@ class NominatorMock {
private _nominationNum = 0;
constructor(
- handler: ApiHandler,
+ chaindata: ChainData,
cfg: Types.NominatorConfig,
networkPrefix = 2,
bot: any,
) {
- this.handler = handler;
+ this.chaindata = chaindata;
this.bot = bot;
this._isProxy = cfg.isProxy || false;
diff --git a/packages/common/src/nominator/nominator.ts b/packages/common/src/nominator/nominator.ts
index 371b0740d..2407154c2 100644
--- a/packages/common/src/nominator/nominator.ts
+++ b/packages/common/src/nominator/nominator.ts
@@ -2,7 +2,6 @@ import { SubmittableExtrinsic } from "@polkadot/api/types";
import Keyring from "@polkadot/keyring";
import { KeyringPair } from "@polkadot/keyring/types";
-import ApiHandler from "../ApiHandler/ApiHandler";
import { ChainData, Constants, queries, Types } from "../index";
import logger from "../logger";
import EventEmitter from "eventemitter3";
@@ -17,7 +16,6 @@ export default class Nominator extends EventEmitter {
private _bondedAddress: string;
private bot: any;
- private handler: ApiHandler;
public chaindata: ChainData;
private signer: KeyringPair;
@@ -50,15 +48,14 @@ export default class Nominator extends EventEmitter {
};
constructor(
- handler: ApiHandler,
+ chaindata: ChainData,
cfg: Types.NominatorConfig,
networkPrefix = 2,
bot?: any,
dryRun = false,
) {
super();
- this.handler = handler;
- this.chaindata = new ChainData(handler);
+ this.chaindata = chaindata;
this.bot = bot;
this._isProxy = cfg.isProxy || false;
this._dryRun = dryRun || false;
@@ -200,8 +197,9 @@ export default class Nominator extends EventEmitter {
public async stash(): Promise<string> {
try {
- const api = this.handler.getApi();
- const ledger = await api?.query.staking.ledger(this.bondedAddress);
+ // TODO: chain interaction should be performed exclusively in ChainData
+ const api = await this.chaindata.handler.getApi();
+ const ledger = await api.query.staking.ledger(this.bondedAddress);
if (ledger !== undefined && !ledger.isSome) {
logger.warn(`Account ${this.bondedAddress} is not bonded!`);
@@ -226,11 +224,6 @@ export default class Nominator extends EventEmitter {
}
public async payee(): Promise<string> {
- const api = this.handler.getApi();
- if (!api) {
- logger.error(`Error getting API in payee`, nominatorLabel);
- return "";
- }
try {
const stash = await this.stash();
const isBonded = await this.chaindata.isBonded(stash);
@@ -294,14 +287,6 @@ export default class Nominator extends EventEmitter {
public async nominate(targets: Types.Stash[]): Promise<boolean> {
try {
- const now = new Date().getTime();
-
- const api = this.handler.getApi();
- if (!api) {
- logger.error(`Error getting API in nominate`, nominatorLabel);
- return false;
- }
-
const currentEra = (await this.chaindata.getCurrentEra()) || 0;
const nominatorStatus: NominatorStatus = {
state: NominatorState.Nominating,
@@ -350,20 +335,22 @@ export default class Nominator extends EventEmitter {
`Starting a delayed proxy tx for ${this.bondedAddress}`,
nominatorLabel,
);
- await sendProxyDelayTx(this, targets, this.chaindata, api);
+ await sendProxyDelayTx(this, targets, this.chaindata);
} else if (this._isProxy && this._proxyDelay == 0) {
logger.info(
`Starting a non delayed proxy tx for ${this.bondedAddress}`,
nominatorLabel,
);
// Start a non delay proxy tx
- await sendProxyTx(this, targets, this.chaindata, api, this.bot);
+ await sendProxyTx(this, targets, this.chaindata, this.bot);
} else {
logger.info(
`Starting a non proxy tx for ${this.bondedAddress}`,
nominatorLabel,
);
// Do a non-proxy tx
+ // TODO: chain interaction should be performed exclusively in ChainData
+ const api = await this.chaindata.handler.getApi();
tx = api.tx.staking.nominate(targets);
await this.sendStakingTx(tx, targets);
}
@@ -380,11 +367,8 @@ export default class Nominator extends EventEmitter {
callHash: string;
height: number;
}): Promise<boolean> {
- const api = this.handler.getApi();
- if (!api) {
- logger.error(`Error getting API in cancelTx`, nominatorLabel);
- return false;
- }
+ // TODO: chain interaction should be performed exclusively in ChainData
+ const api = await this.chaindata.handler.getApi();
const tx = api.tx.proxy.removeAnnouncement(
announcement.real,
announcement.callHash,
@@ -457,11 +441,6 @@ export default class Nominator extends EventEmitter {
return [false, "dryRun"];
}
const now = new Date().getTime();
- const api = this.handler.getApi();
- if (!api) {
- logger.error(`Error getting API in sendStakingTx`, nominatorLabel);
- return [false, "error getting api to send staking tx"];
- }
let didSend = true;
let finalizedBlockHash: string | undefined;
@@ -499,6 +478,8 @@ export default class Nominator extends EventEmitter {
`{Nominator::nominate} tx is finalized in block ${finalizedBlockHash}`,
);
+ // TODO: chain interaction should be performed exclusively in ChainData
+ const api = await this.chaindata.handler.getApi();
for (const event of events) {
if (
event.event &&
diff --git a/packages/common/src/scorekeeper/Nominating.ts b/packages/common/src/scorekeeper/Nominating.ts
index 88e1776c5..d5bc467f8 100644
--- a/packages/common/src/scorekeeper/Nominating.ts
+++ b/packages/common/src/scorekeeper/Nominating.ts
@@ -7,7 +7,6 @@ import { autoNumNominations } from "./NumNominations";
import { scorekeeperLabel } from "./scorekeeper";
import logger from "../logger";
import { ChainData, queries, Util } from "../index";
-import ApiHandler from "../ApiHandler/ApiHandler";
import MatrixBot from "../matrix";
import { ConfigSchema } from "../config";
import Nominator from "../nominator/nominator";
@@ -18,7 +17,6 @@ export const doNominations = async (
candidates: { name: string; stash: string; total: number }[],
nominatorGroups: Nominator[],
chaindata: ChainData,
- handler: ApiHandler,
bot: MatrixBot,
config: ConfigSchema,
currentTargets: { name?: string; stash?: string; identity?: any }[],
@@ -70,10 +68,9 @@ export const doNominations = async (
// The number of nominations to do per nominator account
// This is either hard coded, or set to "auto", meaning it will find a dynamic amount of validators
// to nominate based on the lowest staked validator in the validator set
- const api = handler.getApi();
const denom = await chaindata.getDenom();
- if (!api || !denom) return null;
- const autoNom = await autoNumNominations(api, nominator);
+ if (!denom) return null;
+ const autoNom = await autoNumNominations(nominator);
const { nominationNum } = autoNom;
logger.info(
diff --git a/packages/common/src/scorekeeper/NumNominations.ts b/packages/common/src/scorekeeper/NumNominations.ts
index 193ad009f..54145f4eb 100644
--- a/packages/common/src/scorekeeper/NumNominations.ts
+++ b/packages/common/src/scorekeeper/NumNominations.ts
@@ -19,7 +19,6 @@ import { NominatorState, NominatorStatus } from "../types";
* buffer percentage and additional nominations desired. This function is chain-aware and adjusts its logic and limits
* based on whether it's operating on Polkadot or another chain.
*
- * @param {ApiPromise} api - An instance of the ApiPromise from Polkadot.js API, connected to the target chain.
* @param {Nominator} nominator - An object representing the nominator, including methods to get the stash account and nominate validators.
* @returns {Promise