From 5d22f85864f7a5e0111cafd898d1a3208e2ef275 Mon Sep 17 00:00:00 2001 From: Martin Schmidt Date: Mon, 12 Aug 2024 12:56:30 +0000 Subject: [PATCH 01/10] implemented chronicler and tests --- .devcontainer/devcontainer.json | 2 +- Makefile | 1 + .../templates/deployment.yaml | 66 +++ .../templates/network-config.yaml | 1 + chart/run_kind_test.sh | 69 ++- chart/templates/_database-config.yaml | 40 ++ chart/templates/cnpg-cluster.yaml | 18 - chart/templates/deployment.yaml | 87 ++-- chart/templates/migrate-job.yaml | 27 +- chart/templates/network-config.yaml | 10 + chart/templates/service.yaml | 6 +- chart/tests/deployment_test.yaml | 98 ++++ chart/tests/network_config_test.yaml | 36 ++ chart/values.yaml | 139 +++--- src/Chronicler.Dockerfile | 1 + .../BlockReaderBackgroundService.cs | 51 ++ .../BlockReader/BlockReaderJob.cs | 179 +++++++ .../BlockReader/IBlockReaderJob.cs | 10 + .../DatabaseScripts/1.sql | 52 +- .../Exceptions/RegistryNotKnownException.cs | 12 + .../IRegistryClientFactory.cs | 9 + .../IRegistryService.cs | 8 + .../Models/CertificateInfo.cs | 12 + .../Models/ClaimAllocation.cs | 12 + .../Models/ClaimIntent.cs | 2 +- .../Models/ClaimRecord.cs | 12 + .../Models/FederatedCertificateId.cs | 33 ++ .../Models/ReadBlock.cs | 10 + .../Options/ChroniclerOptions.cs | 6 +- .../Options/NetworkOptions.cs | 8 + .../ProjectOrigin.Chronicler.Server.csproj | 6 + .../RegistryClientFactory.cs | 63 +++ .../RegistryService.cs | 28 ++ .../Repositories/ChroniclerRepository.cs | 137 +++++- .../Repositories/IChroniclerRepository.cs | 20 +- .../Services/ChroniclerService.cs | 7 +- .../Startup.cs | 15 +- .../UriOptionsLoader/ConfigureExtensions.cs | 26 + .../UriOptionsLoader/UriOptions.cs | 12 + .../UriOptionsChangeTokenSource.cs | 21 + .../UriOptionsLoader/UriOptionsConfigure.cs | 18 + .../UriOptionsLoaderService.cs | 130 +++++ .../appsettings.json | 6 +- .../BlockReaderBackgroundServiceTests.cs | 89 ++++ .../BlockReaderJobTests.cs | 458 ++++++++++++++++++ .../ChroniclerOptionsTests.cs | 13 +- .../ChroniclerRepositoryTests.cs | 379 +++++++++++++++ .../ChroniclerServiceTests.cs | 18 +- .../MicrosecondDateTimeOffsetGenerator.cs | 20 + .../RegistryClientFactoryTests.cs | 111 +++++ .../RegistryServiceTests.cs | 105 ++++ .../UriOptionsLoaderTests.cs | 183 +++++++ src/Protos/chronicler.proto | 2 +- src/Protos/electricity.proto | 91 ++++ src/Protos/registry.proto | 174 +++++++ 55 files changed, 2968 insertions(+), 181 deletions(-) create mode 100644 chart/.debug/project-origin-chronicler/templates/deployment.yaml create mode 100644 chart/.debug/project-origin-chronicler/templates/network-config.yaml create mode 100644 chart/templates/_database-config.yaml delete mode 100644 chart/templates/cnpg-cluster.yaml create mode 100644 chart/templates/network-config.yaml create mode 100644 chart/tests/deployment_test.yaml create mode 100644 chart/tests/network_config_test.yaml create mode 100644 src/ProjectOrigin.Chronicler.Server/BlockReader/BlockReaderBackgroundService.cs create mode 100644 src/ProjectOrigin.Chronicler.Server/BlockReader/BlockReaderJob.cs create mode 100644 src/ProjectOrigin.Chronicler.Server/BlockReader/IBlockReaderJob.cs create mode 100644 src/ProjectOrigin.Chronicler.Server/Exceptions/RegistryNotKnownException.cs create mode 100644 src/ProjectOrigin.Chronicler.Server/IRegistryClientFactory.cs create mode 100644 src/ProjectOrigin.Chronicler.Server/IRegistryService.cs create mode 100644 src/ProjectOrigin.Chronicler.Server/Models/CertificateInfo.cs create mode 100644 
src/ProjectOrigin.Chronicler.Server/Models/ClaimAllocation.cs create mode 100644 src/ProjectOrigin.Chronicler.Server/Models/ClaimRecord.cs create mode 100644 src/ProjectOrigin.Chronicler.Server/Models/FederatedCertificateId.cs create mode 100644 src/ProjectOrigin.Chronicler.Server/Models/ReadBlock.cs create mode 100644 src/ProjectOrigin.Chronicler.Server/Options/NetworkOptions.cs create mode 100644 src/ProjectOrigin.Chronicler.Server/RegistryClientFactory.cs create mode 100644 src/ProjectOrigin.Chronicler.Server/RegistryService.cs create mode 100644 src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/ConfigureExtensions.cs create mode 100644 src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/UriOptions.cs create mode 100644 src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/UriOptionsChangeTokenSource.cs create mode 100644 src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/UriOptionsConfigure.cs create mode 100644 src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/UriOptionsLoaderService.cs create mode 100644 src/ProjectOrigin.Chronicler.Test/BlockReaderBackgroundServiceTests.cs create mode 100644 src/ProjectOrigin.Chronicler.Test/BlockReaderJobTests.cs create mode 100644 src/ProjectOrigin.Chronicler.Test/ChroniclerRepositoryTests.cs create mode 100644 src/ProjectOrigin.Chronicler.Test/FixtureCustomizations/MicrosecondDateTimeOffsetGenerator.cs create mode 100644 src/ProjectOrigin.Chronicler.Test/RegistryClientFactoryTests.cs create mode 100644 src/ProjectOrigin.Chronicler.Test/RegistryServiceTests.cs create mode 100644 src/ProjectOrigin.Chronicler.Test/UriOptionsLoaderTests.cs create mode 100644 src/Protos/electricity.proto create mode 100644 src/Protos/registry.proto diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index a27db92..0376abe 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -12,7 +12,7 @@ // Use 'forwardPorts' to make a list of ports inside the container available locally. // "forwardPorts": [5000, 5001], // Use 'postCreateCommand' to run commands after the container is created. - "postCreateCommand": "make restore", + "postCreateCommand": "helm plugin install https://github.com/helm-unittest/helm-unittest.git && make restore", // Configure tool-specific properties. "customizations": { "vscode": { diff --git a/Makefile b/Makefile index fa6fa21..5b2de76 100644 --- a/Makefile +++ b/Makefile @@ -66,4 +66,5 @@ unit-test: verify-chart: @kind version >/dev/null 2>&1 || { echo >&2 "kind not installed! kind is required to use recipe, please install or use devcontainer"; exit 1;} @helm version >/dev/null 2>&1 || { echo >&2 "helm not installed! 
helm is required to use recipe, please install or use devcontainer"; exit 1;} + helm unittest chart chart/run_kind_test.sh diff --git a/chart/.debug/project-origin-chronicler/templates/deployment.yaml b/chart/.debug/project-origin-chronicler/templates/deployment.yaml new file mode 100644 index 0000000..06789f6 --- /dev/null +++ b/chart/.debug/project-origin-chronicler/templates/deployment.yaml @@ -0,0 +1,66 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: po-chronicler-deployment + namespace: NAMESPACE + labels: + app: po-chronicler +spec: + replicas: 3 + selector: + matchLabels: + app: po-chronicler + strategy: + type: Recreate + template: + metadata: + labels: + app: po-chronicler + spec: + serviceAccountName: chronicler-migration-waiter + initContainers: + - name: wait-for-migration + image: groundnuty/k8s-wait-for:v2.0 + args: + - "job" + - "po-chronicler-migrate-job-0-1-0" + containers: + - name: po-chronicler + image: ghcr.io/project-origin/chronicler:0.1.0 + args: + - "--serve" + env: + # Database Configuration + - name: DB_HOST + value: "_" + - name: DB_PORT + value: "_" + - name: DB_DATABASE + value: "_" + - name: DB_USERNAME + value: "_" + - name: DB_PASSWORD + value: "_" + + # OpenTelemetry Collector Configuration + - name: Otlp__Enabled + value: "false" + - name: Otlp__Endpoint + value: http://opentelemetry-collector.otel-collector:4317 + + # Chronicler Configuration + - name: NetworkConfigurationUri + value: /etc/config/networkConfiguration.json + volumeMounts: + - name: config-volume + mountPath: /etc/config + subPath: networkConfiguration.json + + volumes: + - name: config-volume + configMap: + name: po-chronicler-deployment-config + items: + - key: networkConfiguration.json + path: networkConfiguration.json diff --git a/chart/.debug/project-origin-chronicler/templates/network-config.yaml b/chart/.debug/project-origin-chronicler/templates/network-config.yaml new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/chart/.debug/project-origin-chronicler/templates/network-config.yaml @@ -0,0 +1 @@ + diff --git a/chart/run_kind_test.sh b/chart/run_kind_test.sh index 0093046..60488a3 100755 --- a/chart/run_kind_test.sh +++ b/chart/run_kind_test.sh @@ -9,22 +9,48 @@ cluster_name=chronicler-test # Ensures script fails if something goes wrong. 
set -eo pipefail -# cleanup - delete temp_folder and cluster -trap 'rm -fr $temp_folder; kind delete cluster -n ${cluster_name} >/dev/null 2>&1' 0 +# define cleanup function +cleanup() { + rm -fr $temp_folderx + kind delete cluster -n ${cluster_name} >/dev/null 2>&1 +} + +# define debug function +debug() { + echo -e "\nDebugging information:" + echo -e "\nHelm status:" + helm status chronicler -n chronicler --show-desc --show-resources + + echo -e "\nDeployment description:" + kubectl describe deployment -n chronicler po-chronicler-deployment + + POD_NAMES=$(kubectl get pods -n chronicler -l app=po-chronicler -o jsonpath="{.items[*].metadata.name}") + # Loop over the pods and print their logs + for POD_NAME in $POD_NAMES + do + echo -e "\nLogs for $POD_NAME:" + kubectl logs -n chronicler $POD_NAME + done +} + +# trap cleanup function on script exit +trap 'cleanup' 0 +trap 'debug; cleanup' ERR # define variables temp_folder=$(mktemp -d) values_filename=${temp_folder}/values.yaml +secret_filename=${temp_folder}/secret.yaml # create kind cluster kind delete cluster -n ${cluster_name} kind create cluster -n ${cluster_name} -# install rabbitmq-operator -kubectl apply -f "https://github.com/rabbitmq/cluster-operator/releases/download/v2.5.0/cluster-operator.yml" +# create namespace +kubectl create namespace chronicler -# install cnpg-operator -helm install cnpg-operator cloudnative-pg --repo https://cloudnative-pg.io/charts --version 0.18.0 --namespace cnpg --create-namespace --wait +# install postgresql chart +helm install postgresql oci://registry-1.docker.io/bitnamicharts/postgresql --namespace chronicler # build docker image docker build -f src/Chronicler.Dockerfile -t ghcr.io/project-origin/chronicler:test src/ @@ -32,16 +58,41 @@ docker build -f src/Chronicler.Dockerfile -t ghcr.io/project-origin/chronicler:t # load docker image into cluster kind load -n ${cluster_name} docker-image ghcr.io/project-origin/chronicler:test +# generate keys +openssl genpkey -algorithm ED25519 > ${secret_filename} + +# generate secret +kubectl create secret generic signing-key --from-file=my-key=${secret_filename} --namespace chronicler + # generate values.yaml file cat << EOF > "${values_filename}" image: tag: test +replicaCount: 1 +config: + signingKeySecret: + name: signing-key + key: my-key + gridAreas: + - narnia + networkConfigurationFile: |- + { + "RegistryUrls": { + "narniaRegistry": "https://registry.narnia.example.com", + } + } +postgresql: + host: postgresql + database: postgres + username: postgres + password: + secretRef: + name: postgresql + key: postgres-password -messageBroker: - type: rabbitmqOperator EOF # install chronicler chart -helm install chronicler ./chart --values ${values_filename} --namespace chronicler --create-namespace --wait +helm install chronicler ./chart --values ${values_filename} --namespace chronicler --wait echo "Test completed successfully ✅" diff --git a/chart/templates/_database-config.yaml b/chart/templates/_database-config.yaml new file mode 100644 index 0000000..58d7452 --- /dev/null +++ b/chart/templates/_database-config.yaml @@ -0,0 +1,40 @@ +{{- define "checkKey" -}} + {{- $value := index . 0 -}} + {{- $name := index . 
1 -}} + + {{- if kindIs "string" $value }} + value: {{ required (printf "The value for %s is not provided" $name) $value | quote }} + {{- else if and (hasKey $value "secretRef") (hasKey $value "configMapRef") }} + {{ fail (printf "Both secretRef and configMapRef are defined for %s" $name) }} + {{- else if hasKey $value "secretRef" }} + valueFrom: + secretKeyRef: + name: {{ required (printf "The value for %s.secretRef.name is not provided" $name) $value.secretRef.name | quote }} + key: {{ required (printf "The value for %s.secretRef.key is not provided" $name) $value.secretRef.key | quote }} + {{- else if hasKey $value "configMapRef" }} + valueFrom: + configMapKeyRef: + name: {{ required (printf "The value for %s.configMapRef.name is not provided" $name) $value.configMapRef.name | quote }} + key: {{ required (printf "The value for %s.configMapRef.name is not provided" $name) $value.configMapRef.key | quote }} + {{- else }} + {{ fail (printf "The value for %s is not provided" $name) }} + {{- end }} +{{- end -}} + +{{- define "database.config" -}} +# Database Configuration +- name: DB_HOST + {{- include "checkKey" (list .Values.postgresql.host ".postgresql.host") }} +- name: DB_PORT + {{- include "checkKey" (list .Values.postgresql.port ".postgresql.port") }} +- name: DB_DATABASE + {{- include "checkKey" (list .Values.postgresql.database ".postgresql.database") }} +- name: DB_USERNAME + {{- include "checkKey" (list .Values.postgresql.username ".postgresql.username") }} +- name: DB_PASSWORD + {{- include "checkKey" (list .Values.postgresql.password ".postgresql.password") }} +- name: ConnectionStrings__Database + value: Host=$(DB_HOST); Port=$(DB_PORT); Database=$(DB_DATABASE); Username=$(DB_USERNAME); Password=$(DB_PASSWORD); +{{- end }} + + diff --git a/chart/templates/cnpg-cluster.yaml b/chart/templates/cnpg-cluster.yaml deleted file mode 100644 index 023d2a4..0000000 --- a/chart/templates/cnpg-cluster.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- -{{- if and (eq .Values.persistence.type "CloudNativePG") }} -apiVersion: postgresql.cnpg.io/v1 -kind: Cluster -metadata: - name: {{ .Values.persistence.cloudNativePG.name }} - namespace: {{ .Release.Namespace }} -spec: - instances: {{ .Values.persistence.cloudNativePG.replicas }} - storage: - size: {{ .Values.persistence.cloudNativePG.size }} - bootstrap: - initdb: - database: {{ .Values.persistence.cloudNativePG.database }} - owner: {{ .Values.persistence.cloudNativePG.owner }} - monitoring: - enablePodMonitor: true -{{- end }} diff --git a/chart/templates/deployment.yaml b/chart/templates/deployment.yaml index 52cb758..e28f379 100644 --- a/chart/templates/deployment.yaml +++ b/chart/templates/deployment.yaml @@ -1,5 +1,4 @@ --- -{{- $tag := .Values.image.tag | default .Chart.AppVersion -}} apiVersion: apps/v1 kind: Deployment metadata: @@ -8,7 +7,7 @@ metadata: labels: app: po-chronicler spec: - replicas: {{ .Values.chronicler.replicaCount }} + replicas: {{ .Values.replicaCount }} selector: matchLabels: app: po-chronicler @@ -22,7 +21,7 @@ spec: serviceAccountName: chronicler-migration-waiter initContainers: - name: wait-for-migration - image: groundnuty/k8s-wait-for:v2.0 # TODO verify this image + image: groundnuty/k8s-wait-for:v2.0 args: - "job" - "po-chronicler-migrate-job-{{ .Values.image.tag | default .Chart.AppVersion | replace "." 
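The database.config helper above folds the five DB_* variables into a single ConnectionStrings__Database environment variable. In .NET configuration a double underscore maps to the ':' separator, so the value surfaces as the named connection string "Database". A minimal consuming-side sketch, not part of the patch, shown only for context:

using Microsoft.Extensions.Configuration;

// The generic host adds environment variables by default; shown explicitly here.
var configuration = new ConfigurationBuilder()
    .AddEnvironmentVariables()
    .Build();

// ConnectionStrings__Database is read as ConnectionStrings:Database.
var connectionString = configuration.GetConnectionString("Database");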
"-" }}" @@ -32,42 +31,58 @@ spec: args: - "--serve" env: - {{- range .Values.registries }} - - name: RegistryUrls__{{ .name }} - value: {{ .address }} - {{- end }} - - {{- if eq .Values.persistence.type "CloudNativePG" }} - - name: DB_HOST - value: {{ .Values.persistence.cloudNativePG.name }}-rw - - name: DB_PORT - value: "5432" - - name: DB_DATABASE - value: {{ .Values.persistence.cloudNativePG.database }} - - name: DB_USERNAME - valueFrom: - secretKeyRef: - name: {{ .Values.persistence.cloudNativePG.name }}-app - key: username - - name: DB_PASSWORD - valueFrom: - secretKeyRef: - name: {{ .Values.persistence.cloudNativePG.name }}-app - key: password - - name: ConnectionStrings__Database - value: "Host=$(DB_HOST);Port=$(DB_PORT);Database=$(DB_DATABASE);Username=$(DB_USERNAME);Password=$(DB_PASSWORD);" - {{- else if eq .Values.persistence.type "BYOD" }} - - name: ConnectionStrings__Database - valueFrom: - secretKeyRef: - name: {{ required "BYOD is selected as the database type, but no secretName is provided in persistence.byod" .Values.persistence.byod.secretName }} - key: {{ required "BYOD is selected as the database type, but no secretKey is provided in persistence.byod" .Values.persistence.byod.secretKey }} - {{- else }} - {{- fail "Unsupported database type specified. Please specify 'persistence.type' as either 'CloudNativePG' or 'BYOD'." }} - {{- end }} + {{- include "database.config" . | nindent 12 }} # OpenTelemetry Collector Configuration - name: Otlp__Enabled value: {{ .Values.otlp.enabled | quote }} - name: Otlp__Endpoint value: {{ .Values.otlp.endpoint }} + + # Chronicler Configuration + {{- if .Values.config.networkConfigurationFile }} + - name: NetworkConfigurationUri + value: /etc/config/networkConfiguration.json + {{- else if .Values.config.networkConfigurationUri }} + - name: NetworkConfigurationUri + value: {{ .Values.config.networkConfigurationUri }} + {{- else }} + {{ fail "No network configuration provided" }} + {{- end }} + + - name: chronicler__JobInterval + value: {{ .Values.config.jobInterval }} + + - name: chronicler__SigningKey + value: /etc/secret/signing-key + + {{- range $i, $area := .Values.config.gridAreas }} + - name: chronicler__GridAreas__{{ $i }} + value: {{ $area }} + {{- end }} + volumeMounts: + - name: signing-key-volume + mountPath: /etc/secret + subPath: signing-key + {{- if .Values.config.networkConfigurationFile }} + - name: config-volume + mountPath: /etc/config + subPath: networkConfiguration.json + {{- end }} + + + volumes: + - name: signing-key-volume + secret: + secretName: {{ .Values.config.signingKeySecret.name }} + items: + - key: {{ .Values.config.signingKeySecret.key }} + path: signing-key + {{- if .Values.config.networkConfigurationFile }} + - name: config-volume + configMap: + name: po-chronicler-deployment-config + items: + - key: networkConfiguration.json + path: networkConfiguration.json + {{- end }} diff --git a/chart/templates/migrate-job.yaml b/chart/templates/migrate-job.yaml index 73bc0cd..52e95ce 100644 --- a/chart/templates/migrate-job.yaml +++ b/chart/templates/migrate-job.yaml @@ -17,29 +17,4 @@ spec: args: - "--migrate" env: - {{- if eq .Values.persistence.type "CloudNativePG" }} - - name: DB_HOST - value: {{ .Values.persistence.cloudNativePG.name }}-rw - - name: DB_PORT - value: "5432" - - name: DB_DATABASE - value: {{ .Values.persistence.cloudNativePG.database }} - - name: DB_USERNAME - valueFrom: - secretKeyRef: - name: {{ .Values.persistence.cloudNativePG.name }}-app - key: username - - name: DB_PASSWORD - valueFrom: - 
secretKeyRef: - name: {{ .Values.persistence.cloudNativePG.name }}-app - key: password - - name: ConnectionStrings__Database - value: Host=$(DB_HOST); Port=$(DB_PORT); Database=$(DB_DATABASE); Username=$(DB_USERNAME); Password=$(DB_PASSWORD); - {{- else if eq .Values.persistence.type "BYOD" }} - - name: ConnectionStrings__Database - valueFrom: - secretKeyRef: - name: {{ .Values.persistence.byod.secretName }} - key: {{ .Values.persistence.byod.secretKey }} - {{ end }} + {{- include "database.config" . | nindent 12 }} diff --git a/chart/templates/network-config.yaml b/chart/templates/network-config.yaml new file mode 100644 index 0000000..21de966 --- /dev/null +++ b/chart/templates/network-config.yaml @@ -0,0 +1,10 @@ +{{- if .Values.config.networkConfigurationFile }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: po-chronicler-deployment-config + namespace: {{ .Release.Namespace }} +data: + networkConfiguration.json: |- + {{ .Values.config.networkConfigurationFile | nindent 4 }} +{{- end }} diff --git a/chart/templates/service.yaml b/chart/templates/service.yaml index 36f2e8a..17f725d 100644 --- a/chart/templates/service.yaml +++ b/chart/templates/service.yaml @@ -11,5 +11,9 @@ spec: ports: - name: rest protocol: TCP - port: 5000 + port: {{ .Values.service.httpPort }} targetPort: 5000 + - name: grpc + protocol: TCP + port: {{ .Values.service.grpcPort }} + targetPort: 5001 diff --git a/chart/tests/deployment_test.yaml b/chart/tests/deployment_test.yaml new file mode 100644 index 0000000..d52e049 --- /dev/null +++ b/chart/tests/deployment_test.yaml @@ -0,0 +1,98 @@ +suite: Test Suite for Chronicler Chart + +templates: + - deployment.yaml + +set: + config: + networkConfigurationUri: "_" + postgresql: + host: "_" + port: "_" + database: "_" + username: "_" + password: "_" + +tests: + - it: should set PostgreSQL configuration + set: + postgresql: + host: "localhost" + port: "1234" + database: "someDatabase" + username: + configMapRef: + name: "someUsernameName" + key: "someUsernameKey" + password: + secretRef: + name: "somePasswordName" + key: "somePasswordKey" + asserts: + - isKind: + of: Deployment + - matchRegex: + path: spec.template.spec.containers[0].env[?(@.name=="DB_HOST")].value + pattern: "localhost" + - matchRegex: + path: spec.template.spec.containers[0].env[?(@.name=="DB_PORT")].value + pattern: "1234" + - matchRegex: + path: spec.template.spec.containers[0].env[?(@.name=="DB_DATABASE")].value + pattern: "someDatabase" + - matchRegex: + path: spec.template.spec.containers[0].env[?(@.name=="DB_USERNAME")].valueFrom.configMapKeyRef.name + pattern: "someUsernameName" + - matchRegex: + path: spec.template.spec.containers[0].env[?(@.name=="DB_USERNAME")].valueFrom.configMapKeyRef.key + pattern: "someUsernameKey" + - matchRegex: + path: spec.template.spec.containers[0].env[?(@.name=="DB_PASSWORD")].valueFrom.secretKeyRef.name + pattern: "somePasswordName" + - matchRegex: + path: spec.template.spec.containers[0].env[?(@.name=="DB_PASSWORD")].valueFrom.secretKeyRef.key + pattern: "somePasswordKey" + - it: should set network configuration correctly when networkConfigurationUri is set + set: + config: + networkConfigurationUri: "http://example.com" + asserts: + - isKind: + of: Deployment + - matchRegex: + path: spec.template.spec.containers[0].env[?(@.name=="NetworkConfigurationUri")].value + pattern: "http://example.com" + - it: should add a volume and volume mount for network configuration when networkConfigurationFile is set + set: + config: + networkConfigurationFile: |- + { + 
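The ConfigMap above carries the networkConfiguration.json document that the server loads through the UriOptionsLoader added later in this patch and binds to NetworkOptions. A minimal deserialization sketch, assuming RegistryUrls maps registry names to address strings; note that strict JSON allows no trailing comma after the last entry, unlike the sample snippets in the values files:

using System.Text.Json;
using ProjectOrigin.Chronicler.Server.Options;

var json = """
{
  "RegistryUrls": {
    "narniaRegistry": "https://registry.narnia.example.com"
  }
}
""";

// System.Text.Json rejects trailing commas by default.
var network = JsonSerializer.Deserialize<NetworkOptions>(json);
// network.RegistryUrls["narniaRegistry"] == "https://registry.narnia.example.com"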
"RegistryUrls": { + "narnia": "https://narnia.com", + } + } + asserts: + - isKind: + of: Deployment + - matchRegex: + path: spec.template.spec.containers[0].env[?(@.name=="NetworkConfigurationUri")].value + pattern: "/etc/config/networkConfiguration.json" + - matchRegex: + path: spec.template.spec.containers[0].volumeMounts[1].name + pattern: "config-volume" + - matchRegex: + path: spec.template.spec.volumes[?(@.name=="config-volume")].configMap.items[0].key + pattern: "networkConfiguration.json" + - it: should set areas correctly + set: + config: + gridAreas: + - "area1" + - "area2" + asserts: + - matchRegex: + path: spec.template.spec.containers[0].env[?(@.name=="chronicler__GridAreas__0")].value + pattern: "area1" + - matchRegex: + path: spec.template.spec.containers[0].env[?(@.name=="chronicler__GridAreas__1")].value + pattern: "area2" diff --git a/chart/tests/network_config_test.yaml b/chart/tests/network_config_test.yaml new file mode 100644 index 0000000..05b4f8f --- /dev/null +++ b/chart/tests/network_config_test.yaml @@ -0,0 +1,36 @@ +suite: Test Suite for Chronicler Chart + +templates: + - network-config.yaml + +set: + postgresql: + host: "_" + port: "_" + database: "_" + username: "_" + password: "_" + +tests: + - it: should create a ConfigMap for network configuration + set: + config: + networkConfigurationFile: |- + { + "RegistryUrls": { + "narnia": "https://narnia.com", + } + } + asserts: + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + + - it: should not create a ConfigMap for network configuration when networkConfigurationFile is not set + set: + config: + networkConfigurationUri: "https://narnia.com" + asserts: + - hasDocuments: + count: 0 diff --git a/chart/values.yaml b/chart/values.yaml index e7ec743..e2791c1 100644 --- a/chart/values.yaml +++ b/chart/values.yaml @@ -2,25 +2,90 @@ # image defines the image configuration for the chronicler server image: - # repository defines the image repository to use repository: ghcr.io/project-origin/chronicler - # tag defines the image tag to use, defaults to the chart appVersion tag: # service holds the configuration for the service service: - # type is the type of service to create, allowed values are (ClusterIP | NodePort | LoadBalancer) + # type is the type of service to create, allowed values are (ClusterIP | LoadBalancer) type: ClusterIP - # nodePort is the port to expose the service on if type is NodePort - nodePort: + # httpPort is the port to use for the http service + httpPort: 5000 + # grpcPort is the port to use for the grpc service + grpcPort: 5001 + +# replicaCount defines the number of replicas to run +replicaCount: 3 # config holds general configuration for the chronicler server config: - - # pathBase defines the base part of all paths in the api of the chronicler server, defaults to /api - pathBase: /api + # signingKeySecret defines the secret that holds the signing key for the chronicler server + signingKeySecret: + name: + key: + + # jobInterval defines the interval to run the job to check for new blocks on the regitries, defaults to 1h + jobInterval: 01:00:00 + + # gridAreas defines a list of grid areas to sign claimIntents for + gridAreas: + #- narnia + + # networkConfigurationUri defines an uri to fetch the network configuration from, allowed formats are json + networkConfigurationUri: #https://raw.githubusercontent.com/project-origin/chronicler/main/configs/networkConfiguration.json + + # networkConfigurationFile defines the network configuration as a string in json format + networkConfigurationFile: 
# |- + # { + # "RegistryUrls": { + # "narniaRegistry": "https://registry.narnia.example.com", + # } + # } + +# postgresql holds the configuration for the postgresql database, each value is required and can be set directly, via a secretRef, or via a configMapRef +postgresql: + # host defines the host of the postgresql database + host: + # secretRef: + # name: + # key: + # configMapRef: + # name: + # key: + # port defines the port of the postgresql database + port: "5432" + # secretRef: + # name: + # key: + # configMapRef: + # name: + # key: + # database defines the name of the postgresql database + database: + # secretRef: + # name: + # key: + # configMapRef: + # name: + # key: + # username defines the username to use to connect to the postgresql database + username: + # secretRef: + # name: + # key: + # configMapRef: + # name: + # key: + # password defines the password to use to connect to the postgresql database + password: + # secretRef: + # name: + # key: + # configMapRef: + # name: + # key: # otlp holds the configuration for the opentelemetry collector otlp: @@ -28,61 +93,3 @@ otlp: enabled: false # endpoint defines the endpoint of the opentelemetry collector, example "http://otel-collector:4317" endpoint: "http://opentelemetry-collector.otel-collector:4317" - -messageBroker: - # type defines the type of message broker to use, allowed values are (inMemory | rabbitmq | rabbitmqOperator) - type: - - # rabbitmq defines the rabbitmq configuration for the message broker if type is rabbitmq, with rabbitmqOperator or inMemory this is ignored - rabbitmq: - # host defines the host of the rabbitmq server in url format 'http://localhost:15672/' - host: - # port defines the port of the rabbitmq server, defaults to 5672 - port: 5672 - # username defines the username to use to connect to the rabbitmq server - username: - # password defines the password to use to connect to the rabbitmq server - password: - -# chronicler defines the deployment configuration for the chronicler server -chronicler: - # replicas defines the number of chronicler server instances to run - replicaCount: 3 - - # Registries is a list of all the known registries in the network -registries: [] - # example - # - name: test - # address: http://test-registry:80 - -# persistence defines the persistence configuration for the chronicler server -persistence: - - # type defines which database to use. "CloudNativePG" for built-in PostgreSQL or "BYOD" (Bring Your Own Database) for using an external PostgreSQL database. Only PostgreSQL is supported. - type: "CloudNativePG" - - # cloudNativePG determines if the database is created as a cloud native postgresql instance - cloudNativePG: - - # name defines the name of the cloud native postgresql instance - name: cnpg-chronicler-db - - # owner defines the owner of the database - owner: app - - # database defines the name of the database to create - database: chronicler-database - - # replicas defines the number of database instances to run - replicas: 3 - - # storage defines the storage configuration for the database - size: 10Gi - - # BYOD (Bring Your Own Database) configuration - byod: - - # Create a secret with the DB connection info and provide the secret name here - secretName: "" - # Specify the key within the secret that contains the DB connection string - secretKey: "" diff --git a/src/Chronicler.Dockerfile b/src/Chronicler.Dockerfile index a73dc34..ead2707 100644 --- a/src/Chronicler.Dockerfile +++ b/src/Chronicler.Dockerfile @@ -28,4 +28,5 @@ RUN chmod -R 655 . 
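The jobInterval and gridAreas values above are rendered by the deployment template into chronicler__JobInterval and chronicler__GridAreas__0, __1, ... environment variables, which bind onto ChroniclerOptions under the "chronicler" section prefix. A small illustration of that binding with throwaway values:

using System;
using Microsoft.Extensions.Configuration;

Environment.SetEnvironmentVariable("chronicler__JobInterval", "01:00:00");
Environment.SetEnvironmentVariable("chronicler__GridAreas__0", "narnia");

var configuration = new ConfigurationBuilder()
    .AddEnvironmentVariables()
    .Build();

var section = configuration.GetSection("chronicler");
var jobInterval = TimeSpan.Parse(section["JobInterval"]!);        // one hour
var gridAreas = section.GetSection("GridAreas").Get<string[]>();  // ["narnia"]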
USER $USER EXPOSE 5000 +EXPOSE 5001 ENTRYPOINT ["/bin/sh", "-c", "dotnet ${APPLICATION}.dll \"${@}\"", "--" ] diff --git a/src/ProjectOrigin.Chronicler.Server/BlockReader/BlockReaderBackgroundService.cs b/src/ProjectOrigin.Chronicler.Server/BlockReader/BlockReaderBackgroundService.cs new file mode 100644 index 0000000..7d54cb7 --- /dev/null +++ b/src/ProjectOrigin.Chronicler.Server/BlockReader/BlockReaderBackgroundService.cs @@ -0,0 +1,51 @@ +using System; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Hosting; +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Options; +using ProjectOrigin.Chronicler.Server.Options; + +namespace ProjectOrigin.Chronicler.Server.BlockReader; + +public class BlockReaderBackgroundService : BackgroundService +{ + private readonly ILogger _logger; + private readonly ChroniclerOptions _options; + private readonly IServiceProvider _serviceProvider; + + public BlockReaderBackgroundService( + ILogger logger, + IOptions options, + IServiceProvider serviceProvider) + { + _logger = logger; + _options = options.Value; + _serviceProvider = serviceProvider; + } + + protected override async Task ExecuteAsync(CancellationToken stoppingToken) + { + using var timer = new PeriodicTimer(_options.JobInterval); + + do + { + try + { + _logger.LogTrace("Executing BlockReader"); + + var job = _serviceProvider.GetRequiredService(); + await job.ProcessAllRegistries(stoppingToken); + + _logger.LogTrace("Executed BlockReader"); + } + catch (Exception ex) + { + _logger.LogCritical(ex, "Error executing BlockReader"); + } + } + while (!stoppingToken.IsCancellationRequested && + await timer.WaitForNextTickAsync(stoppingToken)); + } +} diff --git a/src/ProjectOrigin.Chronicler.Server/BlockReader/BlockReaderJob.cs b/src/ProjectOrigin.Chronicler.Server/BlockReader/BlockReaderJob.cs new file mode 100644 index 0000000..a301b86 --- /dev/null +++ b/src/ProjectOrigin.Chronicler.Server/BlockReader/BlockReaderJob.cs @@ -0,0 +1,179 @@ +using System; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Options; +using ProjectOrigin.Chronicler.Server.Models; +using ProjectOrigin.Chronicler.Server.Options; +using ProjectOrigin.Chronicler.Server.Repositories; +using ProjectOrigin.ServiceCommon.Database; + +namespace ProjectOrigin.Chronicler.Server.BlockReader; + +public class BlockReaderJob : IBlockReaderJob +{ + private readonly ILogger _logger; + private readonly ChroniclerOptions _options; + private readonly IUnitOfWork _unitOfWork; + private readonly IRegistryService _registryService; + + public BlockReaderJob( + ILogger logger, + IOptions options, + IUnitOfWork unitOfWork, + IRegistryService registryService) + { + _logger = logger; + _options = options.Value; + _unitOfWork = unitOfWork; + _registryService = registryService; + } + + public async Task ProcessAllRegistries(CancellationToken cancellationToken) + { + var registries = (await _unitOfWork.GetRepository().GetRegistriesToCrawl()) + .ToList(); + + foreach (var lastReadBlock in registries) + { + if (cancellationToken.IsCancellationRequested) + return; + + await ProcessRegistryBlocks(lastReadBlock.RegistryName, lastReadBlock.BlockHeight, cancellationToken); + } + } + + + public async Task ProcessRegistryBlocks(string registryName, int previousBlockHeight, CancellationToken cancellationToken) + { + var lastProcessedBlockHeight = previousBlockHeight; + + while 
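BlockReaderBackgroundService above runs one crawl pass immediately and then one per JobInterval, resolving a fresh IBlockReaderJob from the container on every pass and logging failures rather than rethrowing them, so a single bad pass does not stop the host. The wiring it expects matches what Startup adds further down in this patch; a stripped-down sketch that omits the database, options and gRPC registrations the real Startup also performs:

using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Hosting;
using ProjectOrigin.Chronicler.Server.BlockReader;

var builder = Host.CreateApplicationBuilder(args);

// The hosted service drives the schedule; the job itself is transient per pass.
builder.Services.AddHostedService<BlockReaderBackgroundService>();
builder.Services.AddTransient<IBlockReaderJob, BlockReaderJob>();

await builder.Build().RunAsync();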
(!cancellationToken.IsCancellationRequested) + { + var block = await _registryService.GetNextBlock(registryName, lastProcessedBlockHeight); + if (block != null) + { + if (block.Height != lastProcessedBlockHeight + 1) + { + _logger.LogError("Block height mismatch for registry {RegistryName}. Expected {ExpectedHeight}, got {BlockHeight}.", registryName, lastProcessedBlockHeight + 1, block.Height); + break; + } + + await ProcessBlock(_unitOfWork, registryName, block); + lastProcessedBlockHeight = block.Height; + } + else + { + _logger.LogTrace("No new blocks found for registry {RegistryName} at height {BlockHeight}", registryName, lastProcessedBlockHeight + 1); + break; + } + } + } + + private async Task ProcessBlock(IUnitOfWork unitOfWork, string registryName, Registry.V1.Block block) + { + var repository = unitOfWork.GetRepository(); + + foreach (var transaction in block.Transactions) + { + if (transaction.Header.PayloadType == Electricity.V1.IssuedEvent.Descriptor.FullName) + { + await ProcessIssuedEvent(repository, transaction); + } + else if (transaction.Header.PayloadType == Electricity.V1.AllocatedEvent.Descriptor.FullName) + { + await ProcessAllocatedEvent(repository, transaction); + } + else if (transaction.Header.PayloadType == Electricity.V1.ClaimedEvent.Descriptor.FullName) + { + await ProcessClaimedEvent(repository, transaction); + } + } + + await repository.UpsertReadBlock(new LastReadBlock + { + RegistryName = registryName, + BlockHeight = block.Height, + ReadAt = DateTimeOffset.UtcNow, + }); + + unitOfWork.Commit(); + } + + /// + /// Analyse an IssuedEvent and insert the certificate info into the database. + /// + private async Task ProcessIssuedEvent(IChroniclerRepository repository, Registry.V1.Transaction transaction) + { + var issuedEvent = Electricity.V1.IssuedEvent.Parser.ParseFrom(transaction.Payload); + if (_options.GridAreas.Contains(issuedEvent.GridArea)) + { + var fid = transaction.Header.FederatedStreamId.ToModel(); + await repository.InsertCertificateInfo(new CertificateInfo + { + RegistryName = fid.RegistryName, + CertificateId = fid.StreamId, + StartTime = issuedEvent.Period.Start.ToDateTimeOffset(), + EndTime = issuedEvent.Period.End.ToDateTimeOffset(), + GridArea = issuedEvent.GridArea, + }); + } + } + + private async Task ProcessAllocatedEvent(IChroniclerRepository repository, Registry.V1.Transaction transaction) + { + var fid = transaction.Header.FederatedStreamId.ToModel(); + + var allocatedEvent = Electricity.V1.AllocatedEvent.Parser.ParseFrom(transaction.Payload); + var allocationId = Guid.Parse(allocatedEvent.AllocationId.Value); + + var sliceCommitmentHash = allocatedEvent.ConsumptionCertificateId.Equals(transaction.Header.FederatedStreamId) ? 
+ allocatedEvent.ConsumptionSourceSliceHash : allocatedEvent.ProductionSourceSliceHash; + + var claimIntent = await repository.GetClaimIntent(fid, sliceCommitmentHash.ToByteArray()); + if (claimIntent != null) + { + await repository.InsertClaimAllocation(new ClaimAllocation + { + Id = Guid.NewGuid(), + ClaimIntentId = claimIntent.Id, + RegistryName = fid.RegistryName, + CertificateId = fid.StreamId, + AllocationId = allocationId, + }); + } + else + { + _logger.LogTrace("Allocation for certificate {registry}-{certificateId} not relevant", fid.RegistryName, fid.StreamId); + } + } + + private async Task ProcessClaimedEvent(IChroniclerRepository repository, Registry.V1.Transaction transaction) + { + var fid = transaction.Header.FederatedStreamId.ToModel(); + var claimedEvent = Electricity.V1.ClaimedEvent.Parser.ParseFrom(transaction.Payload); + var allocationId = Guid.Parse(claimedEvent.AllocationId.Value); + + var allocation = await repository.GetClaimAllocation(fid, allocationId); + if (allocation != null) + { + var claimIntent = await repository.GetClaimIntent(allocation.ClaimIntentId); + + await repository.InsertClaimRecord(new ClaimRecord + { + Id = Guid.NewGuid(), + RegistryName = fid.RegistryName, + CertificateId = fid.StreamId, + Quantity = claimIntent.Quantity, + RandomR = claimIntent.RandomR, + }); + await repository.DeleteClaimIntent(claimIntent.Id); + await repository.DeleteClaimAllocation(allocation.Id); + } + else + { + _logger.LogTrace("Claim for certificate {registry}-{certificateId} not relevant", fid.RegistryName, fid.StreamId); + } + } +} diff --git a/src/ProjectOrigin.Chronicler.Server/BlockReader/IBlockReaderJob.cs b/src/ProjectOrigin.Chronicler.Server/BlockReader/IBlockReaderJob.cs new file mode 100644 index 0000000..f5c94b4 --- /dev/null +++ b/src/ProjectOrigin.Chronicler.Server/BlockReader/IBlockReaderJob.cs @@ -0,0 +1,10 @@ +using System.Threading; +using System.Threading.Tasks; + +namespace ProjectOrigin.Chronicler.Server.BlockReader; + +public interface IBlockReaderJob +{ + Task ProcessAllRegistries(CancellationToken cancellationToken); + Task ProcessRegistryBlocks(string registryName, int previousBlockHeight, CancellationToken cancellationToken); +} diff --git a/src/ProjectOrigin.Chronicler.Server/DatabaseScripts/1.sql b/src/ProjectOrigin.Chronicler.Server/DatabaseScripts/1.sql index 40bb048..0fd3899 100644 --- a/src/ProjectOrigin.Chronicler.Server/DatabaseScripts/1.sql +++ b/src/ProjectOrigin.Chronicler.Server/DatabaseScripts/1.sql @@ -2,7 +2,57 @@ CREATE TABLE claim_intents ( id uuid NOT NULL PRIMARY KEY, registry_name VARCHAR(64) NOT NULL, certificate_id uuid NOT NULL, - commitment bytea NOT NULL, + commitment_hash bytea NOT NULL, + quantity bigint NOT NULL, + random_r bytea NOT NULL +); + +CREATE TABLE read_blocks ( + registry_name VARCHAR(64) NOT NULL PRIMARY KEY, + block_height int NOT NULL, + read_at timestamp with time zone NOT NULL +); + +CREATE OR REPLACE FUNCTION insert_read_block() +RETURNS TRIGGER AS $$ +BEGIN + PERFORM 1 FROM read_blocks WHERE registry_name = NEW.registry_name; + + IF NOT FOUND THEN + INSERT INTO read_blocks (registry_name, block_height, read_at) + VALUES (NEW.registry_name, -1, NOW()); + END IF; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER insert_read_block_trigger +AFTER INSERT ON claim_intents +FOR EACH ROW +EXECUTE FUNCTION insert_read_block(); + +CREATE TABLE certificate_infos ( + registry_name VARCHAR(64) NOT NULL, + certificate_id uuid NOT NULL, + start_time timestamp with time zone NOT NULL, + end_time 
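The claim_intents table above stores commitment_hash rather than the raw commitment; ChroniclerService further down in this patch computes it as SHA-256 over the Pedersen commitment bytes, and ProcessAllocatedEvent matches the AllocatedEvent source-slice hash against that value. A minimal sketch of the hashing step, where commitmentBytes stands in for commitmentInfo.Commitment.C:

using System.Security.Cryptography;

static byte[] ComputeCommitmentHash(byte[] commitmentBytes)
{
    // Stored in claim_intents.commitment_hash and compared against the
    // Consumption/ProductionSourceSliceHash carried by AllocatedEvent.
    return SHA256.HashData(commitmentBytes);
}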
timestamp with time zone NOT NULL, + grid_area VARCHAR(64) NOT NULL, + PRIMARY KEY (registry_name, certificate_id) +); + +CREATE TABLE claim_allocations ( + id uuid NOT NULL PRIMARY KEY, + claim_intent_id uuid NOT NULL, + registry_name VARCHAR(64) NOT NULL, + certificate_id uuid NOT NULL, + allocation_id uuid NOT NULL +); + +CREATE TABLE claim_records ( + id uuid NOT NULL PRIMARY KEY, + registry_name VARCHAR(64) NOT NULL, + certificate_id uuid NOT NULL, quantity bigint NOT NULL, random_r bytea NOT NULL ); diff --git a/src/ProjectOrigin.Chronicler.Server/Exceptions/RegistryNotKnownException.cs b/src/ProjectOrigin.Chronicler.Server/Exceptions/RegistryNotKnownException.cs new file mode 100644 index 0000000..e220e30 --- /dev/null +++ b/src/ProjectOrigin.Chronicler.Server/Exceptions/RegistryNotKnownException.cs @@ -0,0 +1,12 @@ +using System; + +namespace ProjectOrigin.Chronicler.Server.Exceptions +{ + public class RegistryNotKnownException : Exception + { + public RegistryNotKnownException(string registryName) + : base($"Registry {registryName} is not known") + { + } + } +} diff --git a/src/ProjectOrigin.Chronicler.Server/IRegistryClientFactory.cs b/src/ProjectOrigin.Chronicler.Server/IRegistryClientFactory.cs new file mode 100644 index 0000000..80ab8cb --- /dev/null +++ b/src/ProjectOrigin.Chronicler.Server/IRegistryClientFactory.cs @@ -0,0 +1,9 @@ +using Grpc.Net.Client; + +namespace ProjectOrigin.Chronicler.Server; + +public interface IRegistryClientFactory +{ + GrpcChannel GetChannel(string registryName); + Registry.V1.RegistryService.RegistryServiceClient GetClient(string registryName); +} diff --git a/src/ProjectOrigin.Chronicler.Server/IRegistryService.cs b/src/ProjectOrigin.Chronicler.Server/IRegistryService.cs new file mode 100644 index 0000000..1efd2f5 --- /dev/null +++ b/src/ProjectOrigin.Chronicler.Server/IRegistryService.cs @@ -0,0 +1,8 @@ +using System.Threading.Tasks; + +namespace ProjectOrigin.Chronicler.Server; + +public interface IRegistryService +{ + Task GetNextBlock(string registryName, int previousBlockHeight); +} diff --git a/src/ProjectOrigin.Chronicler.Server/Models/CertificateInfo.cs b/src/ProjectOrigin.Chronicler.Server/Models/CertificateInfo.cs new file mode 100644 index 0000000..5795839 --- /dev/null +++ b/src/ProjectOrigin.Chronicler.Server/Models/CertificateInfo.cs @@ -0,0 +1,12 @@ +using System; + +namespace ProjectOrigin.Chronicler.Server.Models; + +public record CertificateInfo +{ + public required string RegistryName { get; init; } + public required Guid CertificateId { get; init; } + public required DateTimeOffset StartTime { get; init; } + public required DateTimeOffset EndTime { get; init; } + public required string GridArea { get; init; } +} diff --git a/src/ProjectOrigin.Chronicler.Server/Models/ClaimAllocation.cs b/src/ProjectOrigin.Chronicler.Server/Models/ClaimAllocation.cs new file mode 100644 index 0000000..1549e01 --- /dev/null +++ b/src/ProjectOrigin.Chronicler.Server/Models/ClaimAllocation.cs @@ -0,0 +1,12 @@ +using System; + +namespace ProjectOrigin.Chronicler.Server.Models; + +public record ClaimAllocation +{ + public required Guid Id { get; init; } + public required Guid ClaimIntentId { get; init; } + public required string RegistryName { get; init; } + public required Guid CertificateId { get; init; } + public required Guid AllocationId { get; init; } +} diff --git a/src/ProjectOrigin.Chronicler.Server/Models/ClaimIntent.cs b/src/ProjectOrigin.Chronicler.Server/Models/ClaimIntent.cs index 72c7a3c..5953f97 100644 --- 
a/src/ProjectOrigin.Chronicler.Server/Models/ClaimIntent.cs +++ b/src/ProjectOrigin.Chronicler.Server/Models/ClaimIntent.cs @@ -9,5 +9,5 @@ public record ClaimIntent public required Guid CertificateId { get; init; } public required long Quantity { get; init; } public required byte[] RandomR { get; init; } - public required byte[] Commitment { get; init; } + public required byte[] CommitmentHash { get; init; } } diff --git a/src/ProjectOrigin.Chronicler.Server/Models/ClaimRecord.cs b/src/ProjectOrigin.Chronicler.Server/Models/ClaimRecord.cs new file mode 100644 index 0000000..643495b --- /dev/null +++ b/src/ProjectOrigin.Chronicler.Server/Models/ClaimRecord.cs @@ -0,0 +1,12 @@ +using System; + +namespace ProjectOrigin.Chronicler.Server.Models; + +public record ClaimRecord +{ + public required Guid Id { get; init; } + public required string RegistryName { get; init; } + public required Guid CertificateId { get; init; } + public required long Quantity { get; init; } + public required byte[] RandomR { get; init; } +} diff --git a/src/ProjectOrigin.Chronicler.Server/Models/FederatedCertificateId.cs b/src/ProjectOrigin.Chronicler.Server/Models/FederatedCertificateId.cs new file mode 100644 index 0000000..faa7952 --- /dev/null +++ b/src/ProjectOrigin.Chronicler.Server/Models/FederatedCertificateId.cs @@ -0,0 +1,33 @@ +using System; + +namespace ProjectOrigin.Chronicler.Server.Models; + +public record FederatedCertificateId +{ + public required string RegistryName { get; init; } + public required Guid StreamId { get; init; } +} + +public static class FederatedCertificateIdExtensions +{ + public static FederatedCertificateId ToModel(this Common.V1.FederatedStreamId fid) + { + return new FederatedCertificateId + { + RegistryName = fid.Registry, + StreamId = Guid.Parse(fid.StreamId.Value) + }; + } + + public static Common.V1.FederatedStreamId ToProto(this FederatedCertificateId fid) + { + return new Common.V1.FederatedStreamId + { + Registry = fid.RegistryName, + StreamId = new Common.V1.Uuid + { + Value = fid.StreamId.ToString() + } + }; + } +} diff --git a/src/ProjectOrigin.Chronicler.Server/Models/ReadBlock.cs b/src/ProjectOrigin.Chronicler.Server/Models/ReadBlock.cs new file mode 100644 index 0000000..f351203 --- /dev/null +++ b/src/ProjectOrigin.Chronicler.Server/Models/ReadBlock.cs @@ -0,0 +1,10 @@ +using System; + +namespace ProjectOrigin.Chronicler.Server.Models; + +public record LastReadBlock +{ + public required string RegistryName { get; init; } + public required int BlockHeight { get; init; } + public required DateTimeOffset ReadAt { get; init; } +} diff --git a/src/ProjectOrigin.Chronicler.Server/Options/ChroniclerOptions.cs b/src/ProjectOrigin.Chronicler.Server/Options/ChroniclerOptions.cs index 9405ba1..06d82bd 100644 --- a/src/ProjectOrigin.Chronicler.Server/Options/ChroniclerOptions.cs +++ b/src/ProjectOrigin.Chronicler.Server/Options/ChroniclerOptions.cs @@ -11,11 +11,13 @@ public record ChroniclerOptions : IValidatableObject public const string SectionPrefix = "chronicler"; public required string SigningKeyFilename { get; init; } - - private readonly Lazy _privateKey; + public required TimeSpan JobInterval { get; init; } + public IEnumerable GridAreas { get; init; } = new List(); public IPrivateKey GetPrivateKey() => _privateKey.Value; + private readonly Lazy _privateKey; + public ChroniclerOptions() { _privateKey = new Lazy(() => diff --git a/src/ProjectOrigin.Chronicler.Server/Options/NetworkOptions.cs b/src/ProjectOrigin.Chronicler.Server/Options/NetworkOptions.cs new file mode 
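FederatedCertificateId and its extension methods above translate between the protobuf FederatedStreamId and the internal model. A short usage sketch with a throwaway id:

using System;
using ProjectOrigin.Chronicler.Server.Models;

var proto = new Common.V1.FederatedStreamId
{
    Registry = "narniaRegistry",
    StreamId = new Common.V1.Uuid { Value = Guid.NewGuid().ToString() },
};

FederatedCertificateId fid = proto.ToModel();             // registry name plus parsed Guid
Common.V1.FederatedStreamId roundTripped = fid.ToProto(); // back to the wire type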
100644 index 0000000..392a2e2 --- /dev/null +++ b/src/ProjectOrigin.Chronicler.Server/Options/NetworkOptions.cs @@ -0,0 +1,8 @@ +using System.Collections.Generic; + +namespace ProjectOrigin.Chronicler.Server.Options; + +public record NetworkOptions +{ + public required IDictionary RegistryUrls { get; init; } +} diff --git a/src/ProjectOrigin.Chronicler.Server/ProjectOrigin.Chronicler.Server.csproj b/src/ProjectOrigin.Chronicler.Server/ProjectOrigin.Chronicler.Server.csproj index 2760a14..2dc0ecb 100644 --- a/src/ProjectOrigin.Chronicler.Server/ProjectOrigin.Chronicler.Server.csproj +++ b/src/ProjectOrigin.Chronicler.Server/ProjectOrigin.Chronicler.Server.csproj @@ -27,6 +27,12 @@ + + https://raw.githubusercontent.com/project-origin/registry/v1.3.0/src/Protos/registry.proto + + + https://raw.githubusercontent.com/project-origin/registry/v1.1.0/src/Protos/electricity.proto + https://raw.githubusercontent.com/project-origin/registry/v1.3.0/src/Protos/common.proto diff --git a/src/ProjectOrigin.Chronicler.Server/RegistryClientFactory.cs b/src/ProjectOrigin.Chronicler.Server/RegistryClientFactory.cs new file mode 100644 index 0000000..9c936c9 --- /dev/null +++ b/src/ProjectOrigin.Chronicler.Server/RegistryClientFactory.cs @@ -0,0 +1,63 @@ +using System.Collections.Concurrent; +using System.Net.Http; +using Grpc.Net.Client; +using Microsoft.Extensions.Options; +using ProjectOrigin.Chronicler.Server.Exceptions; +using ProjectOrigin.Chronicler.Server.Options; +using static ProjectOrigin.Registry.V1.RegistryService; + +namespace ProjectOrigin.Chronicler.Server; + +public class RegistryClientFactory : IRegistryClientFactory +{ + private readonly ConcurrentDictionary _registries = new ConcurrentDictionary(); + private readonly IOptionsMonitor _optionsMonitor; + + public RegistryClientFactory(IOptionsMonitor optionsMonitor) + { + _optionsMonitor = optionsMonitor; + _optionsMonitor.OnChange(UpdateNetworkOptions); + } + public GrpcChannel GetChannel(string registryName) + { + return _registries.GetOrAdd( + registryName, + (name) => + { + if (!_optionsMonitor.CurrentValue.RegistryUrls.TryGetValue(name, out var address)) + throw new RegistryNotKnownException($"Registry {name} not found in configuration"); + + return GrpcChannel.ForAddress(address, new GrpcChannelOptions + { + HttpHandler = new SocketsHttpHandler + { + EnableMultipleHttp2Connections = true, + } + }); + }); + } + + public RegistryServiceClient GetClient(string registryName) + { + return new RegistryServiceClient(GetChannel(registryName)); + } + + private void UpdateNetworkOptions(NetworkOptions options, string? 
_) + { + foreach (var record in _registries) + { + // If the registry is still in the configuration, and the address has changed, we need to recreate the channel + if (options.RegistryUrls.TryGetValue(record.Key, out var uri) + && record.Value.Target != uri + && _registries.TryRemove(record.Key, out var oldChannel)) + { + oldChannel.ShutdownAsync().Wait(); + } + // If the registry is no longer in the configuration, we need to remove the channel + else if (_registries.TryRemove(record.Key, out var removedChannel)) + { + removedChannel.ShutdownAsync().Wait(); + } + } + } +} diff --git a/src/ProjectOrigin.Chronicler.Server/RegistryService.cs b/src/ProjectOrigin.Chronicler.Server/RegistryService.cs new file mode 100644 index 0000000..8a1ca7e --- /dev/null +++ b/src/ProjectOrigin.Chronicler.Server/RegistryService.cs @@ -0,0 +1,28 @@ +using System.Linq; +using System.Threading.Tasks; + +namespace ProjectOrigin.Chronicler.Server; + +public class RegistryService : IRegistryService +{ + private readonly IRegistryClientFactory _registryClientFactory; + + public RegistryService(IRegistryClientFactory registryClientFactory) + { + _registryClientFactory = registryClientFactory; + } + + public async Task GetNextBlock(string registryName, int previousBlockHeight) + { + var client = _registryClientFactory.GetClient(registryName); + + var response = await client.GetBlocksAsync(new Registry.V1.GetBlocksRequest + { + Skip = previousBlockHeight, + Limit = 1, + IncludeTransactions = true, + }); + + return response.Blocks.SingleOrDefault(); + } +} diff --git a/src/ProjectOrigin.Chronicler.Server/Repositories/ChroniclerRepository.cs b/src/ProjectOrigin.Chronicler.Server/Repositories/ChroniclerRepository.cs index 90d595e..cc8cc5c 100644 --- a/src/ProjectOrigin.Chronicler.Server/Repositories/ChroniclerRepository.cs +++ b/src/ProjectOrigin.Chronicler.Server/Repositories/ChroniclerRepository.cs @@ -1,21 +1,148 @@ +using System; +using System.Collections.Generic; using System.Data; using System.Threading.Tasks; using Dapper; using ProjectOrigin.Chronicler.Server.Models; using ProjectOrigin.ServiceCommon.Database; -using ProjectOrigin.WalletSystem.Server.Repositories; -namespace ProjectOrigin.WalletSystem.Server.Repositories; +namespace ProjectOrigin.Chronicler.Server.Repositories; public class ChroniclerRepository : AbstractRepository, IChroniclerRepository { - public ChroniclerRepository(IDbTransaction transaction) : base(transaction) { } + public ChroniclerRepository(IDbTransaction transaction) : base(transaction) + { + DefaultTypeMap.MatchNamesWithUnderscores = true; + } public Task InsertClaimIntent(ClaimIntent certificate) { return Connection.ExecuteAsync( - @"INSERT INTO claim_intents(id, registry_name, certificate_id, quantity, random_r, commitment) - VALUES (@id, @registryName, @certificateId, @quantity, @randomR, @commitment)", + @"INSERT INTO claim_intents(id, registry_name, certificate_id, quantity, random_r, commitment_hash) + VALUES (@id, @registryName, @certificateId, @quantity, @randomR, @commitmentHash)", certificate); } + + public Task HasClaimIntent(FederatedCertificateId fid, byte[] sliceCommitmentHash) + { + return Connection.ExecuteScalarAsync( + @"SELECT EXISTS( + SELECT 1 + FROM claim_intents + WHERE registry_name = @registryName + AND certificate_id = @streamId + AND commitment_hash = @sliceCommitmentHash)", + new + { + fid.RegistryName, + fid.StreamId, + sliceCommitmentHash + }); + } + + public Task GetClaimIntent(Guid claimIntentId) + { + return Connection.QuerySingleAsync( + @"SELECT id, 
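RegistryClientFactory above caches one gRPC channel per registry, drops cached channels when the network configuration changes so they are recreated on next use, and throws RegistryNotKnownException for names missing from NetworkOptions; RegistryService wraps it to fetch one block at a time. A consuming sketch with illustrative registry name and height:

using System;
using System.Threading.Tasks;
using ProjectOrigin.Chronicler.Server;
using ProjectOrigin.Chronicler.Server.Exceptions;

async Task ReadOneBlock(IRegistryService registryService)
{
    try
    {
        // Null when the registry has published nothing beyond height 41.
        var block = await registryService.GetNextBlock("narniaRegistry", previousBlockHeight: 41);
        if (block is null)
            Console.WriteLine("Registry is fully read.");
    }
    catch (RegistryNotKnownException)
    {
        // The registry name is not present in the loaded network configuration.
    }
}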
registry_name, certificate_id, quantity, random_r, commitment_hash + FROM claim_intents + WHERE id = @claimIntentId", + new + { + claimIntentId + }); + } + + public Task GetClaimIntent(FederatedCertificateId fid, byte[] sliceCommitmentHash) + { + return Connection.QuerySingleAsync( + @"SELECT id, registry_name, certificate_id, quantity, random_r, commitment_hash + FROM claim_intents + WHERE registry_name = @registryName + AND certificate_id = @streamId + AND commitment_hash = @sliceCommitmentHash", + new + { + fid.RegistryName, + fid.StreamId, + sliceCommitmentHash + }); + } + + public Task DeleteClaimIntent(Guid id) + { + return Connection.ExecuteAsync( + @"DELETE FROM claim_intents + WHERE id = @id", + new + { + id + }); + } + + public Task> GetRegistriesToCrawl() + { + return Connection.QueryAsync( + @"SELECT registry_name, block_height, read_at + FROM read_blocks"); + } + + public Task UpsertReadBlock(LastReadBlock block) + { + return Connection.ExecuteAsync( + @"INSERT INTO read_blocks(registry_name, block_height, read_at) + VALUES (@registryName, @blockHeight, @readAt) + ON CONFLICT (registry_name) DO UPDATE SET block_height = GREATEST(read_blocks.block_height, @blockHeight), read_at = @readAt", + block); + } + + public Task InsertCertificateInfo(CertificateInfo certificateInfo) + { + return Connection.ExecuteAsync( + @"INSERT INTO certificate_infos(registry_name, certificate_id, start_time, end_time, grid_area) + VALUES (@registryName, @certificateId, @startTime, @endTime, @gridArea)", + certificateInfo); + } + + public Task InsertClaimAllocation(ClaimAllocation allocation) + { + return Connection.ExecuteAsync( + @"INSERT INTO claim_allocations(id, claim_intent_id, registry_name, certificate_id, allocation_id) + VALUES (@id, @claimIntentId, @registryName, @certificateId, @allocationId)", + allocation); + } + + public Task GetClaimAllocation(FederatedCertificateId fid, Guid allocationId) + { + return Connection.QuerySingleAsync( + @"SELECT id, claim_intent_id, registry_name, certificate_id, allocation_id + FROM claim_allocations + WHERE registry_name = @registryName + AND certificate_id = @certificateId + AND allocation_id = @allocationId", + new + { + registryName = fid.RegistryName, + certificateId = fid.StreamId, + allocationId + }); + } + + public Task DeleteClaimAllocation(Guid id) + { + return Connection.ExecuteAsync( + @"DELETE FROM claim_allocations + WHERE id = @id", + new + { + id + }); + } + + public Task InsertClaimRecord(ClaimRecord record) + { + return Connection.ExecuteAsync( + @"INSERT INTO claim_records(id, registry_name, certificate_id, quantity, random_r) + VALUES (@id, @registryName, @certificateId, @quantity, @randomR)", + record); + } } diff --git a/src/ProjectOrigin.Chronicler.Server/Repositories/IChroniclerRepository.cs b/src/ProjectOrigin.Chronicler.Server/Repositories/IChroniclerRepository.cs index 8739b89..e8f3ee1 100644 --- a/src/ProjectOrigin.Chronicler.Server/Repositories/IChroniclerRepository.cs +++ b/src/ProjectOrigin.Chronicler.Server/Repositories/IChroniclerRepository.cs @@ -1,9 +1,27 @@ +using System; +using System.Collections.Generic; using System.Threading.Tasks; using ProjectOrigin.Chronicler.Server.Models; -namespace ProjectOrigin.WalletSystem.Server.Repositories; +namespace ProjectOrigin.Chronicler.Server.Repositories; public interface IChroniclerRepository { Task InsertClaimIntent(ClaimIntent certificate); + Task HasClaimIntent(FederatedCertificateId fid, byte[] sliceCommitmentHash); + Task GetClaimIntent(Guid claimIntentId); + Task 
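ChroniclerRepository above relies on Dapper with MatchNamesWithUnderscores enabled, so the snake_case columns map directly onto the record properties, and UpsertReadBlock only ever moves a registry's cursor forward thanks to GREATEST. The calling pattern mirrors ProcessBlock in BlockReaderJob; a condensed sketch, with the generic argument on GetRepository inferred from the ServiceCommon unit-of-work API used elsewhere in the patch:

using System;
using System.Threading.Tasks;
using ProjectOrigin.Chronicler.Server.Models;
using ProjectOrigin.Chronicler.Server.Repositories;
using ProjectOrigin.ServiceCommon.Database;

static async Task RecordProgress(IUnitOfWork unitOfWork, string registryName, int blockHeight)
{
    var repository = unitOfWork.GetRepository<IChroniclerRepository>();

    await repository.UpsertReadBlock(new LastReadBlock
    {
        RegistryName = registryName,
        BlockHeight = blockHeight,
        ReadAt = DateTimeOffset.UtcNow,
    });

    // Nothing is persisted until the unit of work is committed.
    unitOfWork.Commit();
}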
GetClaimIntent(FederatedCertificateId fid, byte[] sliceCommitmentHash); + Task DeleteClaimIntent(Guid id); + + Task> GetRegistriesToCrawl(); + Task UpsertReadBlock(LastReadBlock block); + + Task InsertCertificateInfo(CertificateInfo certificateInfo); + + Task InsertClaimAllocation(ClaimAllocation allocation); + Task GetClaimAllocation(FederatedCertificateId fid, Guid allocationId); + Task DeleteClaimAllocation(Guid id); + + Task InsertClaimRecord(ClaimRecord record); + } diff --git a/src/ProjectOrigin.Chronicler.Server/Services/ChroniclerService.cs b/src/ProjectOrigin.Chronicler.Server/Services/ChroniclerService.cs index a4779b2..43caa7a 100644 --- a/src/ProjectOrigin.Chronicler.Server/Services/ChroniclerService.cs +++ b/src/ProjectOrigin.Chronicler.Server/Services/ChroniclerService.cs @@ -8,11 +8,12 @@ using ProjectOrigin.HierarchicalDeterministicKeys.Interfaces; using ProjectOrigin.PedersenCommitment; using ProjectOrigin.ServiceCommon.Database; -using ProjectOrigin.WalletSystem.Server.Repositories; +using ProjectOrigin.Chronicler.Server.Repositories; +using System.Security.Cryptography; namespace ProjectOrigin.Chronicler.Server.Services; -public class ChroniclerService : V1.RegistryService.RegistryServiceBase +public class ChroniclerService : V1.ChroniclerService.ChroniclerServiceBase { private readonly IUnitOfWork _unitOfWork; private readonly IPrivateKey _signingKey; @@ -34,7 +35,7 @@ public ChroniclerService(IUnitOfWork unitOfWork, IOptions chr CertificateId = Guid.Parse(request.CertificateId.StreamId.Value), Quantity = request.Quantity, RandomR = randomR, - Commitment = commitmentInfo.Commitment.C.ToArray(), + CommitmentHash = SHA256.HashData(commitmentInfo.Commitment.C), }; await _unitOfWork.GetRepository().InsertClaimIntent(claimIntent); diff --git a/src/ProjectOrigin.Chronicler.Server/Startup.cs b/src/ProjectOrigin.Chronicler.Server/Startup.cs index 6b14b4f..9002c36 100644 --- a/src/ProjectOrigin.Chronicler.Server/Startup.cs +++ b/src/ProjectOrigin.Chronicler.Server/Startup.cs @@ -7,7 +7,11 @@ using ProjectOrigin.ServiceCommon.Database; using ProjectOrigin.ServiceCommon.Grpc; using ProjectOrigin.ServiceCommon.Otlp; -using ProjectOrigin.WalletSystem.Server.Repositories; +using ProjectOrigin.Chronicler.Server.Repositories; +using Microsoft.Extensions.Options; +using ProjectOrigin.ServiceCommon.UriOptionsLoader; +using ProjectOrigin.Chronicler.Server.BlockReader; +using ProjectOrigin.ServiceCommon.Database.Postgres; namespace ProjectOrigin.Chronicler.Server; @@ -30,10 +34,19 @@ public void ConfigureServices(IServiceCollection services) options.AddRepository(); }); + services.AddSingleton(); + services.AddTransient(); + services.AddOptions() .BindConfiguration(ChroniclerOptions.SectionPrefix) .ValidateDataAnnotations() .ValidateOnStart(); + + services.AddHostedService(); + services.AddTransient(); + + services.AddHttpClient(); + services.ConfigureUriOptionsLoader("network"); } public void Configure(IApplicationBuilder app, IWebHostEnvironment env) diff --git a/src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/ConfigureExtensions.cs b/src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/ConfigureExtensions.cs new file mode 100644 index 0000000..f7a28bb --- /dev/null +++ b/src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/ConfigureExtensions.cs @@ -0,0 +1,26 @@ +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Options; + +namespace ProjectOrigin.ServiceCommon.UriOptionsLoader; + +public static class ConfigureExtensions +{ + /// + /// Configures the 
UriOptionsLoaderService to load the Uri json object into the specified type
+    /// </summary>
+    /// <typeparam name="TOption">The type to load the Uri json object into</typeparam>
+    /// <param name="services">The Microsoft.Extensions.DependencyInjection.IServiceCollection.</param>
+    /// <param name="configSectionPath">The name of the configuration section to bind configuration from</param>
+    public static void ConfigureUriOptionsLoader<TOption>(this IServiceCollection services, string configSectionPath) where TOption : class
+    {
+        services.AddOptions<UriOptions>()
+            .BindConfiguration(configSectionPath)
+            .ValidateDataAnnotations()
+            .ValidateOnStart();
+
+        services.AddSingleton<IConfigureOptions<TOption>, UriOptionsConfigure<TOption>>();
+        services.AddSingleton<IOptionsChangeTokenSource<TOption>, UriOptionsChangeTokenSource<TOption>>();
+        services.AddSingleton<UriOptionsLoaderService<TOption>>();
+        services.AddHostedService(provider => provider.GetRequiredService<UriOptionsLoaderService<TOption>>());
+    }
+}
diff --git a/src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/UriOptions.cs b/src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/UriOptions.cs
new file mode 100644
index 0000000..91460dc
--- /dev/null
+++ b/src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/UriOptions.cs
@@ -0,0 +1,12 @@
+using System;
+using System.ComponentModel.DataAnnotations;
+
+namespace ProjectOrigin.ServiceCommon.UriOptionsLoader;
+
+public record UriOptions
+{
+    [Required(AllowEmptyStrings = false)]
+    public required Uri ConfigurationUri { get; init; }
+
+    public TimeSpan RefreshInterval { get; init; } = TimeSpan.FromMinutes(15);
+}
diff --git a/src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/UriOptionsChangeTokenSource.cs b/src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/UriOptionsChangeTokenSource.cs
new file mode 100644
index 0000000..2e19759
--- /dev/null
+++ b/src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/UriOptionsChangeTokenSource.cs
@@ -0,0 +1,21 @@
+using Microsoft.Extensions.Options;
+using Microsoft.Extensions.Primitives;
+
+namespace ProjectOrigin.ServiceCommon.UriOptionsLoader;
+
+public class UriOptionsChangeTokenSource<TOptions> : IOptionsChangeTokenSource<TOptions> where TOptions : class
+{
+    private readonly UriOptionsLoaderService<TOptions> _httpOptionsLoader;
+
+    public UriOptionsChangeTokenSource(UriOptionsLoaderService<TOptions> httpOptionsLoader)
+    {
+        _httpOptionsLoader = httpOptionsLoader;
+    }
+
+    public string?
Name => null; + + public IChangeToken GetChangeToken() + { + return _httpOptionsLoader.OptionChangeToken; + } +} diff --git a/src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/UriOptionsConfigure.cs b/src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/UriOptionsConfigure.cs new file mode 100644 index 0000000..c99c4a4 --- /dev/null +++ b/src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/UriOptionsConfigure.cs @@ -0,0 +1,18 @@ +using Microsoft.Extensions.Options; + +namespace ProjectOrigin.ServiceCommon.UriOptionsLoader; + +public class UriOptionsConfigure : IConfigureOptions where TOptions : class +{ + private readonly UriOptionsLoaderService _httpOptionsLoader; + + public UriOptionsConfigure(UriOptionsLoaderService httpOptionsLoader) + { + _httpOptionsLoader = httpOptionsLoader; + } + + public void Configure(TOptions options) + { + _httpOptionsLoader.Configure(options); + } +} diff --git a/src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/UriOptionsLoaderService.cs b/src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/UriOptionsLoaderService.cs new file mode 100644 index 0000000..c8b9522 --- /dev/null +++ b/src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/UriOptionsLoaderService.cs @@ -0,0 +1,130 @@ +using System; +using System.Net.Http; +using System.Net.Http.Json; +using System.Reflection; +using System.Text.Json; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.Hosting; +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Options; +using Microsoft.Extensions.Primitives; + +namespace ProjectOrigin.ServiceCommon.UriOptionsLoader; + +public sealed class UriOptionsLoaderService : BackgroundService, IDisposable where TOption : class +{ + private readonly ILogger> _logger; + private readonly IHttpClientFactory _httpClientFactory; + private readonly UriOptions _originOptions; + private readonly PropertyInfo[] _optionProperties; + private CancellationTokenSource _changeTokenSource; + private TOption _option; + + public IChangeToken OptionChangeToken { get; private set; } + + public UriOptionsLoaderService( + ILogger> logger, + IHttpClientFactory httpClientFactory, + IOptions originOptions) + { + _logger = logger; + _httpClientFactory = httpClientFactory; + _originOptions = originOptions.Value; + _optionProperties = typeof(TOption).GetProperties(); + _changeTokenSource = new CancellationTokenSource(); + OptionChangeToken = new CancellationChangeToken(_changeTokenSource.Token); + + _option = LoadRemoteOptions(CancellationToken.None).GetAwaiter().GetResult(); + } + + public void Configure(TOption target) + { + foreach (var property in _optionProperties) + { + if (property.CanRead && property.CanWrite) + { + var targetValue = property.GetValue(target); + var sourceValue = property.GetValue(_option); + + if (!Equals(targetValue, sourceValue)) + { + property.SetValue(target, sourceValue); + } + } + } + } + + protected override async Task ExecuteAsync(CancellationToken stoppingToken) + { + using var timer = new PeriodicTimer(_originOptions.RefreshInterval); + + while (!stoppingToken.IsCancellationRequested && + await timer.WaitForNextTickAsync(stoppingToken)) + { + try + { + var newOptions = await LoadRemoteOptions(stoppingToken); + + if (!newOptions.Equals(_option)) + { + _option = newOptions; + await NotifyChanges(); + } + } + catch (Exception ex) + { + _logger.LogError(ex, "Error while loading options from {uri}", _originOptions.ConfigurationUri); + } + } + } + + private async Task NotifyChanges() + { + var newTokenSource = new 
CancellationTokenSource(); + OptionChangeToken = new CancellationChangeToken(newTokenSource.Token); + await _changeTokenSource.CancelAsync(); + _changeTokenSource.Dispose(); + _changeTokenSource = newTokenSource; + } + + private async Task LoadRemoteOptions(CancellationToken stoppingToken) + { + if (_originOptions.ConfigurationUri.Scheme == Uri.UriSchemeHttp || + _originOptions.ConfigurationUri.Scheme == Uri.UriSchemeHttps) + { + return await LoadFromHttp(stoppingToken); + } + else if (_originOptions.ConfigurationUri.Scheme == Uri.UriSchemeFile) + { + return await LoadFromFile(stoppingToken); + } + else + { + throw new NotSupportedException($"Unsupported URI scheme: {_originOptions.ConfigurationUri.Scheme}"); + } + } + + private async Task LoadFromHttp(CancellationToken stoppingToken) + { + using var httpClient = _httpClientFactory.CreateClient(); + var response = await httpClient.GetAsync(_originOptions.ConfigurationUri, stoppingToken); + response.EnsureSuccessStatusCode(); + + return await response.Content.ReadFromJsonAsync(stoppingToken) + ?? throw new JsonException("Failed to read options from response."); + } + + private async Task LoadFromFile(CancellationToken stoppingToken) + { + var json = await System.IO.File.ReadAllTextAsync(_originOptions.ConfigurationUri.LocalPath, stoppingToken); + return JsonSerializer.Deserialize(json) + ?? throw new JsonException("Failed to read options from file."); + } + + public override void Dispose() + { + _changeTokenSource.Dispose(); + base.Dispose(); + } +} diff --git a/src/ProjectOrigin.Chronicler.Server/appsettings.json b/src/ProjectOrigin.Chronicler.Server/appsettings.json index 60530a5..e714c4b 100644 --- a/src/ProjectOrigin.Chronicler.Server/appsettings.json +++ b/src/ProjectOrigin.Chronicler.Server/appsettings.json @@ -9,8 +9,12 @@ "AllowedHosts": "*", "Kestrel": { "Endpoints": { - "Grpc": { + "Rest": { "Url": "http://*:5000", + "Protocols": "Http1" + }, + "Grpc": { + "Url": "http://*:5001", "Protocols": "Http2" } } diff --git a/src/ProjectOrigin.Chronicler.Test/BlockReaderBackgroundServiceTests.cs b/src/ProjectOrigin.Chronicler.Test/BlockReaderBackgroundServiceTests.cs new file mode 100644 index 0000000..db060d0 --- /dev/null +++ b/src/ProjectOrigin.Chronicler.Test/BlockReaderBackgroundServiceTests.cs @@ -0,0 +1,89 @@ +using System.Threading.Tasks; +using Xunit; +using ProjectOrigin.Chronicler.Server.BlockReader; +using Moq; +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Options; +using ProjectOrigin.Chronicler.Server.Options; +using System; +using System.Threading; + +namespace ProjectOrigin.Chronicler.Test; + +public class BlockReaderBackgroundServiceTests +{ + + [Fact] + public async Task VerifyBackgroundService_CallsIBlockReaderJob_JobInterval() + { + // Arrange + var job = new Mock(); + + var serviceProvider = new Mock(); + serviceProvider.Setup(provider => provider.GetService(typeof(IBlockReaderJob))) + .Returns(job.Object); + var service = new BlockReaderBackgroundService( + Mock.Of>(), + Options.Create(new ChroniclerOptions + { + SigningKeyFilename = "", + GridAreas = [], + JobInterval = TimeSpan.FromMilliseconds(100) + }), + serviceProvider.Object); + + // Act + await service.StartAsync(default); + + // Assert + await Task.Delay(1000); + job.Verify(x => x.ProcessAllRegistries(It.IsAny()), Times.AtLeast(8)); + job.Verify(x => x.ProcessAllRegistries(It.IsAny()), Times.AtMost(14)); + } + + [Fact] + public async Task VerifyBackgroundService_FaultsWhenJobsFailsWithException() + { + // Arrange + var serviceProvider = new 
Mock(); + var logger = new Mock>(); + + serviceProvider.Setup(provider => provider.GetService(typeof(IBlockReaderJob))) + .Returns(new FakeJob()); + var service = new BlockReaderBackgroundService( + logger.Object, + Options.Create(new ChroniclerOptions + { + SigningKeyFilename = "", + GridAreas = [], + JobInterval = TimeSpan.FromSeconds(1) + }), + serviceProvider.Object); + + // Act + await service.StartAsync(default); + await Task.Delay(500); + + // Assert + logger.Verify(x => x.Log(LogLevel.Trace, 0, It.IsAny(), null, It.IsAny>()), Times.Once); + logger.Verify(x => x.Log(LogLevel.Critical, 0, It.IsAny(), It.IsAny(), It.IsAny>()), Times.Once); + logger.VerifyNoOtherCalls(); + } + + private class TestException : Exception + { + } + + private class FakeJob : IBlockReaderJob + { + public Task ProcessAllRegistries(CancellationToken cancellationToken) + { + throw new TestException(); + } + + public Task ProcessRegistryBlocks(string registryName, int previousBlockHeight, CancellationToken cancellationToken) + { + throw new TestException(); + } + } +} diff --git a/src/ProjectOrigin.Chronicler.Test/BlockReaderJobTests.cs b/src/ProjectOrigin.Chronicler.Test/BlockReaderJobTests.cs new file mode 100644 index 0000000..09c131d --- /dev/null +++ b/src/ProjectOrigin.Chronicler.Test/BlockReaderJobTests.cs @@ -0,0 +1,458 @@ +using System.Threading.Tasks; +using Xunit; +using ProjectOrigin.Chronicler.Server.BlockReader; +using Moq; +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Options; +using ProjectOrigin.Chronicler.Server.Options; +using System; +using ProjectOrigin.ServiceCommon.Database; +using ProjectOrigin.Chronicler.Server; +using ProjectOrigin.Chronicler.Server.Repositories; +using Google.Protobuf; +using ProjectOrigin.Chronicler.Server.Models; +using AutoFixture; +using Google.Protobuf.WellKnownTypes; + +namespace ProjectOrigin.Chronicler.Test; + +public class BlockReaderJobTests +{ + private const string RegistryName = "someRegistry"; + private const string GridArea = "Narnia"; + private readonly Mock _repository; + private readonly Mock> _logger; + private Mock _registryService; + private BlockReaderJob _job; + + public BlockReaderJobTests() + { + _repository = new Mock(); + _logger = new Mock>(); + var unitOfWork = new Mock(); + unitOfWork.Setup(x => x.GetRepository()).Returns(_repository.Object); + _registryService = new Mock(); + _job = new BlockReaderJob( + _logger.Object, + Options.Create(new ChroniclerOptions + { + SigningKeyFilename = "", + GridAreas = [GridArea], + JobInterval = TimeSpan.FromSeconds(1), + }), + unitOfWork.Object, + _registryService.Object); + + } + + [Fact] + public async Task Verify_GetNextBlock_Called() + { + // Arrange + + // Act + await _job.ProcessRegistryBlocks(RegistryName, 0, default); + + // Assert + _registryService.Verify(x => x.GetNextBlock(RegistryName, 0), Times.Once); + _registryService.VerifyNoOtherCalls(); + } + + [Fact] + public async Task Verify_GetNextBlock_IfReturnedCallsNext() + { + // Arrange + _registryService.Setup(x => x.GetNextBlock(RegistryName, 0)).ReturnsAsync(new Registry.V1.Block + { + Height = 1, + }); + + // Act + await _job.ProcessRegistryBlocks(RegistryName, 0, default); + + // Assert + _registryService.Verify(x => x.GetNextBlock(RegistryName, 0), Times.Once); + _registryService.Verify(x => x.GetNextBlock(RegistryName, 1), Times.Once); + _registryService.VerifyNoOtherCalls(); + } + + [Fact] + public async Task Verify_GetNextBlock_IfReturnedCallsNextAndNext() + { + // Arrange + _registryService.Setup(x => 
x.GetNextBlock(RegistryName, 0)).ReturnsAsync(new Registry.V1.Block + { + Height = 1, + }); + _registryService.Setup(x => x.GetNextBlock(RegistryName, 1)).ReturnsAsync(new Registry.V1.Block + { + Height = 2, + }); + + + // Act + await _job.ProcessRegistryBlocks(RegistryName, 0, default); + + // Assert + _registryService.Verify(x => x.GetNextBlock(RegistryName, 0), Times.Once); + _registryService.Verify(x => x.GetNextBlock(RegistryName, 1), Times.Once); + _registryService.Verify(x => x.GetNextBlock(RegistryName, 2), Times.Once); + _registryService.VerifyNoOtherCalls(); + } + + [Fact] + public async Task Verify_GetNextBlock_LogErrorIfOutOfOrder() + { + // Arrange + _registryService.Setup(x => x.GetNextBlock(RegistryName, 0)).ReturnsAsync(new Registry.V1.Block + { + Height = 1, + }); + _registryService.Setup(x => x.GetNextBlock(RegistryName, 1)).ReturnsAsync(new Registry.V1.Block + { + Height = 3, + }); + + // Act + await _job.ProcessRegistryBlocks(RegistryName, 0, default); + + // Assert + _registryService.Verify(x => x.GetNextBlock(RegistryName, 0), Times.Once); + _registryService.Verify(x => x.GetNextBlock(RegistryName, 1), Times.Once); + _registryService.Verify(x => x.GetNextBlock(RegistryName, 2), Times.Never); + _registryService.VerifyNoOtherCalls(); + + _repository.Verify(x => x.UpsertReadBlock(It.Is(x => x.BlockHeight == 1)), Times.Once); + _repository.VerifyNoOtherCalls(); + + _logger.Verify(x => x.Log(LogLevel.Error, 0, It.IsAny(), null, It.IsAny>()), Times.Once); + } + + [Fact] + public async Task Verify_CertificateInfo_Inserted() + { + // Arrange + var fixture = new Fixture(); + var streamId = fixture.Create(); + var start = fixture.Create(); + var end = fixture.Create(); + + var block = new Registry.V1.Block + { + Height = 1, + }; + block.AddIssued(new FederatedCertificateId { RegistryName = RegistryName, StreamId = streamId }, start, end, GridArea); + _registryService.Setup(x => x.GetNextBlock(RegistryName, 0)).ReturnsAsync(block); + + // Act + await _job.ProcessRegistryBlocks(RegistryName, 0, default); + + // Assert + _repository.Verify(x => x.UpsertReadBlock(It.Is(x => x.BlockHeight == 1)), Times.Once); + _repository.Verify(x => x.InsertCertificateInfo(It.Is( + x => x.GridArea == GridArea + && x.RegistryName == RegistryName + && x.CertificateId == streamId + && x.StartTime == start + && x.EndTime == end + )), Times.Once); + _repository.VerifyNoOtherCalls(); + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task Verify_Allocated_NotFound_NotInserted(bool isProduction) + { + // Arrange + var fixture = new Fixture(); + var certificateId = new FederatedCertificateId { RegistryName = RegistryName, StreamId = fixture.Create() }; + var allocationId = fixture.Create(); + var sliceHash = fixture.Create(); + var block = new Registry.V1.Block + { + Height = 1, + }; + if (isProduction) + { + block.AddProductionAllocated(allocationId, certificateId, sliceHash); + } + else + { + block.AddConsumptionAllocated(allocationId, certificateId, sliceHash); + } + _registryService.Setup(x => x.GetNextBlock(RegistryName, 0)).ReturnsAsync(block); + + // Act + await _job.ProcessRegistryBlocks(RegistryName, 0, default); + + // Assert + _repository.Verify(x => x.UpsertReadBlock(It.Is(x => x.BlockHeight == 1)), Times.Once); + _repository.Verify(x => x.GetClaimIntent(certificateId, It.IsAny()), Times.Once); + _repository.VerifyNoOtherCalls(); + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task Verify_ConsumptionAllocated_Found_Inserted(bool isProduction) 
+ { + // Arrange + var fixture = new Fixture(); + var claimIntentId = Guid.NewGuid(); + var certificateId = new FederatedCertificateId { RegistryName = RegistryName, StreamId = fixture.Create() }; + var allocationId = fixture.Create(); + var sliceHash = fixture.Create(); + var block = new Registry.V1.Block + { + Height = 1, + }; + + if (isProduction) + { + block.AddProductionAllocated(allocationId, certificateId, sliceHash); + } + else + { + block.AddConsumptionAllocated(allocationId, certificateId, sliceHash); + } + + _registryService.Setup(x => x.GetNextBlock(RegistryName, 0)).ReturnsAsync(block); + _repository.Setup(x => x.GetClaimIntent(certificateId, sliceHash)).ReturnsAsync(new ClaimIntent + { + Id = claimIntentId, + RegistryName = certificateId.RegistryName, + CertificateId = certificateId.StreamId, + CommitmentHash = sliceHash, + Quantity = fixture.Create(), + RandomR = fixture.Create(), + }); + + // Act + await _job.ProcessRegistryBlocks(RegistryName, 0, default); + + // Assert + _repository.Verify(x => x.UpsertReadBlock(It.Is(x => x.BlockHeight == 1)), Times.Once); + _repository.Verify(x => x.GetClaimIntent(certificateId, sliceHash), Times.Once); + _repository.Verify(x => x.InsertClaimAllocation(It.Is( + x => x.Id != Guid.Empty + && x.AllocationId == allocationId + && x.RegistryName == certificateId.RegistryName + && x.CertificateId == certificateId.StreamId + && x.ClaimIntentId == claimIntentId + )), Times.Once); + _repository.VerifyNoOtherCalls(); + } + + [Fact] + public async Task Verify_Claimed_NotFound_NotInserted() + { + // Arrange + var fixture = new Fixture(); + var certificateId = new FederatedCertificateId { RegistryName = RegistryName, StreamId = fixture.Create() }; + var allocationId = fixture.Create(); + var sliceHash = fixture.Create(); + var block = new Registry.V1.Block + { + Height = 1, + }; + block.AddClaim(allocationId, certificateId); + _registryService.Setup(x => x.GetNextBlock(RegistryName, 0)).ReturnsAsync(block); + + // Act + await _job.ProcessRegistryBlocks(RegistryName, 0, default); + + // Assert + _repository.Verify(x => x.UpsertReadBlock(It.Is(x => x.BlockHeight == 1)), Times.Once); + _repository.Verify(x => x.GetClaimAllocation(certificateId, allocationId), Times.Once); + _repository.VerifyNoOtherCalls(); + } + + + [Fact] + public async Task Verify_Claimed_Found_Inserted() + { + // Arrange + var fixture = new Fixture(); + var claimIntentId = fixture.Create(); + + var certificateId = new FederatedCertificateId { RegistryName = RegistryName, StreamId = fixture.Create() }; + var allocationId = fixture.Create(); + var sliceHash = fixture.Create(); + var quantity = fixture.Create(); + var randomR = fixture.Create(); + + var block = new Registry.V1.Block + { + Height = 1, + }; + block.AddClaim(allocationId, certificateId); + _registryService.Setup(x => x.GetNextBlock(RegistryName, 0)).ReturnsAsync(block); + _repository.Setup(x => x.GetClaimIntent(claimIntentId)).ReturnsAsync(new ClaimIntent + { + Id = claimIntentId, + RegistryName = certificateId.RegistryName, + CertificateId = certificateId.StreamId, + CommitmentHash = sliceHash, + Quantity = quantity, + RandomR = randomR, + }); + _repository.Setup(x => x.GetClaimAllocation(certificateId, allocationId)).ReturnsAsync(new ClaimAllocation + { + Id = Guid.NewGuid(), + ClaimIntentId = claimIntentId, + AllocationId = allocationId, + CertificateId = certificateId.StreamId, + RegistryName = certificateId.RegistryName, + }); + + // Act + await _job.ProcessRegistryBlocks(RegistryName, 0, default); + + // Assert + 
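+ // A claim on a known allocation is expected to persist a ClaimRecord carrying the intent's
+ // quantity and random R, and to clean up the now-redundant intent and allocation rows.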
_repository.Verify(x => x.UpsertReadBlock(It.Is(x => x.BlockHeight == 1)), Times.Once); + _repository.Verify(x => x.GetClaimAllocation(certificateId, allocationId), Times.Once); + _repository.Verify(x => x.GetClaimIntent(claimIntentId), Times.Once); + _repository.Verify(x => x.InsertClaimRecord(It.Is( + x => x.Id != Guid.Empty + && x.RegistryName == certificateId.RegistryName + && x.CertificateId == certificateId.StreamId + && x.Quantity == quantity + && x.RandomR == randomR + )), Times.Once); + _repository.Verify(x => x.DeleteClaimIntent(claimIntentId), Times.Once); + _repository.Verify(x => x.DeleteClaimAllocation(It.IsAny()), Times.Once); + _repository.VerifyNoOtherCalls(); + } + + [Fact] + public async Task Verify_CrawlAllRegistries() + { + // Arrange + _repository.Setup(x => x.GetRegistriesToCrawl()).ReturnsAsync( + [ + new LastReadBlock { RegistryName = "registry1", BlockHeight = 0, ReadAt = DateTimeOffset.UtcNow - TimeSpan.FromDays(1) }, + new LastReadBlock { RegistryName = "registry2", BlockHeight = 3, ReadAt = DateTimeOffset.UtcNow - TimeSpan.FromSeconds(1) }, + ]); + + _registryService.Setup(x => x.GetNextBlock("registry2", 3)).ReturnsAsync(new Registry.V1.Block + { + Height = 4, + }); + _registryService.Setup(x => x.GetNextBlock("registry2", 4)).ReturnsAsync(new Registry.V1.Block + { + Height = 5, + }); + + // Act + await _job.ProcessAllRegistries(default); + + // Assert + _logger.Verify(x => x.Log(LogLevel.Trace, 0, It.IsAny(), null, It.IsAny>()), Times.Exactly(2)); + _logger.VerifyNoOtherCalls(); + + _registryService.Verify(x => x.GetNextBlock("registry1", 0), Times.Once); + _registryService.Verify(x => x.GetNextBlock("registry2", 3), Times.Once); + _registryService.Verify(x => x.GetNextBlock("registry2", 4), Times.Once); + _registryService.Verify(x => x.GetNextBlock("registry2", 5), Times.Once); + _registryService.VerifyNoOtherCalls(); + + _repository.Verify(x => x.GetRegistriesToCrawl(), Times.Once); + _repository.Verify(x => x.UpsertReadBlock(It.Is(x => x.RegistryName == "registry2" && x.BlockHeight == 4)), Times.Once); + _repository.Verify(x => x.UpsertReadBlock(It.Is(x => x.RegistryName == "registry2" && x.BlockHeight == 5)), Times.Once); + _repository.VerifyNoOtherCalls(); + + } +} + +public static class BlockExtensions +{ + public static void AddIssued(this Registry.V1.Block block, FederatedCertificateId id, DateTimeOffset start, DateTimeOffset end, string area) + { + block.Transactions.Add(new Registry.V1.Transaction + { + Header = new Registry.V1.TransactionHeader + { + PayloadType = Electricity.V1.IssuedEvent.Descriptor.FullName, + FederatedStreamId = id.ToProto(), + }, + Payload = new Electricity.V1.IssuedEvent + { + GridArea = area, + Period = new Electricity.V1.DateInterval + { + Start = Timestamp.FromDateTimeOffset(start), + End = Timestamp.FromDateTimeOffset(end), + }, + }.ToByteString() + }); + } + + public static void AddConsumptionAllocated(this Registry.V1.Block block, Guid allocationId, FederatedCertificateId id, byte[] sliceHash) + { + block.Transactions.Add(new Registry.V1.Transaction + { + Header = new Registry.V1.TransactionHeader + { + PayloadType = Electricity.V1.AllocatedEvent.Descriptor.FullName, + FederatedStreamId = id.ToProto(), + }, + Payload = new Electricity.V1.AllocatedEvent + { + AllocationId = new Common.V1.Uuid { Value = allocationId.ToString() }, + ConsumptionCertificateId = id.ToProto(), + ProductionCertificateId = new Common.V1.FederatedStreamId + { + Registry = id.RegistryName, + StreamId = new Common.V1.Uuid { Value = 
Guid.NewGuid().ToString() }, + }, + ConsumptionSourceSliceHash = ByteString.CopyFrom(sliceHash), + ProductionSourceSliceHash = ByteString.Empty, + EqualityProof = ByteString.Empty, + }.ToByteString() + }); + } + + public static void AddProductionAllocated(this Registry.V1.Block block, Guid allocationId, FederatedCertificateId id, byte[] sliceHash) + { + block.Transactions.Add(new Registry.V1.Transaction + { + Header = new Registry.V1.TransactionHeader + { + PayloadType = Electricity.V1.AllocatedEvent.Descriptor.FullName, + FederatedStreamId = id.ToProto(), + }, + Payload = new Electricity.V1.AllocatedEvent + { + AllocationId = new Common.V1.Uuid { Value = allocationId.ToString() }, + ConsumptionCertificateId = new Common.V1.FederatedStreamId + { + Registry = id.RegistryName, + StreamId = new Common.V1.Uuid { Value = Guid.NewGuid().ToString() }, + }, + ProductionCertificateId = id.ToProto(), + ConsumptionSourceSliceHash = ByteString.Empty, + ProductionSourceSliceHash = ByteString.CopyFrom(sliceHash), + EqualityProof = ByteString.Empty, + }.ToByteString() + }); + } + + public static void AddClaim(this Registry.V1.Block block, Guid allocationId, FederatedCertificateId id) + { + block.Transactions.Add(new Registry.V1.Transaction + { + Header = new Registry.V1.TransactionHeader + { + PayloadType = Electricity.V1.ClaimedEvent.Descriptor.FullName, + FederatedStreamId = id.ToProto(), + }, + Payload = new Electricity.V1.ClaimedEvent + { + AllocationId = new Common.V1.Uuid { Value = allocationId.ToString() }, + CertificateId = id.ToProto(), + }.ToByteString() + }); + } +} + diff --git a/src/ProjectOrigin.Chronicler.Test/ChroniclerOptionsTests.cs b/src/ProjectOrigin.Chronicler.Test/ChroniclerOptionsTests.cs index 8c77fea..c5cb041 100644 --- a/src/ProjectOrigin.Chronicler.Test/ChroniclerOptionsTests.cs +++ b/src/ProjectOrigin.Chronicler.Test/ChroniclerOptionsTests.cs @@ -1,3 +1,4 @@ +using System; using System.Collections.Generic; using System.ComponentModel.DataAnnotations; using System.IO; @@ -16,7 +17,8 @@ public void ChroniclerOptions_EmptyFilename() // Arrange var options = new ChroniclerOptions() { - SigningKeyFilename = string.Empty + SigningKeyFilename = string.Empty, + JobInterval = TimeSpan.FromMinutes(1) }; // Act @@ -34,7 +36,8 @@ public void ChroniclerOptions_FileNotFound() // Arrange var options = new ChroniclerOptions() { - SigningKeyFilename = "example.key" + SigningKeyFilename = "example.key", + JobInterval = TimeSpan.FromMinutes(1) }; // Act @@ -53,7 +56,8 @@ public void ChroniclerOptions_InvalidFormat() var filename = Path.GetTempFileName(); var options = new ChroniclerOptions() { - SigningKeyFilename = filename + SigningKeyFilename = filename, + JobInterval = TimeSpan.FromMinutes(1) }; var data = "invalid data"; File.WriteAllText(filename, data); @@ -75,7 +79,8 @@ public void ChroniclerOptions_Valid() var filename = Path.GetTempFileName(); var options = new ChroniclerOptions() { - SigningKeyFilename = filename + SigningKeyFilename = filename, + JobInterval = TimeSpan.FromMinutes(1) }; var data = key.ExportPkixText(); File.WriteAllText(filename, data); diff --git a/src/ProjectOrigin.Chronicler.Test/ChroniclerRepositoryTests.cs b/src/ProjectOrigin.Chronicler.Test/ChroniclerRepositoryTests.cs new file mode 100644 index 0000000..caeaec1 --- /dev/null +++ b/src/ProjectOrigin.Chronicler.Test/ChroniclerRepositoryTests.cs @@ -0,0 +1,379 @@ +using System; +using System.Data; +using System.Threading.Tasks; +using AutoFixture; +using Dapper; +using FluentAssertions; +using Npgsql; +using 
ProjectOrigin.Chronicler.Server; +using ProjectOrigin.Chronicler.Server.Models; +using ProjectOrigin.Chronicler.Server.Repositories; +using ProjectOrigin.Chronicler.Test.FixtureCustomizations; +using ProjectOrigin.TestCommon.Fixtures; +using Xunit; + +namespace ProjectOrigin.Chronicler.Test; + +public sealed class ChroniclerRepositoryTests : IClassFixture>, IDisposable +{ + private readonly PostgresDatabaseFixture _databaseFixture; + private readonly Fixture _fixture = new(); + private readonly IDbConnection _con; + private readonly ChroniclerRepository _repository; + + public ChroniclerRepositoryTests(PostgresDatabaseFixture databaseFixture) + { + _databaseFixture = databaseFixture; + _databaseFixture.ResetDatabase(); + _fixture.Customizations.Add(new MicrosecondDateTimeOffsetGenerator()); + + _con = new NpgsqlConnection(_databaseFixture.HostConnectionString); + _con.Open(); + _repository = new ChroniclerRepository(_con.BeginTransaction()); + } + + public void Dispose() + { + _con.Dispose(); + } + + [Fact] + public async Task InsertClaimIntent_InsertsClaimIntent() + { + // Arrange + var intent = _fixture.Create(); + + // Act + await _repository.InsertClaimIntent(intent); + + // Assert + _con.QuerySingle("SELECT * FROM claim_intents").Should().BeEquivalentTo(intent); + } + + [Fact] + public async Task InsertClaimIntent_InsertsClaimIntentAndLastReadBlock() + { + // Arrange + var intent = _fixture.Create(); + + // Act + await _repository.InsertClaimIntent(intent); + + // Assert + _con.QuerySingle("SELECT * FROM claim_intents").Should().BeEquivalentTo(intent); + var block = _con.QuerySingle("SELECT * FROM read_blocks"); + block.RegistryName.Should().Be(intent.RegistryName); + block.BlockHeight.Should().Be(-1); + } + + [Fact] + public async Task InsertClaimIntent_InsertsClaimIntentAndNotChangeLastReadBlock() + { + // Arrange + var intent = _fixture.Create(); + var block = new LastReadBlock + { + RegistryName = intent.RegistryName, + BlockHeight = 5, + ReadAt = _fixture.Create() + }; + await _repository.UpsertReadBlock(block); + + // Act + await _repository.InsertClaimIntent(intent); + + // Assert + _con.QuerySingle("SELECT * FROM claim_intents").Should().BeEquivalentTo(intent); + _con.QuerySingle("SELECT * FROM read_blocks").Should().BeEquivalentTo(block); + } + + [Fact] + public async Task GetClaimIntent_ReturnsClaimIntent() + { + // Arrange + var intent = _fixture.Create(); + await _repository.InsertClaimIntent(intent); + + // Act + var result = await _repository.GetClaimIntent(intent.Id); + + // Assert + result.Should().BeEquivalentTo(intent); + } + + [Fact] + public async Task GetClaimIntent_ThrowsExceptionWhenNotFound() + { + // Act + await Assert.ThrowsAsync(() => _repository.GetClaimIntent(new FederatedCertificateId + { + RegistryName = _fixture.Create(), + StreamId = Guid.NewGuid() + }, new byte[32])); + } + + [Fact] + public async Task GetClaimIntentFromFid_ReturnsClaimIntent() + { + // Arrange + var intent = _fixture.Create(); + await _repository.InsertClaimIntent(intent); + + // Act + var result = await _repository.GetClaimIntent(new FederatedCertificateId + { + RegistryName = intent.RegistryName, + StreamId = intent.CertificateId + }, intent.CommitmentHash); + + // Assert + result.Should().BeEquivalentTo(intent); + } + + [Fact] + public async Task GetClaimIntentFromFid_ThrowsExceptionWhenNotFound() + { + // Act + await Assert.ThrowsAsync(() => _repository.GetClaimIntent(new FederatedCertificateId + { + RegistryName = _fixture.Create(), + StreamId = Guid.NewGuid() + }, new 
byte[32])); + } + + [Fact] + public async Task HasClaimIntent_ReturnsFalse_WhenClaimIntentDoesNotExist() + { + // Act + var result = await _repository.HasClaimIntent(new FederatedCertificateId + { + RegistryName = _fixture.Create(), + StreamId = Guid.NewGuid() + }, new byte[32]); + + // Assert + Assert.False(result); + } + + [Fact] + public async Task HasClaimIntent_ReturnsTrue_WhenClaimIntentExists() + { + // Arrange + var intent = _fixture.Create(); + await _repository.InsertClaimIntent(intent); + + // Act + var result = await _repository.HasClaimIntent(new FederatedCertificateId + { + RegistryName = intent.RegistryName, + StreamId = intent.CertificateId + }, intent.CommitmentHash); + + // Assert + Assert.True(result); + } + + [Fact] + public async Task HasClaimIntent_ReturnsFalse_WhenOtherClaimIntentExists() + { + // Arrange + var intent1 = _fixture.Create(); + var intent2 = _fixture.Create(); + await _repository.InsertClaimIntent(intent1); + + // Act + var result = await _repository.HasClaimIntent(new FederatedCertificateId + { + RegistryName = intent2.RegistryName, + StreamId = intent2.CertificateId + }, intent2.CommitmentHash); + + // Assert + Assert.False(result); + } + + [Fact] + public async Task DeleteClaimIntent_DeletesClaimIntent() + { + // Arrange + var intent = _fixture.Create(); + await _repository.InsertClaimIntent(intent); + + // Act + await _repository.DeleteClaimIntent(intent.Id); + + // Assert + Assert.Empty(_con.Query("SELECT * FROM claim_intents")); + } + + [Fact] + public async Task DeleteClaimIntent_DoesNotThrowExceptionWhenClaimIntentDoesNotExist() + { + // Arrange + Task task = _repository.DeleteClaimIntent(Guid.NewGuid()); + + // Act + await task; + + // Assert + task.Status.Should().Be(TaskStatus.RanToCompletion); + } + + [Fact] + public async Task DeleteClaimIntent_DoesNotDeleteOtherClaimIntents() + { + // Arrange + var intent1 = _fixture.Create(); + var intent2 = _fixture.Create(); + await _repository.InsertClaimIntent(intent1); + await _repository.InsertClaimIntent(intent2); + + // Act + await _repository.DeleteClaimIntent(intent1.Id); + + // Assert + Assert.Single(_con.Query("SELECT * FROM claim_intents")); + } + + [Fact] + public async Task UpsertReadBlock_InsertsBlock() + { + // Arrange + var registry = _fixture.Create(); + + // Act + await _repository.UpsertReadBlock(registry); + + // Assert + _con.QuerySingle("SELECT * FROM read_blocks").Should().BeEquivalentTo(registry); + } + + [Fact] + public async Task UpsertReadBlock_UpdatesBlock() + { + // Arrange + var registry = _fixture.Create(); + await _repository.UpsertReadBlock(registry); + + var updatedRegistry = new LastReadBlock + { + RegistryName = registry.RegistryName, + BlockHeight = registry.BlockHeight + 1, + ReadAt = registry.ReadAt + }; + + // Act + await _repository.UpsertReadBlock(updatedRegistry); + + // Assert + _con.QuerySingle("SELECT * FROM read_blocks").Should().BeEquivalentTo(updatedRegistry); + } + + [Fact] + public async Task GetRegistriesToCrawl_ReturnsRegistries() + { + // Arrange + var registry1 = _fixture.Create(); + var registry2 = _fixture.Create(); + await _repository.UpsertReadBlock(registry1); + await _repository.UpsertReadBlock(registry2); + + // Act + var result = await _repository.GetRegistriesToCrawl(); + + // Assert + result.Should().BeEquivalentTo(new[] { registry1, registry2 }); + } + + [Fact] + public async Task GetRegistriesToCrawl_ReturnsEmptyListWhenNoRegistries() + { + // Act + var result = await _repository.GetRegistriesToCrawl(); + + // Assert + 
Assert.Empty(result); + } + + [Fact] + public async Task InsertCertificateInfo_InsertsCertificateInfo() + { + // Arrange + var certificateInfo = _fixture.Create(); + + // Act + await _repository.InsertCertificateInfo(certificateInfo); + + // Assert + _con.QuerySingle("SELECT * FROM certificate_infos").Should().BeEquivalentTo(certificateInfo); + } + + [Fact] + public async Task InsertClaimAllocation_InsertsClaimAllocation() + { + // Arrange + var allocation = _fixture.Create(); + + // Act + await _repository.InsertClaimAllocation(allocation); + + // Assert + _con.QuerySingle("SELECT * FROM claim_allocations").Should().BeEquivalentTo(allocation); + } + + [Fact] + public async Task GetClaimAllocation_ReturnsClaimAllocation() + { + // Arrange + var allocation = _fixture.Create(); + await _repository.InsertClaimAllocation(allocation); + + // Act + var result = await _repository.GetClaimAllocation(new FederatedCertificateId + { + RegistryName = allocation.RegistryName, + StreamId = allocation.CertificateId + }, allocation.AllocationId); + + // Assert + result.Should().BeEquivalentTo(allocation); + } + + [Fact] + public async Task GetClaimAllocation_ThrowsExceptionWhenNotFound() + { + // Act + await Assert.ThrowsAsync(() => _repository.GetClaimAllocation(new FederatedCertificateId + { + RegistryName = _fixture.Create(), + StreamId = Guid.NewGuid() + }, Guid.NewGuid())); + } + + [Fact] + public async Task DeleteClaimAllocation_DeletesClaimAllocation() + { + // Arrange + var allocation = _fixture.Create(); + await _repository.InsertClaimAllocation(allocation); + + // Act + await _repository.DeleteClaimAllocation(allocation.Id); + + // Assert + Assert.Empty(_con.Query("SELECT * FROM claim_allocations")); + } + + [Fact] + public async Task InsertClaimRecord_InsertsClaimRecord() + { + // Arrange + var record = _fixture.Create(); + + // Act + await _repository.InsertClaimRecord(record); + + // Assert + _con.QuerySingle("SELECT * FROM claim_records").Should().BeEquivalentTo(record); + } +} diff --git a/src/ProjectOrigin.Chronicler.Test/ChroniclerServiceTests.cs b/src/ProjectOrigin.Chronicler.Test/ChroniclerServiceTests.cs index efcd908..77f23fd 100644 --- a/src/ProjectOrigin.Chronicler.Test/ChroniclerServiceTests.cs +++ b/src/ProjectOrigin.Chronicler.Test/ChroniclerServiceTests.cs @@ -1,4 +1,6 @@ using System; +using System.Collections.Generic; +using System.Text.Json; using System.Threading.Tasks; using AutoFixture; using FluentAssertions; @@ -18,18 +20,26 @@ public class ChroniclerServiceTests : TestServerBase, IClassFixture _databaseFixture; public ChroniclerServiceTests( TestServerFixture serverFixture, PostgresDatabaseFixture databaseFixture, ITestOutputHelper outputHelper) : base(serverFixture, outputHelper) { - _databaseFixture = databaseFixture; + var path = TempFile.WriteAllText(JsonSerializer.Serialize(new + { + RegistryUrls = new Dictionary + { + } + })); + serverFixture.ConfigureHostConfiguration(new() { - { "ConnectionStrings:Database", databaseFixture.HostConnectionString }, + { "ConnectionStrings:Database", databaseFixture.HostConnectionString}, { "Chronicler:SigningKeyFilename", TempFile.WriteAllText(_privateKey.ExportPkixText()) }, + { "Chronicler:JobInterval", "00:15:00" }, + { "Network:ConfigurationUri", "file://"+path }, + { "Network:RefreshInterval", "00:15:00" }, }); } @@ -39,7 +49,7 @@ public async Task ServiceReturnsSingature() // Arrange var registryName = _fixture.Create(); var certId = _fixture.Create(); - var client = new V1.RegistryService.RegistryServiceClient(Channel); + 
var client = new V1.ChroniclerService.ChroniclerServiceClient(Channel); var quantity = _fixture.Create(); var commitmentInfo = new SecretCommitmentInfo((uint)quantity); diff --git a/src/ProjectOrigin.Chronicler.Test/FixtureCustomizations/MicrosecondDateTimeOffsetGenerator.cs b/src/ProjectOrigin.Chronicler.Test/FixtureCustomizations/MicrosecondDateTimeOffsetGenerator.cs new file mode 100644 index 0000000..f262550 --- /dev/null +++ b/src/ProjectOrigin.Chronicler.Test/FixtureCustomizations/MicrosecondDateTimeOffsetGenerator.cs @@ -0,0 +1,20 @@ +using System; +using AutoFixture.Kernel; + +namespace ProjectOrigin.Chronicler.Test.FixtureCustomizations; + +public class MicrosecondDateTimeOffsetGenerator : ISpecimenBuilder +{ + public object Create(object request, ISpecimenContext context) + { + // If the request is for a DateTimeOffset, return a DateTimeOffset with the ticks rounded to the nearest 10 microseconds. + // This is because the PostgreSQL timestamp type only has microsecond precision. + if (request is SeededRequest sr && sr.Request is Type t && t == typeof(DateTimeOffset)) + { + var dateTimeOffset = (DateTimeOffset)context.Resolve(typeof(DateTimeOffset)); + return new DateTimeOffset(dateTimeOffset.Ticks / 10 * 10, dateTimeOffset.Offset); + } + + return new NoSpecimen(); + } +} diff --git a/src/ProjectOrigin.Chronicler.Test/RegistryClientFactoryTests.cs b/src/ProjectOrigin.Chronicler.Test/RegistryClientFactoryTests.cs new file mode 100644 index 0000000..e3e90f9 --- /dev/null +++ b/src/ProjectOrigin.Chronicler.Test/RegistryClientFactoryTests.cs @@ -0,0 +1,111 @@ +using Xunit; +using Moq; +using ProjectOrigin.Chronicler.Server; +using FluentAssertions; +using Microsoft.Extensions.Options; +using ProjectOrigin.Chronicler.Server.Options; +using System.Collections.Generic; +using System; +using ProjectOrigin.Chronicler.Server.Exceptions; + +namespace ProjectOrigin.Chronicler.Test; + +public class RegistryClientFactoryTests +{ + private const string RegistryName = "someRegistry"; + + [Fact] + public void GetClient_WhenRegistryNotKnown_ThrowsRegistryNotKnownException() + { + // Arrange + var mockOptionsMonitor = new Mock>(); + var options = new NetworkOptions + { + RegistryUrls = new Dictionary() + }; + mockOptionsMonitor.Setup(m => m.CurrentValue).Returns(options); + var factory = new RegistryClientFactory(mockOptionsMonitor.Object); + + // Act + Action act = () => factory.GetClient(RegistryName); + + // Assert + act.Should().Throw(); + } + + [Fact] + public void GetClient_WhenRegistryKnown_ReturnsClient() + { + // Arrange + var mockOptionsMonitor = new Mock>(); + var options = new NetworkOptions + { + RegistryUrls = new Dictionary{ + {RegistryName, "http://example.com"} + } + }; + mockOptionsMonitor.Setup(m => m.CurrentValue).Returns(options); + var factory = new RegistryClientFactory(mockOptionsMonitor.Object); + + // Act + var result = factory.GetClient(RegistryName); + + // Assert + Assert.NotNull(result); + } + + [Fact] + public void GetChannel_WhenCalledMultipleTimes_ReturnsSameInstance() + { + // Arrange + var mockOptionsMonitor = new Mock>(); + var options = new NetworkOptions + { + RegistryUrls = new Dictionary{ + {RegistryName, "http://example.com"} + } + }; + mockOptionsMonitor.Setup(m => m.CurrentValue).Returns(options); + var factory = new RegistryClientFactory(mockOptionsMonitor.Object); + + // Act + var result1 = factory.GetChannel(RegistryName); + var result2 = factory.GetChannel(RegistryName); + + // Assert + Assert.Same(result1, result2); + } + + [Fact] + public void 
GetChannel_NewInstance_IfMonitorChanged() + { + // Arrange + Action? onChange = null; + var mockOptionsMonitor = new Mock>(); + mockOptionsMonitor.Setup(m => m.OnChange(It.IsAny>())).Callback>((action) => onChange = action); + + var options = new NetworkOptions + { + RegistryUrls = new Dictionary{ + {RegistryName, "http://example.com"} + } + }; + mockOptionsMonitor.Setup(m => m.CurrentValue).Returns(options); + var factory = new RegistryClientFactory(mockOptionsMonitor.Object); + + // Act + var result1 = factory.GetChannel(RegistryName); + var options2 = new NetworkOptions + { + RegistryUrls = new Dictionary{ + {RegistryName, "http://example.com"} + } + }; + mockOptionsMonitor.Setup(m => m.CurrentValue).Returns(options); + onChange?.Invoke(options2, null); + var result2 = factory.GetChannel(RegistryName); + + // Assert + Assert.NotSame(result1, result2); + } +} diff --git a/src/ProjectOrigin.Chronicler.Test/RegistryServiceTests.cs b/src/ProjectOrigin.Chronicler.Test/RegistryServiceTests.cs new file mode 100644 index 0000000..1571d9d --- /dev/null +++ b/src/ProjectOrigin.Chronicler.Test/RegistryServiceTests.cs @@ -0,0 +1,105 @@ +using System.Threading.Tasks; +using Xunit; +using Moq; +using ProjectOrigin.Chronicler.Server; +using Grpc.Core; +using FluentAssertions; + +namespace ProjectOrigin.Chronicler.Test; + +public class RegistryServiceTests +{ + private const string RegistryName = "someRegistry"; + private const string GridArea = "Narnia"; + private readonly Mock _factory; + + public RegistryServiceTests() + { + _factory = new Mock(); + } + + [Fact] + public async Task GetNextBlock_EmptyResponse_ReturnsNull() + { + // Arrange + var registryService = new RegistryService(_factory.Object); + var mockClient = new Mock(); + mockClient.Setup(x => x.GetBlocksAsync(It.IsAny(), null, null, default)) + .Returns(CreateAsyncUnaryCall(new Registry.V1.GetBlocksResponse + { + Blocks = { } + })); + _factory.Setup(x => x.GetClient(RegistryName)).Returns(mockClient.Object); + + // Act + var result = await registryService.GetNextBlock(RegistryName, 0); + + // Assert + _factory.Verify(x => x.GetClient(RegistryName), Times.Once); + result.Should().BeNull(); + } + + [Theory] + [InlineData(3)] + [InlineData(6)] + [InlineData(8)] + public async Task GetNextBlock_ContainsSingle_ReturnsSingle(int height) + { + // Arrange + var registryService = new RegistryService(_factory.Object); + var mockClient = new Mock(); + mockClient.Setup(x => x.GetBlocksAsync(It.IsAny(), null, null, default)) + .Returns(CreateAsyncUnaryCall(new Registry.V1.GetBlocksResponse + { + Blocks = { + new Registry.V1.Block { Height = height+1 } + } + })); + _factory.Setup(x => x.GetClient(RegistryName)).Returns(mockClient.Object); + + // Act + var result = await registryService.GetNextBlock(RegistryName, height); + + // Assert + _factory.Verify(x => x.GetClient(RegistryName), Times.Once); + mockClient.Verify(x => x.GetBlocksAsync(It.Is( + x => x.Skip == height + && x.Limit == 1 + && x.IncludeTransactions == true + ), null, null, default), Times.Once); + mockClient.VerifyNoOtherCalls(); + result.Should().NotBeNull(); + result!.Height.Should().Be(height + 1); + } + + [Fact] + public async Task GetNextBlock_ContainsMultiple_RaiseError() + { + // Arrange + var registryService = new RegistryService(_factory.Object); + var mockClient = new Mock(); + mockClient.Setup(x => x.GetBlocksAsync(It.IsAny(), null, null, default)) + .Returns(CreateAsyncUnaryCall(new Registry.V1.GetBlocksResponse + { + Blocks = { + new Registry.V1.Block { Height = 0 }, + new 
Registry.V1.Block { Height = 1 } + } + })); + _factory.Setup(x => x.GetClient(RegistryName)).Returns(mockClient.Object); + + // Assert + await Assert.ThrowsAsync(() => registryService.GetNextBlock(RegistryName, 0)); + } + + private static AsyncUnaryCall CreateAsyncUnaryCall(TResponse response) + { + return new AsyncUnaryCall( + Task.FromResult(response), + Task.FromResult(new Metadata()), + () => Status.DefaultSuccess, + () => new Metadata(), + () => { }); + } + +} diff --git a/src/ProjectOrigin.Chronicler.Test/UriOptionsLoaderTests.cs b/src/ProjectOrigin.Chronicler.Test/UriOptionsLoaderTests.cs new file mode 100644 index 0000000..c817db7 --- /dev/null +++ b/src/ProjectOrigin.Chronicler.Test/UriOptionsLoaderTests.cs @@ -0,0 +1,183 @@ +using System.Collections.Generic; +using System.Threading.Tasks; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Hosting; +using Microsoft.Extensions.Options; +using WireMock.RequestBuilders; +using WireMock.ResponseBuilders; +using WireMock.Server; +using Xunit; +using ProjectOrigin.ServiceCommon.UriOptionsLoader; +using Microsoft.Extensions.Configuration.Memory; +using Microsoft.AspNetCore.Hosting; +using FluentAssertions; +using System; + +namespace ProjectOrigin.Chronicler.Test; + +public class UriOptionsLoaderTests +{ + private const string TestPath = "/TestPath"; + private const string Scenario = "MyScenario"; + private const string SecondCallState = "Second Call"; + + [Fact] + public async Task WhenChanged_MonitorCalled() + { + var _networkMockServer = WireMockServer.Start(); + + var _builder = new HostBuilder(); + _builder.ConfigureHostConfiguration(config => + config.Add(new MemoryConfigurationSource() + { + InitialData = new Dictionary() + { + { "TestPath:ConfigurationUri", _networkMockServer.Urls[0] + TestPath }, + { "TestPath:RefreshInterval", "00:00:10" }, + } + })); + + _builder.ConfigureServices((context, services) => + { + services.AddHttpClient(); + services.ConfigureUriOptionsLoader("TestPath"); + services.AddSingleton(); + }); + + _networkMockServer.Given(Request.Create().WithPath(TestPath).UsingGet()) + .InScenario(Scenario) + .WillSetStateTo(SecondCallState) + .RespondWith(Response.Create() + .WithStatusCode(200) + .WithHeader("Content-Type", "application/json") + .WithBodyAsJson(new + { + SomeKey = "SomeValue", + })); + + _networkMockServer.Given(Request.Create().WithPath(TestPath).UsingGet()) + .InScenario(Scenario) + .WhenStateIs(SecondCallState) + .RespondWith(Response.Create() + .WithStatusCode(200) + .WithHeader("Content-Type", "application/json") + .WithBodyAsJson(new + { + SomeKey = "SomeOtherValue", + })); + + + using var host = _builder.Build(); + await Task.Run(host.Start); + + var listener = host.Services.GetRequiredService(); + + TestOption currentOption = listener.MonitorOption.CurrentValue; + TestOption? 
newOption = null; + var taskCompletionSource = new TaskCompletionSource(); + + listener.MonitorOption.OnChange((options, name) => + { + newOption = options; + taskCompletionSource.SetResult(true); + }); + + await taskCompletionSource.Task; + + Assert.NotEqual(currentOption, newOption); + } + + [Fact] + public async Task WhenNotChanged_MonitorNotCalled() + { + var _networkMockServer = WireMockServer.Start(); + + var _builder = new HostBuilder(); + _builder.ConfigureHostConfiguration(config => + config.Add(new MemoryConfigurationSource() + { + InitialData = new Dictionary() + { + { "TestPath:ConfigurationUri", _networkMockServer.Urls[0] + TestPath }, + { "TestPath:RefreshInterval", "00:00:5" }, + } + })); + + _builder.ConfigureServices((context, services) => + { + services.AddHttpClient(); + services.ConfigureUriOptionsLoader("TestPath"); + services.AddSingleton(); + }); + + _networkMockServer.Given(Request.Create().WithPath(TestPath).UsingGet()) + .RespondWith(Response.Create() + .WithStatusCode(200) + .WithHeader("Content-Type", "application/json") + .WithBodyAsJson(new + { + SomeKey = "SomeValue", + })); + + using var host = _builder.Build(); + await Task.Run(host.Start); + + var listener = host.Services.GetRequiredService(); + bool called = false; + + listener.MonitorOption.OnChange((options, name) => + { + called = true; + }); + + await Task.Delay(13000); + _networkMockServer.LogEntries.Should().HaveCount(3); + called.Should().BeFalse(); + } + + [Fact] + public async Task NotSupportedFormat_ThrowsException() + { + // Arrange + var _builder = new HostBuilder(); + _builder.ConfigureHostConfiguration(config => + config.Add(new MemoryConfigurationSource() + { + InitialData = new Dictionary() + { + { "TestPath:ConfigurationUri", "some://hello.com" }, + { "TestPath:RefreshInterval", "00:00:5" }, + } + })); + + + _builder.ConfigureServices((context, services) => + { + services.AddHttpClient(); + services.ConfigureUriOptionsLoader("TestPath"); + }); + + using var host = _builder.Build(); + + // Act + var exception = await Assert.ThrowsAsync(() => Task.Run(host.Start)); + + // Assert + exception.Message.Should().Be("Unsupported URI scheme: some"); + } + + internal class NetworkOptionsChangeListener + { + public IOptionsMonitor MonitorOption { get; } + + public NetworkOptionsChangeListener(IOptionsMonitor optionsMonitor) + { + MonitorOption = optionsMonitor; + } + } + + internal record TestOption + { + public required string SomeKey { get; init; } + } +} diff --git a/src/Protos/chronicler.proto b/src/Protos/chronicler.proto index 7961774..67fd13b 100644 --- a/src/Protos/chronicler.proto +++ b/src/Protos/chronicler.proto @@ -4,7 +4,7 @@ import "common.proto"; package project_origin.chronicler.v1; -service RegistryService { +service ChroniclerService { // Register an intent to claim a certificate rpc RegisterClaimIntent (ClaimIntentRequest) returns (ClaimIntentResponse); } diff --git a/src/Protos/electricity.proto b/src/Protos/electricity.proto new file mode 100644 index 0000000..c276554 --- /dev/null +++ b/src/Protos/electricity.proto @@ -0,0 +1,91 @@ +syntax = "proto3"; + +import "google/protobuf/timestamp.proto"; +import "common.proto"; + +package project_origin.electricity.v1; + +message IssuedEvent { + project_origin.common.v1.FederatedStreamId certificate_id = 1; + GranularCertificateType type = 2; + DateInterval period = 3; + string grid_area = 4; + bytes asset_id_hash = 5; + Commitment quantity_commitment = 6; + PublicKey owner_public_key = 7; + repeated Attribute attributes = 8; +} + 
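+// Event recording that a slice of a certificate has been transferred to a new owner public key.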
+message TransferredEvent{ + project_origin.common.v1.FederatedStreamId certificate_id = 1; + bytes source_slice_hash = 2; + PublicKey new_owner = 3; +} + +message SlicedEvent{ + message Slice { + Commitment quantity = 1; + PublicKey new_owner = 2; + } + + project_origin.common.v1.FederatedStreamId certificate_id = 1; + bytes source_slice_hash = 2; + repeated Slice new_slices = 3; + bytes sum_proof = 4; +} + +message AllocatedEvent { + project_origin.common.v1.Uuid allocation_id = 1; + project_origin.common.v1.FederatedStreamId production_certificate_id = 2; + project_origin.common.v1.FederatedStreamId consumption_certificate_id = 3; + bytes production_source_slice_hash = 4; + bytes consumption_source_slice_hash = 5; + bytes equality_proof = 6; +} + +message ClaimedEvent { + project_origin.common.v1.FederatedStreamId certificate_id = 1; + project_origin.common.v1.Uuid allocation_id = 2; +} + +enum GranularCertificateType { + INVALID = 0; + CONSUMPTION = 1; + PRODUCTION = 2; +} + +message Attribute { + string key = 1; + string value = 2; + AttributeType type = 3; +} + +enum AttributeType { + CLEARTEXT = 0; + HASHED = 1; +} + +message DateInterval { + google.protobuf.Timestamp start = 1; + google.protobuf.Timestamp end = 2; +} + +message Commitment { + bytes content = 1; + bytes range_proof = 2; +} + +message CommitmentPublication { + uint32 message = 1; + bytes r_value = 2; +} + +message PublicKey { + bytes content = 1; + KeyType type = 2; +} + +enum KeyType { + Secp256k1 = 0; + ED25519 = 1; +} diff --git a/src/Protos/registry.proto b/src/Protos/registry.proto new file mode 100644 index 0000000..277e6e4 --- /dev/null +++ b/src/Protos/registry.proto @@ -0,0 +1,174 @@ +syntax = "proto3"; + +import "google/protobuf/timestamp.proto"; +import "common.proto"; + +package project_origin.registry.v1; + +service RegistryService { + rpc SendTransactions (SendTransactionsRequest) returns (SubmitTransactionResponse); + rpc GetStreamTransactions (GetStreamTransactionsRequest) returns (GetStreamTransactionsResponse); + rpc GetTransactionStatus(GetTransactionStatusRequest) returns (GetTransactionStatusResponse); + rpc GetTransactionProof (GetTransactionProofRequest) returns (GetTransactionProofResponse); + rpc GetBlocks(GetBlocksRequest) returns (GetBlocksResponse); +} + +message GetBlocksRequest { + // Number of blocks to skip. + int32 skip = 1; + + // The maximum number of blocks to return. + int32 limit = 2; + + // If true, the result will include the transactions in the blocks. + bool include_transactions = 3; +} + +message GetBlocksResponse { + // The blocks. + repeated Block blocks = 1; +} + +message GetBlockTransactionsRequest { + // The block number to get transactions from, 0 based. + int32 block_number = 1; +} + +message GetBlockTransactionsResponse { + // The transactions in the block. + repeated Transaction transactions = 1; +} + +message SendTransactionsRequest { + // list of transactions to be processed + repeated Transaction transactions = 1; +} + +message SubmitTransactionResponse {} + +message GetStreamTransactionsRequest { + // The id of the transaction to get stream of transactions from. + project_origin.common.v1.Uuid stream_id = 1; +} + +message GetStreamTransactionsResponse { + // The stream of transactions. + repeated Transaction transactions = 1; +} + +message GetTransactionStatusRequest { + //Base64 encoded SHA256 hash of the transaction + string id = 1; +} + +message GetTransactionStatusResponse { + // The state of the transaction. 
+ TransactionState status = 1; + + // The error message if the transaction is invalid. + string message = 2; +} + +enum TransactionState { + UNKNOWN = 0; + PENDING = 1; + FAILED = 2; + COMMITTED = 3; +} + +message GetTransactionProofRequest { + //Base64 encoded SHA256 hash of the transaction + string id = 1; +} + +message GetTransactionProofResponse { + MerkleProof proof = 1; +} + +message TransactionHeader { + // The target stream for which this transaction is intended + project_origin.common.v1.FederatedStreamId federated_stream_id = 1; + + // The type of the payload, determines how the payload should be + // deserialized and validated. + string payload_type = 2; + + // The SHA-512 hash of the serialized payload. + bytes payload_sha512 = 3; + + // A random string that provides uniqueness for transactions with + // otherwise identical fields. + string nonce = 4; +} + +// A transaction holds a single atomic change to the state of a stream signed by the owner. +message Transaction { + // The metadata of the transaction, + // helps the transaction processor process the transaction. + TransactionHeader header = 1; + + // The signature of the transaction header. + bytes header_signature = 2; + + // The serialized payload of the transaction + bytes payload = 3; +} + +message MerkleProof { + Transaction transaction = 1; + int32 leaf_index = 2; + repeated bytes hashes = 3; + bytes block_id = 4; +} + +message BlockHeader { + // SHA-256 hash of the previous block header. + bytes previous_header_hash = 1; + + // SHA-256 hash of the previous publication. + bytes previous_publication_hash = 2; + + // SHA-256 hash of the merkle root of the transactions in the block. + bytes merkle_root_hash = 3; + + // Timestamp of when the block was created. + google.protobuf.Timestamp created_at = 4; +} + +// A block in the immutable log. +message Block { + // Height of the block in the chain. + int32 height = 1; + + // The header of the block. + BlockHeader header = 2; + + // The publication of the block. + BlockPublication publication = 3; + + // The transactions in the block. + repeated Transaction transactions = 4; +} + +// A publication of a block. +message BlockPublication { + // Contains the type of the publication. + oneof immutable_log_type { + // Should only be used for testing. + LogEntry log_entry = 1; + Concordium concordium = 2; + } + + message LogEntry { + // SHA-256 hash of the block header. + bytes block_header_hash = 1; + } + + message Concordium { + // The hash of the transaction on the Concordium blockchain. + bytes transaction_hash = 1; + + // The hash of the block on the Concordium blockchain. + bytes block_hash = 2; + } +} From 5f8cfb96c191bbf80adaa38ee37f0934fe19ca08 Mon Sep 17 00:00:00 2001 From: Martin Schmidt Date: Tue, 13 Aug 2024 12:31:36 +0000 Subject: [PATCH 02/10] removed default automountServiceAccountToken --- chart/run_kind_test.sh | 40 +++++++++++++++--------- chart/templates/deployment.yaml | 52 +++++++++++++++++++++----------- chart/templates/migrate-job.yaml | 1 + chart/tests/deployment_test.yaml | 8 ++--- 4 files changed, 64 insertions(+), 37 deletions(-) diff --git a/chart/run_kind_test.sh b/chart/run_kind_test.sh index 60488a3..ec25a9b 100755 --- a/chart/run_kind_test.sh +++ b/chart/run_kind_test.sh @@ -5,6 +5,7 @@ # Define kind cluster name cluster_name=chronicler-test +namespace=chronicler # Ensures script fails if something goes wrong. 
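# (-e aborts on the first failing command; -o pipefail also fails the pipeline when any command in it fails.)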
set -eo pipefail @@ -12,24 +13,24 @@ set -eo pipefail # define cleanup function cleanup() { rm -fr $temp_folderx - kind delete cluster -n ${cluster_name} >/dev/null 2>&1 + kind delete cluster --name ${cluster_name} >/dev/null 2>&1 } # define debug function debug() { echo -e "\nDebugging information:" echo -e "\nHelm status:" - helm status chronicler -n chronicler --show-desc --show-resources + helm status chronicler --namespace ${namespace} --show-desc --show-resources echo -e "\nDeployment description:" - kubectl describe deployment -n chronicler po-chronicler-deployment + kubectl describe deployment --namespace ${namespace} po-chronicler-deployment - POD_NAMES=$(kubectl get pods -n chronicler -l app=po-chronicler -o jsonpath="{.items[*].metadata.name}") + POD_NAMES=$(kubectl get pods --namespace ${namespace} -l app=po-chronicler -o jsonpath="{.items[*].metadata.name}") # Loop over the pods and print their logs for POD_NAME in $POD_NAMES do echo -e "\nLogs for $POD_NAME:" - kubectl logs -n chronicler $POD_NAME + kubectl logs --namespace ${namespace} $POD_NAME done } @@ -43,26 +44,26 @@ values_filename=${temp_folder}/values.yaml secret_filename=${temp_folder}/secret.yaml # create kind cluster -kind delete cluster -n ${cluster_name} -kind create cluster -n ${cluster_name} +kind delete cluster --name ${cluster_name} +kind create cluster --name ${cluster_name} # create namespace -kubectl create namespace chronicler +kubectl create namespace ${namespace} # install postgresql chart -helm install postgresql oci://registry-1.docker.io/bitnamicharts/postgresql --namespace chronicler +helm install postgresql oci://registry-1.docker.io/bitnamicharts/postgresql --namespace ${namespace} # build docker image docker build -f src/Chronicler.Dockerfile -t ghcr.io/project-origin/chronicler:test src/ # load docker image into cluster -kind load -n ${cluster_name} docker-image ghcr.io/project-origin/chronicler:test +kind load --name ${cluster_name} docker-image ghcr.io/project-origin/chronicler:test # generate keys openssl genpkey -algorithm ED25519 > ${secret_filename} # generate secret -kubectl create secret generic signing-key --from-file=my-key=${secret_filename} --namespace chronicler +kubectl create secret generic signing-key --from-file=my-key=${secret_filename} --namespace ${namespace} # generate values.yaml file cat << EOF > "${values_filename}" @@ -78,7 +79,7 @@ config: networkConfigurationFile: |- { "RegistryUrls": { - "narniaRegistry": "https://registry.narnia.example.com", + "narniaRegistry": "https://registry.narnia.example.com" } } postgresql: @@ -93,6 +94,15 @@ postgresql: EOF # install chronicler chart -helm install chronicler ./chart --values ${values_filename} --namespace chronicler --wait - -echo "Test completed successfully ✅" +helm install chronicler ./chart --values ${values_filename} --namespace ${namespace} --wait --timeout 1m + +# verify deployment is ready +deployments_status=$(kubectl get deployments --namespace ${namespace} --no-headers | awk '$3 != $4 {print "Deployment " $1 " is not ready"}') + +# Print the results to stderr if there are any issues +if [ -n "$deployments_status" ]; then + echo "$deployments_status" 1>&2 + echo "Test failed ❌" +else + echo "Test completed successfully ✅" +fi diff --git a/chart/templates/deployment.yaml b/chart/templates/deployment.yaml index e28f379..35ad89d 100644 --- a/chart/templates/deployment.yaml +++ b/chart/templates/deployment.yaml @@ -19,12 +19,17 @@ spec: app: po-chronicler spec: serviceAccountName: chronicler-migration-waiter + 
automountServiceAccountToken: false initContainers: - name: wait-for-migration image: groundnuty/k8s-wait-for:v2.0 args: - "job" - "po-chronicler-migrate-job-{{ .Values.image.tag | default .Chart.AppVersion | replace "." "-" }}" + - "-n {{ .Release.Namespace }}" + volumeMounts: + - name: service-account-token + mountPath: /var/run/secrets/kubernetes.io/serviceaccount containers: - name: po-chronicler image: {{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }} @@ -41,10 +46,10 @@ spec: # Chronicler Configuration {{- if .Values.config.networkConfigurationFile }} - - name: NetworkConfigurationUri - value: /etc/config/networkConfiguration.json + - name: network__ConfigurationUri + value: file:///etc/config/networkConfiguration.json {{- else if .Values.config.networkConfigurationUri }} - - name: NetworkConfigurationUri + - name: network__ConfigurationUri value: {{ .Values.config.networkConfigurationUri }} {{- else }} {{ fail "No network configuration provided" }} @@ -53,36 +58,47 @@ spec: - name: chronicler__JobInterval value: {{ .Values.config.jobInterval }} - - name: chronicler__SigningKey - value: /etc/secret/signing-key + - name: chronicler__SigningKeyFilename + value: &signingKeyFilename /etc/secret/signing-key {{- range $i, $area := .Values.config.gridAreas }} - name: chronicler__GridAreas__{{ $i }} value: {{ $area }} {{- end }} volumeMounts: - - name: signing-key-volume - mountPath: /etc/secret - subPath: signing-key + - name: service-account-token + mountPath: /var/run/secrets/kubernetes.io/serviceaccount + readOnly: true + - name: &signingKeyVolumeName signing-key-volume + mountPath: *signingKeyFilename + subPath: &signingKeyVolumePath signing-key {{- if .Values.config.networkConfigurationFile }} - - name: config-volume - mountPath: /etc/config - subPath: networkConfiguration.json + - name: &networkConfigVolumeName config-volume + mountPath: /etc/config/networkConfiguration.json + subPath: &networkConfigSubPath networkConfiguration.json {{- end }} - - volumes: - - name: signing-key-volume + - name: service-account-token + projected: + sources: + - serviceAccountToken: + path: token + - configMap: + name: kube-root-ca.crt + items: + - key: ca.crt + path: ca.crt + - name: *signingKeyVolumeName secret: secretName: {{ .Values.config.signingKeySecret.name }} items: - key: {{ .Values.config.signingKeySecret.key }} - path: signing-key + path: *signingKeyVolumePath {{- if .Values.config.networkConfigurationFile }} - - name: config-volume + - name: *networkConfigVolumeName configMap: name: po-chronicler-deployment-config items: - - key: networkConfiguration.json - path: networkConfiguration.json + - key: *networkConfigSubPath + path: *networkConfigSubPath {{- end }} diff --git a/chart/templates/migrate-job.yaml b/chart/templates/migrate-job.yaml index 52e95ce..3610cdb 100644 --- a/chart/templates/migrate-job.yaml +++ b/chart/templates/migrate-job.yaml @@ -10,6 +10,7 @@ spec: completions: 1 template: spec: + automountServiceAccountToken: false restartPolicy: Never containers: - name: po-chronicler-migrate-job diff --git a/chart/tests/deployment_test.yaml b/chart/tests/deployment_test.yaml index d52e049..345ae75 100644 --- a/chart/tests/deployment_test.yaml +++ b/chart/tests/deployment_test.yaml @@ -60,7 +60,7 @@ tests: - isKind: of: Deployment - matchRegex: - path: spec.template.spec.containers[0].env[?(@.name=="NetworkConfigurationUri")].value + path: spec.template.spec.containers[0].env[?(@.name=="network__ConfigurationUri")].value pattern: "http://example.com" - 
it: should add a volume and volume mount for network configuration when networkConfigurationFile is set set: @@ -75,11 +75,11 @@ tests: - isKind: of: Deployment - matchRegex: - path: spec.template.spec.containers[0].env[?(@.name=="NetworkConfigurationUri")].value + path: spec.template.spec.containers[0].env[?(@.name=="network__ConfigurationUri")].value pattern: "/etc/config/networkConfiguration.json" - matchRegex: - path: spec.template.spec.containers[0].volumeMounts[1].name - pattern: "config-volume" + path: spec.template.spec.containers[0].volumeMounts[?(@.name=="config-volume")].mountPath + pattern: "/etc/config" - matchRegex: path: spec.template.spec.volumes[?(@.name=="config-volume")].configMap.items[0].key pattern: "networkConfiguration.json" From a4e9b243334c1e133b1986e0b0f5be46f01cf252 Mon Sep 17 00:00:00 2001 From: Martin Schmidt Date: Tue, 13 Aug 2024 13:06:02 +0000 Subject: [PATCH 03/10] Added limits --- chart/templates/_limits.yaml | 9 +++++++++ chart/templates/deployment.yaml | 7 +++++++ chart/templates/migrate-job.yaml | 1 + chart/values.yaml | 16 ++++++++++++++++ 4 files changed, 33 insertions(+) create mode 100644 chart/templates/_limits.yaml diff --git a/chart/templates/_limits.yaml b/chart/templates/_limits.yaml new file mode 100644 index 0000000..ec54414 --- /dev/null +++ b/chart/templates/_limits.yaml @@ -0,0 +1,9 @@ +{{- define "limits" -}} +resources: + requests: + cpu: {{ .Values.resources.requests.cpu }} + memory: {{ .Values.resources.requests.memory }} + limits: + #cpu: {{ .Values.resources.limits.cpu }} + memory: {{ .Values.resources.limits.memory | default .Values.resources.requests.memory }} +{{- end -}} diff --git a/chart/templates/deployment.yaml b/chart/templates/deployment.yaml index 35ad89d..0088a9a 100644 --- a/chart/templates/deployment.yaml +++ b/chart/templates/deployment.yaml @@ -23,6 +23,12 @@ spec: initContainers: - name: wait-for-migration image: groundnuty/k8s-wait-for:v2.0 + resources: + requests: + cpu: 0.1 + memory: 50Mi + limits: + memory: 50Mi args: - "job" - "po-chronicler-migrate-job-{{ .Values.image.tag | default .Chart.AppVersion | replace "." "-" }}" @@ -33,6 +39,7 @@ spec: containers: - name: po-chronicler image: {{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }} + {{ include "limits" . | nindent 10 }} args: - "--serve" env: diff --git a/chart/templates/migrate-job.yaml b/chart/templates/migrate-job.yaml index 3610cdb..56b9f3d 100644 --- a/chart/templates/migrate-job.yaml +++ b/chart/templates/migrate-job.yaml @@ -15,6 +15,7 @@ spec: containers: - name: po-chronicler-migrate-job image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + {{ include "limits" . 
| nindent 10 }} args: - "--migrate" env: diff --git a/chart/values.yaml b/chart/values.yaml index e2791c1..de6a3f2 100644 --- a/chart/values.yaml +++ b/chart/values.yaml @@ -7,6 +7,22 @@ image: # tag defines the image tag to use, defaults to the chart appVersion tag: +# resources holds the configuration for the resource requests and limits +resources: + # requests holds the configuration for the resource requests + requests: + # cpu defines the cpu request limit for the chronicler server + cpu: 0.5 + # memory defines the memory request limit for the chronicler server + memory: 200Mi + + # limits holds the configuration for the resource limits + limits: + # cpu defines the cpu limit for the chronicler server + cpu: + # memory defines the memory limit for the chronicler server, defaults to the request memory + memory: + # service holds the configuration for the service service: # type is the type of service to create, allowed values are (ClusterIP | LoadBalancer) From 08e34269803b4728b211c25c631d047684b261d5 Mon Sep 17 00:00:00 2001 From: Martin Schmidt Date: Tue, 13 Aug 2024 13:16:46 +0000 Subject: [PATCH 04/10] Fixed cpu limit --- chart/templates/_limits.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/chart/templates/_limits.yaml b/chart/templates/_limits.yaml index ec54414..75749c6 100644 --- a/chart/templates/_limits.yaml +++ b/chart/templates/_limits.yaml @@ -4,6 +4,8 @@ resources: cpu: {{ .Values.resources.requests.cpu }} memory: {{ .Values.resources.requests.memory }} limits: - #cpu: {{ .Values.resources.limits.cpu }} + {{- if .Values.resources.limits.cpu }} + cpu: {{ .Values.resources.limits.cpu }} + {{- end -}} memory: {{ .Values.resources.limits.memory | default .Values.resources.requests.memory }} {{- end -}} From e08bf5969ba5131b49f49f95cb6748835b662633 Mon Sep 17 00:00:00 2001 From: Martin Schmidt Date: Wed, 14 Aug 2024 13:07:00 +0000 Subject: [PATCH 05/10] Update reusable-verify-chart.yaml to latest version --- .github/workflows/pullrequest-verify.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pullrequest-verify.yaml b/.github/workflows/pullrequest-verify.yaml index 6aa55ef..cba135f 100644 --- a/.github/workflows/pullrequest-verify.yaml +++ b/.github/workflows/pullrequest-verify.yaml @@ -16,7 +16,7 @@ jobs: uses: project-origin/.github/.github/workflows/reusable-verify-renovate.yaml@5f308f6499ed423ed1252156296e18be614202de verify-chart: - uses: project-origin/.github/.github/workflows/reusable-verify-chart.yaml@5f308f6499ed423ed1252156296e18be614202de + uses: project-origin/.github/.github/workflows/reusable-verify-chart.yaml@32ff33baaf1c9960ef1f01ef4ee554225db1972c verify-container-build: uses: project-origin/.github/.github/workflows/reusable-build-push-container-ghcr.yaml@5f308f6499ed423ed1252156296e18be614202de From 91e043e7f97f0421e8933f290d261cc4588227ea Mon Sep 17 00:00:00 2001 From: Martin Schmidt Date: Mon, 19 Aug 2024 09:37:54 +0200 Subject: [PATCH 06/10] Changed initContainer to use kubectl --- chart/templates/_limits.yaml | 10 ++++++++-- chart/templates/deployment.yaml | 19 ++++++++++++------- chart/templates/roles.yaml | 2 ++ chart/values.yaml | 10 +++++----- 4 files changed, 27 insertions(+), 14 deletions(-) diff --git a/chart/templates/_limits.yaml b/chart/templates/_limits.yaml index 75749c6..0d800d7 100644 --- a/chart/templates/_limits.yaml +++ b/chart/templates/_limits.yaml @@ -1,11 +1,17 @@ {{- define "limits" -}} resources: requests: + {{- if .Values.resources.requests.cpu }} cpu: {{ 
.Values.resources.requests.cpu }} + {{- end }} + {{- if .Values.resources.requests.memory }} memory: {{ .Values.resources.requests.memory }} + {{- end }} limits: {{- if .Values.resources.limits.cpu }} cpu: {{ .Values.resources.limits.cpu }} - {{- end -}} - memory: {{ .Values.resources.limits.memory | default .Values.resources.requests.memory }} + {{- end }} + {{- if .Values.resources.limits.memory }} + memory: {{ .Values.resources.limits.memory }} + {{- end }} {{- end -}} diff --git a/chart/templates/deployment.yaml b/chart/templates/deployment.yaml index 0088a9a..8e0ea47 100644 --- a/chart/templates/deployment.yaml +++ b/chart/templates/deployment.yaml @@ -22,17 +22,20 @@ spec: automountServiceAccountToken: false initContainers: - name: wait-for-migration - image: groundnuty/k8s-wait-for:v2.0 + image: bitnami/kubectl:1.30.4 resources: requests: cpu: 0.1 - memory: 50Mi limits: memory: 50Mi - args: - - "job" - - "po-chronicler-migrate-job-{{ .Values.image.tag | default .Chart.AppVersion | replace "." "-" }}" - - "-n {{ .Release.Namespace }}" + command: + - /bin/sh + - -c + - | + kubectl wait --for=condition=complete job/${JOB_NAME} --timeout=300s -n {{ .Release.Namespace }} + env: + - name: JOB_NAME + value: po-chronicler-migrate-job-{{ .Values.image.tag | default .Chart.AppVersion | replace "." "-" }} volumeMounts: - name: service-account-token mountPath: /var/run/secrets/kubernetes.io/serviceaccount @@ -48,8 +51,10 @@ spec: # OpenTelemetry Collector Configuration - name: Otlp__Enabled value: {{ .Values.otlp.enabled | quote }} + {{- if .Values.otlp.enabled }} - name: Otlp__Endpoint - value: {{ .Values.otlp.endpoint }} + value: {{ required "otel.endpoint is required when enabled" .Values.otlp.endpoint | quote }} + {{- end }} # Chronicler Configuration {{- if .Values.config.networkConfigurationFile }} diff --git a/chart/templates/roles.yaml b/chart/templates/roles.yaml index 47cdb51..4077c3d 100644 --- a/chart/templates/roles.yaml +++ b/chart/templates/roles.yaml @@ -17,6 +17,8 @@ rules: - jobs verbs: - get + - list + - watch --- kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1 diff --git a/chart/values.yaml b/chart/values.yaml index de6a3f2..b0e0095 100644 --- a/chart/values.yaml +++ b/chart/values.yaml @@ -12,16 +12,16 @@ resources: # requests holds the configuration for the resource requests requests: # cpu defines the cpu request limit for the chronicler server - cpu: 0.5 + cpu: 500m # memory defines the memory request limit for the chronicler server - memory: 200Mi + memory: # limits holds the configuration for the resource limits limits: # cpu defines the cpu limit for the chronicler server cpu: - # memory defines the memory limit for the chronicler server, defaults to the request memory - memory: + # memory defines the memory limit for the chronicler server + memory: 200Mi # service holds the configuration for the service service: @@ -108,4 +108,4 @@ otlp: # enabled determines if the opentelemetry collector is enabled enabled: false # endpoint defines the endpoint of the opentelemetry collector, example "http://otel-collector:4317" - endpoint: "http://opentelemetry-collector.otel-collector:4317" + endpoint: From 964fa520b97a11a0db0dfe0e7914a6ff0f29bef4 Mon Sep 17 00:00:00 2001 From: Martin Schmidt Date: Mon, 19 Aug 2024 09:38:31 +0200 Subject: [PATCH 07/10] removed .debug --- .../templates/deployment.yaml | 66 ------------------- .../templates/network-config.yaml | 1 - 2 files changed, 67 deletions(-) delete mode 100644 
chart/.debug/project-origin-chronicler/templates/deployment.yaml delete mode 100644 chart/.debug/project-origin-chronicler/templates/network-config.yaml diff --git a/chart/.debug/project-origin-chronicler/templates/deployment.yaml b/chart/.debug/project-origin-chronicler/templates/deployment.yaml deleted file mode 100644 index 06789f6..0000000 --- a/chart/.debug/project-origin-chronicler/templates/deployment.yaml +++ /dev/null @@ -1,66 +0,0 @@ ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: po-chronicler-deployment - namespace: NAMESPACE - labels: - app: po-chronicler -spec: - replicas: 3 - selector: - matchLabels: - app: po-chronicler - strategy: - type: Recreate - template: - metadata: - labels: - app: po-chronicler - spec: - serviceAccountName: chronicler-migration-waiter - initContainers: - - name: wait-for-migration - image: groundnuty/k8s-wait-for:v2.0 - args: - - "job" - - "po-chronicler-migrate-job-0-1-0" - containers: - - name: po-chronicler - image: ghcr.io/project-origin/chronicler:0.1.0 - args: - - "--serve" - env: - # Database Configuration - - name: DB_HOST - value: "_" - - name: DB_PORT - value: "_" - - name: DB_DATABASE - value: "_" - - name: DB_USERNAME - value: "_" - - name: DB_PASSWORD - value: "_" - - # OpenTelemetry Collector Configuration - - name: Otlp__Enabled - value: "false" - - name: Otlp__Endpoint - value: http://opentelemetry-collector.otel-collector:4317 - - # Chronicler Configuration - - name: NetworkConfigurationUri - value: /etc/config/networkConfiguration.json - volumeMounts: - - name: config-volume - mountPath: /etc/config - subPath: networkConfiguration.json - - volumes: - - name: config-volume - configMap: - name: po-chronicler-deployment-config - items: - - key: networkConfiguration.json - path: networkConfiguration.json diff --git a/chart/.debug/project-origin-chronicler/templates/network-config.yaml b/chart/.debug/project-origin-chronicler/templates/network-config.yaml deleted file mode 100644 index 8b13789..0000000 --- a/chart/.debug/project-origin-chronicler/templates/network-config.yaml +++ /dev/null @@ -1 +0,0 @@ - From 0c97ed13ccc9661f6bdd38df7a5cec3e9b48161f Mon Sep 17 00:00:00 2001 From: Martin Schmidt Date: Mon, 19 Aug 2024 08:11:26 +0000 Subject: [PATCH 08/10] Moved UriOptionsLoader to common --- .../ProjectOrigin.Chronicler.Server.csproj | 2 +- .../Startup.cs | 4 +- .../UriOptionsLoader/ConfigureExtensions.cs | 26 --- .../UriOptionsLoader/UriOptions.cs | 12 -- .../UriOptionsChangeTokenSource.cs | 21 -- .../UriOptionsLoader/UriOptionsConfigure.cs | 18 -- .../UriOptionsLoaderService.cs | 130 ------------- .../UriOptionsLoaderTests.cs | 183 ------------------ 8 files changed, 2 insertions(+), 394 deletions(-) delete mode 100644 src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/ConfigureExtensions.cs delete mode 100644 src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/UriOptions.cs delete mode 100644 src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/UriOptionsChangeTokenSource.cs delete mode 100644 src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/UriOptionsConfigure.cs delete mode 100644 src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/UriOptionsLoaderService.cs delete mode 100644 src/ProjectOrigin.Chronicler.Test/UriOptionsLoaderTests.cs diff --git a/src/ProjectOrigin.Chronicler.Server/ProjectOrigin.Chronicler.Server.csproj b/src/ProjectOrigin.Chronicler.Server/ProjectOrigin.Chronicler.Server.csproj index 2dc0ecb..b999320 100644 --- a/src/ProjectOrigin.Chronicler.Server/ProjectOrigin.Chronicler.Server.csproj +++ 
b/src/ProjectOrigin.Chronicler.Server/ProjectOrigin.Chronicler.Server.csproj @@ -9,7 +9,7 @@ - + diff --git a/src/ProjectOrigin.Chronicler.Server/Startup.cs b/src/ProjectOrigin.Chronicler.Server/Startup.cs index 9002c36..f25403e 100644 --- a/src/ProjectOrigin.Chronicler.Server/Startup.cs +++ b/src/ProjectOrigin.Chronicler.Server/Startup.cs @@ -7,11 +7,9 @@ using ProjectOrigin.ServiceCommon.Database; using ProjectOrigin.ServiceCommon.Grpc; using ProjectOrigin.ServiceCommon.Otlp; -using ProjectOrigin.Chronicler.Server.Repositories; -using Microsoft.Extensions.Options; using ProjectOrigin.ServiceCommon.UriOptionsLoader; +using ProjectOrigin.Chronicler.Server.Repositories; using ProjectOrigin.Chronicler.Server.BlockReader; -using ProjectOrigin.ServiceCommon.Database.Postgres; namespace ProjectOrigin.Chronicler.Server; diff --git a/src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/ConfigureExtensions.cs b/src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/ConfigureExtensions.cs deleted file mode 100644 index f7a28bb..0000000 --- a/src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/ConfigureExtensions.cs +++ /dev/null @@ -1,26 +0,0 @@ -using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.Options; - -namespace ProjectOrigin.ServiceCommon.UriOptionsLoader; - -public static class ConfigureExtensions -{ - /// - /// Configures the UriOptionsLoaderService to load the Uri json object into the specified type - /// - /// The type to load the Uri json object into - /// The Microsoft.Extensions.DependencyInjection.IServiceCollection. - /// The name of the configuration section to bind configuration from - public static void ConfigureUriOptionsLoader(this IServiceCollection services, string configSectionPath) where TOption : class - { - services.AddOptions() - .BindConfiguration(configSectionPath) - .ValidateDataAnnotations() - .ValidateOnStart(); - - services.AddSingleton, UriOptionsConfigure>(); - services.AddSingleton, UriOptionsChangeTokenSource>(); - services.AddSingleton>(); - services.AddHostedService(provider => provider.GetRequiredService>()); - } -} diff --git a/src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/UriOptions.cs b/src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/UriOptions.cs deleted file mode 100644 index 91460dc..0000000 --- a/src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/UriOptions.cs +++ /dev/null @@ -1,12 +0,0 @@ -using System; -using System.ComponentModel.DataAnnotations; - -namespace ProjectOrigin.ServiceCommon.UriOptionsLoader; - -public record UriOptions -{ - [Required(AllowEmptyStrings = false)] - public required Uri ConfigurationUri { get; init; } - - public TimeSpan RefreshInterval { get; init; } = TimeSpan.FromMinutes(15); -} diff --git a/src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/UriOptionsChangeTokenSource.cs b/src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/UriOptionsChangeTokenSource.cs deleted file mode 100644 index 2e19759..0000000 --- a/src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/UriOptionsChangeTokenSource.cs +++ /dev/null @@ -1,21 +0,0 @@ -using Microsoft.Extensions.Options; -using Microsoft.Extensions.Primitives; - -namespace ProjectOrigin.ServiceCommon.UriOptionsLoader; - -public class UriOptionsChangeTokenSource : IOptionsChangeTokenSource where TOptions : class -{ - private readonly UriOptionsLoaderService _httpOptionsLoader; - - public UriOptionsChangeTokenSource(UriOptionsLoaderService httpOptionsLoader) - { - _httpOptionsLoader = httpOptionsLoader; - } - - public string? 
Name => null; - - public IChangeToken GetChangeToken() - { - return _httpOptionsLoader.OptionChangeToken; - } -} diff --git a/src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/UriOptionsConfigure.cs b/src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/UriOptionsConfigure.cs deleted file mode 100644 index c99c4a4..0000000 --- a/src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/UriOptionsConfigure.cs +++ /dev/null @@ -1,18 +0,0 @@ -using Microsoft.Extensions.Options; - -namespace ProjectOrigin.ServiceCommon.UriOptionsLoader; - -public class UriOptionsConfigure : IConfigureOptions where TOptions : class -{ - private readonly UriOptionsLoaderService _httpOptionsLoader; - - public UriOptionsConfigure(UriOptionsLoaderService httpOptionsLoader) - { - _httpOptionsLoader = httpOptionsLoader; - } - - public void Configure(TOptions options) - { - _httpOptionsLoader.Configure(options); - } -} diff --git a/src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/UriOptionsLoaderService.cs b/src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/UriOptionsLoaderService.cs deleted file mode 100644 index c8b9522..0000000 --- a/src/ProjectOrigin.Chronicler.Server/UriOptionsLoader/UriOptionsLoaderService.cs +++ /dev/null @@ -1,130 +0,0 @@ -using System; -using System.Net.Http; -using System.Net.Http.Json; -using System.Reflection; -using System.Text.Json; -using System.Threading; -using System.Threading.Tasks; -using Microsoft.Extensions.Hosting; -using Microsoft.Extensions.Logging; -using Microsoft.Extensions.Options; -using Microsoft.Extensions.Primitives; - -namespace ProjectOrigin.ServiceCommon.UriOptionsLoader; - -public sealed class UriOptionsLoaderService : BackgroundService, IDisposable where TOption : class -{ - private readonly ILogger> _logger; - private readonly IHttpClientFactory _httpClientFactory; - private readonly UriOptions _originOptions; - private readonly PropertyInfo[] _optionProperties; - private CancellationTokenSource _changeTokenSource; - private TOption _option; - - public IChangeToken OptionChangeToken { get; private set; } - - public UriOptionsLoaderService( - ILogger> logger, - IHttpClientFactory httpClientFactory, - IOptions originOptions) - { - _logger = logger; - _httpClientFactory = httpClientFactory; - _originOptions = originOptions.Value; - _optionProperties = typeof(TOption).GetProperties(); - _changeTokenSource = new CancellationTokenSource(); - OptionChangeToken = new CancellationChangeToken(_changeTokenSource.Token); - - _option = LoadRemoteOptions(CancellationToken.None).GetAwaiter().GetResult(); - } - - public void Configure(TOption target) - { - foreach (var property in _optionProperties) - { - if (property.CanRead && property.CanWrite) - { - var targetValue = property.GetValue(target); - var sourceValue = property.GetValue(_option); - - if (!Equals(targetValue, sourceValue)) - { - property.SetValue(target, sourceValue); - } - } - } - } - - protected override async Task ExecuteAsync(CancellationToken stoppingToken) - { - using var timer = new PeriodicTimer(_originOptions.RefreshInterval); - - while (!stoppingToken.IsCancellationRequested && - await timer.WaitForNextTickAsync(stoppingToken)) - { - try - { - var newOptions = await LoadRemoteOptions(stoppingToken); - - if (!newOptions.Equals(_option)) - { - _option = newOptions; - await NotifyChanges(); - } - } - catch (Exception ex) - { - _logger.LogError(ex, "Error while loading options from {uri}", _originOptions.ConfigurationUri); - } - } - } - - private async Task NotifyChanges() - { - var newTokenSource = new 
CancellationTokenSource(); - OptionChangeToken = new CancellationChangeToken(newTokenSource.Token); - await _changeTokenSource.CancelAsync(); - _changeTokenSource.Dispose(); - _changeTokenSource = newTokenSource; - } - - private async Task LoadRemoteOptions(CancellationToken stoppingToken) - { - if (_originOptions.ConfigurationUri.Scheme == Uri.UriSchemeHttp || - _originOptions.ConfigurationUri.Scheme == Uri.UriSchemeHttps) - { - return await LoadFromHttp(stoppingToken); - } - else if (_originOptions.ConfigurationUri.Scheme == Uri.UriSchemeFile) - { - return await LoadFromFile(stoppingToken); - } - else - { - throw new NotSupportedException($"Unsupported URI scheme: {_originOptions.ConfigurationUri.Scheme}"); - } - } - - private async Task LoadFromHttp(CancellationToken stoppingToken) - { - using var httpClient = _httpClientFactory.CreateClient(); - var response = await httpClient.GetAsync(_originOptions.ConfigurationUri, stoppingToken); - response.EnsureSuccessStatusCode(); - - return await response.Content.ReadFromJsonAsync(stoppingToken) - ?? throw new JsonException("Failed to read options from response."); - } - - private async Task LoadFromFile(CancellationToken stoppingToken) - { - var json = await System.IO.File.ReadAllTextAsync(_originOptions.ConfigurationUri.LocalPath, stoppingToken); - return JsonSerializer.Deserialize(json) - ?? throw new JsonException("Failed to read options from file."); - } - - public override void Dispose() - { - _changeTokenSource.Dispose(); - base.Dispose(); - } -} diff --git a/src/ProjectOrigin.Chronicler.Test/UriOptionsLoaderTests.cs b/src/ProjectOrigin.Chronicler.Test/UriOptionsLoaderTests.cs deleted file mode 100644 index c817db7..0000000 --- a/src/ProjectOrigin.Chronicler.Test/UriOptionsLoaderTests.cs +++ /dev/null @@ -1,183 +0,0 @@ -using System.Collections.Generic; -using System.Threading.Tasks; -using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.Hosting; -using Microsoft.Extensions.Options; -using WireMock.RequestBuilders; -using WireMock.ResponseBuilders; -using WireMock.Server; -using Xunit; -using ProjectOrigin.ServiceCommon.UriOptionsLoader; -using Microsoft.Extensions.Configuration.Memory; -using Microsoft.AspNetCore.Hosting; -using FluentAssertions; -using System; - -namespace ProjectOrigin.Chronicler.Test; - -public class UriOptionsLoaderTests -{ - private const string TestPath = "/TestPath"; - private const string Scenario = "MyScenario"; - private const string SecondCallState = "Second Call"; - - [Fact] - public async Task WhenChanged_MonitorCalled() - { - var _networkMockServer = WireMockServer.Start(); - - var _builder = new HostBuilder(); - _builder.ConfigureHostConfiguration(config => - config.Add(new MemoryConfigurationSource() - { - InitialData = new Dictionary() - { - { "TestPath:ConfigurationUri", _networkMockServer.Urls[0] + TestPath }, - { "TestPath:RefreshInterval", "00:00:10" }, - } - })); - - _builder.ConfigureServices((context, services) => - { - services.AddHttpClient(); - services.ConfigureUriOptionsLoader("TestPath"); - services.AddSingleton(); - }); - - _networkMockServer.Given(Request.Create().WithPath(TestPath).UsingGet()) - .InScenario(Scenario) - .WillSetStateTo(SecondCallState) - .RespondWith(Response.Create() - .WithStatusCode(200) - .WithHeader("Content-Type", "application/json") - .WithBodyAsJson(new - { - SomeKey = "SomeValue", - })); - - _networkMockServer.Given(Request.Create().WithPath(TestPath).UsingGet()) - .InScenario(Scenario) - .WhenStateIs(SecondCallState) - 
.RespondWith(Response.Create() - .WithStatusCode(200) - .WithHeader("Content-Type", "application/json") - .WithBodyAsJson(new - { - SomeKey = "SomeOtherValue", - })); - - - using var host = _builder.Build(); - await Task.Run(host.Start); - - var listener = host.Services.GetRequiredService(); - - TestOption currentOption = listener.MonitorOption.CurrentValue; - TestOption? newOption = null; - var taskCompletionSource = new TaskCompletionSource(); - - listener.MonitorOption.OnChange((options, name) => - { - newOption = options; - taskCompletionSource.SetResult(true); - }); - - await taskCompletionSource.Task; - - Assert.NotEqual(currentOption, newOption); - } - - [Fact] - public async Task WhenNotChanged_MonitorNotCalled() - { - var _networkMockServer = WireMockServer.Start(); - - var _builder = new HostBuilder(); - _builder.ConfigureHostConfiguration(config => - config.Add(new MemoryConfigurationSource() - { - InitialData = new Dictionary() - { - { "TestPath:ConfigurationUri", _networkMockServer.Urls[0] + TestPath }, - { "TestPath:RefreshInterval", "00:00:5" }, - } - })); - - _builder.ConfigureServices((context, services) => - { - services.AddHttpClient(); - services.ConfigureUriOptionsLoader("TestPath"); - services.AddSingleton(); - }); - - _networkMockServer.Given(Request.Create().WithPath(TestPath).UsingGet()) - .RespondWith(Response.Create() - .WithStatusCode(200) - .WithHeader("Content-Type", "application/json") - .WithBodyAsJson(new - { - SomeKey = "SomeValue", - })); - - using var host = _builder.Build(); - await Task.Run(host.Start); - - var listener = host.Services.GetRequiredService(); - bool called = false; - - listener.MonitorOption.OnChange((options, name) => - { - called = true; - }); - - await Task.Delay(13000); - _networkMockServer.LogEntries.Should().HaveCount(3); - called.Should().BeFalse(); - } - - [Fact] - public async Task NotSupportedFormat_ThrowsException() - { - // Arrange - var _builder = new HostBuilder(); - _builder.ConfigureHostConfiguration(config => - config.Add(new MemoryConfigurationSource() - { - InitialData = new Dictionary() - { - { "TestPath:ConfigurationUri", "some://hello.com" }, - { "TestPath:RefreshInterval", "00:00:5" }, - } - })); - - - _builder.ConfigureServices((context, services) => - { - services.AddHttpClient(); - services.ConfigureUriOptionsLoader("TestPath"); - }); - - using var host = _builder.Build(); - - // Act - var exception = await Assert.ThrowsAsync(() => Task.Run(host.Start)); - - // Assert - exception.Message.Should().Be("Unsupported URI scheme: some"); - } - - internal class NetworkOptionsChangeListener - { - public IOptionsMonitor MonitorOption { get; } - - public NetworkOptionsChangeListener(IOptionsMonitor optionsMonitor) - { - MonitorOption = optionsMonitor; - } - } - - internal record TestOption - { - public required string SomeKey { get; init; } - } -} From 23cbb1e5a9d12b3bd372589b47cf57cbcbf9bb3f Mon Sep 17 00:00:00 2001 From: Martin Schmidt Date: Mon, 19 Aug 2024 10:13:13 +0000 Subject: [PATCH 09/10] Improve security by setting container security context --- chart/templates/deployment.yaml | 9 +++++++++ chart/templates/migrate-job.yaml | 6 ++++++ src/Chronicler.Dockerfile | 20 ++++++------------- .../ProjectOrigin.Chronicler.Server.csproj | 1 + 4 files changed, 22 insertions(+), 14 deletions(-) diff --git a/chart/templates/deployment.yaml b/chart/templates/deployment.yaml index 8e0ea47..8593fcb 100644 --- a/chart/templates/deployment.yaml +++ b/chart/templates/deployment.yaml @@ -20,6 +20,9 @@ spec: spec: serviceAccountName: 
chronicler-migration-waiter automountServiceAccountToken: false + securityContext: + runAsUser: 1654 + runAsNonRoot: true initContainers: - name: wait-for-migration image: bitnami/kubectl:1.30.4 @@ -33,6 +36,9 @@ spec: - -c - | kubectl wait --for=condition=complete job/${JOB_NAME} --timeout=300s -n {{ .Release.Namespace }} + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true env: - name: JOB_NAME value: po-chronicler-migrate-job-{{ .Values.image.tag | default .Chart.AppVersion | replace "." "-" }} @@ -45,6 +51,9 @@ spec: {{ include "limits" . | nindent 10 }} args: - "--serve" + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true env: {{- include "database.config" . | nindent 12 }} diff --git a/chart/templates/migrate-job.yaml b/chart/templates/migrate-job.yaml index 56b9f3d..6a5e83d 100644 --- a/chart/templates/migrate-job.yaml +++ b/chart/templates/migrate-job.yaml @@ -11,6 +11,9 @@ spec: template: spec: automountServiceAccountToken: false + securityContext: + runAsUser: 1654 + runAsNonRoot: true restartPolicy: Never containers: - name: po-chronicler-migrate-job @@ -20,3 +23,6 @@ spec: - "--migrate" env: {{- include "database.config" . | nindent 12 }} + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true diff --git a/src/Chronicler.Dockerfile b/src/Chronicler.Dockerfile index ead2707..4ab5dcd 100644 --- a/src/Chronicler.Dockerfile +++ b/src/Chronicler.Dockerfile @@ -1,5 +1,4 @@ ARG PROJECT=ProjectOrigin.Chronicler.Server -ARG USER=dotnetuser FROM --platform=$BUILDPLATFORM mcr.microsoft.com/dotnet/sdk:8.0.204 AS build ARG PROJECT @@ -10,23 +9,16 @@ COPY ./Protos ./Protos COPY ./${PROJECT} ./${PROJECT} RUN dotnet restore ${PROJECT} -RUN dotnet build ${PROJECT} -c Release --no-restore -o /app/build -RUN dotnet publish ${PROJECT} -c Release -o /app/publish +RUN dotnet build ${PROJECT} -c Release --no-restore -p:CustomAssemblyName=App +RUN dotnet publish ${PROJECT} -c Release --no-build -p:CustomAssemblyName=App -o /app/publish # ------- production image ------- -FROM mcr.microsoft.com/dotnet/aspnet:8.0.4 AS production -ARG PROJECT -ARG USER - -ENV USER=dotnetuser -ENV APPLICATION=${PROJECT} -RUN groupadd -r "$USER" && useradd -r -g "$USER" "$USER" +FROM mcr.microsoft.com/dotnet/aspnet:8.0.4-jammy-chiseled-extra AS production WORKDIR /app -COPY --chown=root:root --from=build /app/publish . -RUN chmod -R 655 . +COPY --from=build /app/publish . 
-USER $USER EXPOSE 5000 EXPOSE 5001 -ENTRYPOINT ["/bin/sh", "-c", "dotnet ${APPLICATION}.dll \"${@}\"", "--" ] + +ENTRYPOINT ["dotnet", "App.dll"] diff --git a/src/ProjectOrigin.Chronicler.Server/ProjectOrigin.Chronicler.Server.csproj b/src/ProjectOrigin.Chronicler.Server/ProjectOrigin.Chronicler.Server.csproj index b999320..7cc78d4 100644 --- a/src/ProjectOrigin.Chronicler.Server/ProjectOrigin.Chronicler.Server.csproj +++ b/src/ProjectOrigin.Chronicler.Server/ProjectOrigin.Chronicler.Server.csproj @@ -6,6 +6,7 @@ disable true $(NoWarn);1591 + $(CustomAssemblyName) From 332d41a4dd97f268f5a12daf277bea583462be32 Mon Sep 17 00:00:00 2001 From: Martin Schmidt Date: Mon, 19 Aug 2024 10:21:58 +0000 Subject: [PATCH 10/10] Add ephemeral-storage limit to deployment and limits templates --- chart/templates/_limits.yaml | 1 + chart/templates/deployment.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/chart/templates/_limits.yaml b/chart/templates/_limits.yaml index 0d800d7..6f54716 100644 --- a/chart/templates/_limits.yaml +++ b/chart/templates/_limits.yaml @@ -14,4 +14,5 @@ resources: {{- if .Values.resources.limits.memory }} memory: {{ .Values.resources.limits.memory }} {{- end }} + ephemeral-storage: "1Mi" {{- end -}} diff --git a/chart/templates/deployment.yaml b/chart/templates/deployment.yaml index 8593fcb..786bfbe 100644 --- a/chart/templates/deployment.yaml +++ b/chart/templates/deployment.yaml @@ -31,6 +31,7 @@ spec: cpu: 0.1 limits: memory: 50Mi + ephemeral-storage: "1Mi" command: - /bin/sh - -c