From c617e49a6369ec4a516b8fe85234cc50f0b93445 Mon Sep 17 00:00:00 2001
From: Simon Larsen
Date: Fri, 5 Jul 2024 10:51:19 +0100
Subject: [PATCH] refactor: Update ingestor replica count based on configuration

This commit makes the ingestor replica count in
HelmChart/Public/oneuptime/templates/ingestor.yaml configurable
independently of the rest of the chart. If
`$.Values.deployment.ingestor.replicaCount` is set, the Deployment uses
that value; otherwise it falls back to the chart-wide
`$.Values.deployment.replicaCount`. This allows the ingestor to be scaled
on its own without changing the replica count of every other deployment.

It also refactors Ingestor/API/Monitor.ts to run its per-monitor work
concurrently: the `updateOneById` and `populateSecrets` calls are collected
into arrays of promises and awaited with `Promise.all` instead of being
awaited one at a time inside the loops, reducing request latency when many
monitors are processed.
---
 .../Public/oneuptime/templates/ingestor.yaml |  4 ++
 HelmChart/Public/oneuptime/values.yaml       |  2 +
 Ingestor/API/Monitor.ts                      | 40 ++++++++++++-------
 3 files changed, 31 insertions(+), 15 deletions(-)

diff --git a/HelmChart/Public/oneuptime/templates/ingestor.yaml b/HelmChart/Public/oneuptime/templates/ingestor.yaml
index 55c634ff69c..951c9449c78 100644
--- a/HelmChart/Public/oneuptime/templates/ingestor.yaml
+++ b/HelmChart/Public/oneuptime/templates/ingestor.yaml
@@ -15,7 +15,11 @@ spec:
   selector:
     matchLabels:
       app: {{ printf "%s-%s" $.Release.Name "ingestor" }}
+  {{- if $.Values.deployment.ingestor.replicaCount }}
+  replicas: {{ $.Values.deployment.ingestor.replicaCount }}
+  {{- else }}
   replicas: {{ $.Values.deployment.replicaCount }}
+  {{- end }}
   template:
     metadata:
       labels:
diff --git a/HelmChart/Public/oneuptime/values.yaml b/HelmChart/Public/oneuptime/values.yaml
index 699b19c8c91..20c5b9ec793 100644
--- a/HelmChart/Public/oneuptime/values.yaml
+++ b/HelmChart/Public/oneuptime/values.yaml
@@ -17,6 +17,8 @@ fluentdHost:
 
 deployment:
   replicaCount: 1
+  ingestor:
+    replicaCount:
 
 metalLb:
   enabled: false
diff --git a/Ingestor/API/Monitor.ts b/Ingestor/API/Monitor.ts
index efa9b381fcb..76e52d308b6 100644
--- a/Ingestor/API/Monitor.ts
+++ b/Ingestor/API/Monitor.ts
@@ -275,6 +275,8 @@
     // update the lastMonitoredAt field of the monitors
 
+    const updatePromises: Array<Promise<void>> = [];
+
     for (const monitorProbe of monitorProbes) {
       if (!monitorProbe.monitor) {
         continue;
       }
@@ -293,18 +295,22 @@
         logger.error(err);
       }
 
-      await MonitorProbeService.updateOneById({
-        id: monitorProbe.id!,
-        data: {
-          lastPingAt: OneUptimeDate.getCurrentDate(),
-          nextPingAt: nextPing,
-        },
-        props: {
-          isRoot: true,
-        },
-      });
+      updatePromises.push(
+        MonitorProbeService.updateOneById({
+          id: monitorProbe.id!,
+          data: {
+            lastPingAt: OneUptimeDate.getCurrentDate(),
+            nextPingAt: nextPing,
+          },
+          props: {
+            isRoot: true,
+          },
+        }),
+      );
     }
 
+    await Promise.all(updatePromises);
+
     // if (mutex) {
     //   try {
     //     await Semaphore.release(mutex);
@@ -326,15 +332,19 @@
 
     // check if the monitor needs secrets to be filled.
 
-    const monitorsWithSecretPopulated: Array<Monitor> = [];
+    let monitorsWithSecretPopulated: Array<Monitor> = [];
+    const monitorWithSecretsPopulatePromises: Array<Promise<Monitor>> = [];
 
     for (const monitor of monitors) {
-      const monitorWithSecrets: Monitor =
-        await MonitorUtil.populateSecrets(monitor);
-
-      monitorsWithSecretPopulated.push(monitorWithSecrets);
+      monitorWithSecretsPopulatePromises.push(
+        MonitorUtil.populateSecrets(monitor),
+      );
     }
 
+    monitorsWithSecretPopulated = await Promise.all(
+      monitorWithSecretsPopulatePromises,
+    );
+
     logger.debug("Populated secrets");
     logger.debug(monitorsWithSecretPopulated);
 
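
Usage note on the Helm change: the template now prefers a per-ingestor replica count and only falls back to the chart-wide one. A minimal values override might look like this (the count of 3 is illustrative, not a chart default):

```yaml
# Hypothetical values.yaml override; the numbers are illustrative.
deployment:
  replicaCount: 1     # chart-wide fallback used by all deployments
  ingestor:
    replicaCount: 3   # takes precedence for the ingestor Deployment only
```

The same value can be passed at install time with `helm upgrade --set deployment.ingestor.replicaCount=3 ...`. The seemingly empty `ingestor: replicaCount:` keys added to the chart's values.yaml are load-bearing: they make `deployment.ingestor` a map, so `$.Values.deployment.ingestor.replicaCount` evaluates to nil (falsy, taking the `{{- else }}` branch) instead of failing to render with a nil-pointer error when no override is supplied.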
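The two Monitor.ts hunks apply the same refactor: start the async calls inside the loop without awaiting, then await the whole batch once. Below is a self-contained sketch of that pattern with stand-in names; `populateSecrets` here is a mock with simulated latency, not the real `MonitorUtil`, and the `Monitor` type is reduced to the fields the sketch needs:

```typescript
// Stand-in monitor type; the real model has many more fields.
interface Monitor {
  id: string;
  secret?: string;
}

// Mock of an I/O-bound call such as MonitorUtil.populateSecrets().
async function populateSecrets(monitor: Monitor): Promise<Monitor> {
  await new Promise((resolve) => setTimeout(resolve, 100)); // simulated latency
  return { ...monitor, secret: `secret-for-${monitor.id}` };
}

async function main(): Promise<void> {
  const monitors: Array<Monitor> = [{ id: "a" }, { id: "b" }, { id: "c" }];

  // Before: awaiting inside the loop serializes the calls, so total time
  // is the sum of the individual latencies (~300ms here).
  //   for (const monitor of monitors) {
  //     populated.push(await populateSecrets(monitor));
  //   }

  // After: push the promises first, then await them together, so the calls
  // run concurrently and total time is roughly the slowest call (~100ms).
  const promises: Array<Promise<Monitor>> = [];
  for (const monitor of monitors) {
    promises.push(populateSecrets(monitor));
  }
  const populated: Array<Monitor> = await Promise.all(promises);

  console.log(populated.map((m: Monitor) => m.secret));
}

main().catch((err: unknown) => console.error(err));
```

One trade-off worth noting: `Promise.all` rejects as soon as any promise rejects, while the remaining operations keep running unobserved. `Promise.allSettled` is the drop-in alternative when a single failed update should not abort the whole batch.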