From 06807cf84bcb40840283cbaeb9463148951c4dee Mon Sep 17 00:00:00 2001
From: Kenneth Kehl <@kkehl@flexion.us>
Date: Mon, 25 Nov 2024 11:57:06 -0800
Subject: [PATCH 01/15] change celery pool support from prefork to threads

---
 Makefile | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/Makefile b/Makefile
index acd31f390..ae79b930c 100644
--- a/Makefile
+++ b/Makefile
@@ -50,7 +50,8 @@ run-celery: ## Run celery, TODO remove purge for staging/prod
 	-A run_celery.notify_celery worker \
 	--pidfile="/tmp/celery.pid" \
 	--loglevel=INFO \
-	--concurrency=4
+	--pool=threads \
+	--concurrency=10

 .PHONY: dead-code
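The flags above are the whole of the change: swap the default prefork pool for a thread pool and raise concurrency. For reference only, the same knobs exist as Celery settings; this is a minimal sketch (not part of the changeset) using the `worker_pool` and `worker_concurrency` configuration keys, which mirror `--pool` and `--concurrency`.

```python
# Illustrative only: config-key equivalents of --pool=threads and --concurrency=10.
# The repo sets these via CLI flags in the Makefile/manifest instead.
from celery import Celery

app = Celery("notify_celery")

app.conf.worker_pool = "threads"    # same effect as --pool=threads
app.conf.worker_concurrency = 10    # same effect as --concurrency=10
```
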
From 738b4f063e647241769e73edf6d75c9db487bd54 Mon Sep 17 00:00:00 2001
From: Kenneth Kehl <@kkehl@flexion.us>
Date: Mon, 25 Nov 2024 12:17:38 -0800
Subject: [PATCH 02/15] add adr

---
 ...0-adr-celery-pool-support-best-practice.md | 23 +++++++++++++++++++
 1 file changed, 23 insertions(+)
 create mode 100644 docs/adrs/0010-adr-celery-pool-support-best-practice.md

diff --git a/docs/adrs/0010-adr-celery-pool-support-best-practice.md b/docs/adrs/0010-adr-celery-pool-support-best-practice.md
new file mode 100644
index 000000000..b8525e654
--- /dev/null
+++ b/docs/adrs/0010-adr-celery-pool-support-best-practice.md
@@ -0,0 +1,23 @@
+# Make best use of celery worker pools
+
+Status: N/A
+Date: N/A
+
+### Context
+Our API application started with the default celery pool type of 'prefork' and a concurrency of 4. We continually encountered instability, which we initially attributed to a resource leak, so we added the configuration `worker-max-tasks-per-child=500`, which is a recommended best practice. When we ran a load test of 25,000 simulated messages, however, we continued to see stability issues, culminating in a crash of the app after 4 hours that required a restage. After running `cf app notify-api-production`, observing that `cpu entitlement` was off the charts at 10000% to 12000% for the workers, and doing some further reading, we concluded that `prefork` is probably not the best pool type for the API application.
+
+The problem with `prefork` is that each process tends to hang onto the CPU allocated to it, even when it is idle. Our application is not computationally intensive and largely consists of downloading strings from S3, parsing them, and sending them out as SMS messages. Based on the determination that our app is likely I/O bound, we elected to run an experiment in which we changed the pool type to `threads` and increased concurrency to `10`. The expectation is that both memory and CPU usage will decrease and the app will remain available.
+
+### Decision
+
+### Consequences
+
+### Author
+@kenkehl
+
+### Stakeholders
+@ccostino
+@stvnrlly
+
+### Next Steps
+- Run an after-hours load test with production configured to --pool=threads and --concurrency=10 (concurrency can be cautiously increased once we know it works)

From 8c6f7ede0bb0043297f57c95fc0a841ea95ada0d Mon Sep 17 00:00:00 2001
From: Kenneth Kehl <@kkehl@flexion.us>
Date: Mon, 25 Nov 2024 15:26:50 -0800
Subject: [PATCH 03/15] oops add pool=threads to manifest

---
 manifest.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/manifest.yml b/manifest.yml
index a8a3e7f2b..39e842730 100644
--- a/manifest.yml
+++ b/manifest.yml
@@ -26,7 +26,7 @@ applications:
   - type: worker
     instances: ((worker_instances))
     memory: ((worker_memory))
-    command: newrelic-admin run-program celery -A run_celery.notify_celery worker --loglevel=INFO --concurrency=4
+    command: newrelic-admin run-program celery -A run_celery.notify_celery worker --loglevel=INFO --pool=threads --concurrency=10
   - type: scheduler
     instances: 1
     memory: ((scheduler_memory))

From f8e30f80b8b1c06935fef75be94ee2411f9ec3ff Mon Sep 17 00:00:00 2001
From: Kenneth Kehl <@kkehl@flexion.us>
Date: Thu, 2 Jan 2025 09:29:30 -0800
Subject: [PATCH 04/15] add performance testing debug info

---
 app/dao/notifications_dao.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/app/dao/notifications_dao.py b/app/dao/notifications_dao.py
index b9c3118fa..68c1268e3 100644
--- a/app/dao/notifications_dao.py
+++ b/app/dao/notifications_dao.py
@@ -1,5 +1,6 @@
 import json
 from datetime import timedelta
+from time import time

 from flask import current_app
 from sqlalchemy import (
@@ -727,6 +728,7 @@ def get_service_ids_with_notifications_on_date(notification_type, date):


 def dao_update_delivery_receipts(receipts, delivered):
+    start_time_millis = time() * 1000
     new_receipts = []
     for r in receipts:
         if isinstance(r, str):
@@ -773,3 +775,7 @@ def dao_update_delivery_receipts(receipts, delivered):
         )
         db.session.execute(stmt)
         db.session.commit()
+    elapsed_time = (time() * 1000) - start_time_millis
+    current_app.logger.info(
+        f"#loadtestperformance batch update query time: {elapsed_time} ms"
+    )

From e2d64c073872176049f455a1d1ec635e50f2c365 Mon Sep 17 00:00:00 2001
From: Kenneth Kehl <@kkehl@flexion.us>
Date: Thu, 2 Jan 2025 11:07:20 -0800
Subject: [PATCH 05/15] add number of notifications processed

---
 app/dao/notifications_dao.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/app/dao/notifications_dao.py b/app/dao/notifications_dao.py
index 68c1268e3..139f7ae8a 100644
--- a/app/dao/notifications_dao.py
+++ b/app/dao/notifications_dao.py
@@ -777,5 +777,6 @@ def dao_update_delivery_receipts(receipts, delivered):
         db.session.commit()
     elapsed_time = (time() * 1000) - start_time_millis
     current_app.logger.info(
-        f"#loadtestperformance batch update query time: {elapsed_time} ms"
+        f"#loadtestperformance batch update query time: \
+            updated {len(receipts)} notifications in {elapsed_time} ms"
     )
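The two patches above time `dao_update_delivery_receipts` inline and log the result under the `#loadtestperformance` tag. Purely as an illustration, here is the same measurement factored into a reusable helper; the helper name and the use of the stdlib logger instead of `current_app.logger` are choices made for a self-contained sketch, not code from the repo.

```python
# Sketch only: the "log how long this block took" pattern from the patches
# above, wrapped in a context manager.
import logging
import time
from contextlib import contextmanager

logger = logging.getLogger(__name__)


@contextmanager
def log_elapsed_ms(label):
    start = time.perf_counter()
    try:
        yield
    finally:
        elapsed_ms = (time.perf_counter() - start) * 1000
        logger.info("#loadtestperformance %s took %.1f ms", label, elapsed_ms)


# usage (inside the DAO function):
# with log_elapsed_ms(f"batch update of {len(receipts)} receipts"):
#     db.session.execute(stmt)
#     db.session.commit()
```
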
From cd13984e3ec42c3e8fe7a80f5a7bff88630309bb Mon Sep 17 00:00:00 2001
From: Kenneth Kehl <@kkehl@flexion.us>
Date: Mon, 6 Jan 2025 07:19:54 -0800
Subject: [PATCH 06/15] placeholder

---
 app/delivery/send_to_providers.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/app/delivery/send_to_providers.py b/app/delivery/send_to_providers.py
index e41062b41..74be6429c 100644
--- a/app/delivery/send_to_providers.py
+++ b/app/delivery/send_to_providers.py
@@ -158,7 +158,7 @@ def _get_verify_code(notification):
         recipient = recipient.decode("utf-8")
     return recipient

-
+# PUT THE FIX HERE???????
 def get_sender_numbers(notification):
     possible_senders = dao_get_sms_senders_by_service_id(notification.service_id)
     sender_numbers = []

From 628c6eb84820f83b928aed12086f2d3c47f7cf3f Mon Sep 17 00:00:00 2001
From: Kenneth Kehl <@kkehl@flexion.us>
Date: Mon, 6 Jan 2025 11:45:31 -0800
Subject: [PATCH 07/15] do minimalistic fix first

---
 app/delivery/send_to_providers.py |  2 +-
 app/models.py                     | 11 ++++++++++-
 2 files changed, 11 insertions(+), 2 deletions(-)

diff --git a/app/delivery/send_to_providers.py b/app/delivery/send_to_providers.py
index 74be6429c..e41062b41 100644
--- a/app/delivery/send_to_providers.py
+++ b/app/delivery/send_to_providers.py
@@ -158,7 +158,7 @@ def _get_verify_code(notification):
         recipient = recipient.decode("utf-8")
     return recipient

-# PUT THE FIX HERE???????
+
 def get_sender_numbers(notification):
     possible_senders = dao_get_sms_senders_by_service_id(notification.service_id)
     sender_numbers = []
diff --git a/app/models.py b/app/models.py
index ec6eac335..fc7b855e4 100644
--- a/app/models.py
+++ b/app/models.py
@@ -577,7 +577,16 @@ def get_inbound_number(self):
         return self.inbound_number.number

     def get_default_sms_sender(self):
-        default_sms_sender = [x for x in self.service_sms_senders if x.is_default]
+        # notify-api-1513 let's try a minimalistic fix
+        # to see if we can get the right numbers back
+        default_sms_sender = [
+            x
+            for x in self.service_sms_senders
+            if x.is_default and x.service_id == self.id
+        ]
+        current_app.logger.info(
+            f"#notify-api-1513 senders for service {self.name} are {self.service_sms_senders}"
+        )
         return default_sms_sender[0].sms_sender

     def get_default_reply_to_email_address(self):

From 2b3c9c869e550bc640ae797b946dea9fcc1e70df Mon Sep 17 00:00:00 2001
From: Andrew Shumway
Date: Tue, 7 Jan 2025 09:27:04 -0700
Subject: [PATCH 08/15] Change delivery receipts task timing to help UI lag

---
 app/celery/scheduled_tasks.py | 2 +-
 app/config.py                 | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/app/celery/scheduled_tasks.py b/app/celery/scheduled_tasks.py
index 3a3fa696e..2dcd570cc 100644
--- a/app/celery/scheduled_tasks.py
+++ b/app/celery/scheduled_tasks.py
@@ -256,7 +256,7 @@ def process_delivery_receipts(self):
     cloudwatch = AwsCloudwatchClient()
     cloudwatch.init_app(current_app)
-    start_time = aware_utcnow() - timedelta(minutes=10)
+    start_time = aware_utcnow() - timedelta(minutes=3)
     end_time = aware_utcnow()
     delivered_receipts, failed_receipts = cloudwatch.check_delivery_receipts(
         start_time, end_time
     )
diff --git a/app/config.py b/app/config.py
index 53a2f9a0d..d3f2a5197 100644
--- a/app/config.py
+++ b/app/config.py
@@ -200,7 +200,7 @@ class Config(object):
         },
         "process-delivery-receipts": {
             "task": "process-delivery-receipts",
-            "schedule": timedelta(minutes=8),
+            "schedule": timedelta(minutes=2),
            "options": {"queue": QueueNames.PERIODIC},
         },
         "expire-or-delete-invitations": {
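The patch above tightens the receipt-processing loop: the beat schedule drops from 8 minutes to 2, and the CloudWatch lookback from 10 minutes to 3, so the lookback stays longer than the interval and consecutive runs overlap rather than leaving gaps. A small sketch of that relationship, using only the numbers from this patch:

```python
# Sketch: the lookback window should exceed the beat interval so that
# consecutive runs of process-delivery-receipts re-scan some of the previous
# window and no receipts fall between two runs.
from datetime import timedelta

BEAT_INTERVAL = timedelta(minutes=2)  # "schedule" in config.py after this patch
LOOKBACK = timedelta(minutes=3)       # start_time offset in scheduled_tasks.py

overlap = LOOKBACK - BEAT_INTERVAL
assert overlap > timedelta(0), "lookback must be longer than the schedule interval"
print(f"each run re-scans {overlap} of the previous run's window")
```
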
From 1c67478d5e50ba74cad8487c0f8a67a99a3e8071 Mon Sep 17 00:00:00 2001
From: Carlo Costino
Date: Tue, 7 Jan 2025 15:27:05 -0500
Subject: [PATCH 09/15] Update daily_check GitHub Action

This changeset updates the reference to GitHub's upload-artifact action to
v4 instead of v3. v3 is being deprecated at the end of January 2025.

Signed-off-by: Carlo Costino
---
 .github/workflows/daily_checks.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/daily_checks.yml b/.github/workflows/daily_checks.yml
index 21374e219..d8e19de98 100644
--- a/.github/workflows/daily_checks.yml
+++ b/.github/workflows/daily_checks.yml
@@ -46,7 +46,7 @@ jobs:
       - name: Run scan
         run: bandit -r app/ -f txt -o /tmp/bandit-output.txt --confidence-level medium
       - name: Upload bandit artifact
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: bandit-report
           path: /tmp/bandit-output.txt

From 0d1a98914a4d6df8734973db9a94a75d7b10543d Mon Sep 17 00:00:00 2001
From: Kenneth Kehl <@kkehl@flexion.us>
Date: Wed, 8 Jan 2025 08:44:49 -0800
Subject: [PATCH 10/15] cleanup pending notifications

---
 app/celery/scheduled_tasks.py                  |  8 ++++++++
 app/config.py                                  |  5 +++++
 app/dao/jobs_dao.py                            |  2 +-
 app/dao/notifications_dao.py                   | 17 +++++++++++++++++
 .../notification_dao/test_notification_dao.py  | 18 ++++++++++++++++++
 5 files changed, 49 insertions(+), 1 deletion(-)

diff --git a/app/celery/scheduled_tasks.py b/app/celery/scheduled_tasks.py
index 2dcd570cc..cb0e0886e 100644
--- a/app/celery/scheduled_tasks.py
+++ b/app/celery/scheduled_tasks.py
@@ -24,6 +24,7 @@
     find_missing_row_for_job,
 )
 from app.dao.notifications_dao import (
+    dao_close_out_delivery_receipts,
     dao_update_delivery_receipts,
     notifications_not_yet_sent,
 )
@@ -278,3 +279,10 @@ def process_delivery_receipts(self):
             current_app.logger.error(
                 "Failed process delivery receipts after max retries"
             )
+
+
+@notify_celery.task(
+    bind=True, max_retries=2, default_retry_delay=3600, name="cleanup-delivery-receipts"
+)
+def cleanup_delivery_receipts(self):
+    dao_close_out_delivery_receipts()
diff --git a/app/config.py b/app/config.py
index d3f2a5197..580495731 100644
--- a/app/config.py
+++ b/app/config.py
@@ -203,6 +203,11 @@ class Config(object):
             "schedule": timedelta(minutes=2),
             "options": {"queue": QueueNames.PERIODIC},
         },
+        "cleanup-delivery-receipts": {
+            "task": "cleanup-delivery-receipts",
+            "schedule": timedelta(minutes=82),
+            "options": {"queue": QueueNames.PERIODIC},
+        },
         "expire-or-delete-invitations": {
             "task": "expire-or-delete-invitations",
             "schedule": timedelta(minutes=66),
diff --git a/app/dao/jobs_dao.py b/app/dao/jobs_dao.py
index ddec26956..c969c4b53 100644
--- a/app/dao/jobs_dao.py
+++ b/app/dao/jobs_dao.py
@@ -45,7 +45,7 @@ def dao_get_job_by_service_id_and_job_id(service_id, job_id):

 def dao_get_unfinished_jobs():
     stmt = select(Job).filter(Job.processing_finished.is_(None))
-    return db.session.execute(stmt).all()
+    return db.session.execute(stmt).scalars().all()


 def dao_get_jobs_by_service_id(
diff --git a/app/dao/notifications_dao.py b/app/dao/notifications_dao.py
index 139f7ae8a..36eeafa92 100644
--- a/app/dao/notifications_dao.py
+++ b/app/dao/notifications_dao.py
@@ -780,3 +780,20 @@ def dao_update_delivery_receipts(receipts, delivered):
         f"#loadtestperformance batch update query time: \
             updated {len(receipts)} notifications in {elapsed_time} ms"
     )
+
+
+def dao_close_out_delivery_receipts():
+    THREE_DAYS_AGO = utc_now() - timedelta(minutes=3)
+    stmt = (
+        update(Notification)
+        .where(
+            Notification.status == NotificationStatus.PENDING,
+            Notification.sent_at < THREE_DAYS_AGO,
+        )
+        .values(status=NotificationStatus.FAILED, provider_response="Technical Failure")
+    )
+    result = db.session.execute(stmt)
+    current_app.logger.info(
+        f"Marked {result.rowcount} notifications as technical failures"
+    )
+    db.session.commit()
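One change in this patch that is easy to miss is in `dao_get_unfinished_jobs` above, which now calls `.scalars()` before `.all()`. With 2.0-style `select()`, `execute().all()` returns `Row` tuples, while `.scalars().all()` unwraps the single ORM entity per row. A standalone sketch of the difference, using a toy model and in-memory SQLite rather than the app's real `Job`:

```python
# Sketch: why .scalars() is needed when selecting whole ORM entities.
from sqlalchemy import Integer, create_engine, select
from sqlalchemy.orm import DeclarativeBase, Mapped, Session, mapped_column


class Base(DeclarativeBase):
    pass


class Job(Base):  # stand-in for app.models.Job
    __tablename__ = "jobs"
    id: Mapped[int] = mapped_column(Integer, primary_key=True)


engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add(Job(id=1))
    session.commit()

    rows = session.execute(select(Job)).all()             # [(Job,)] -- Row tuples
    jobs = session.execute(select(Job)).scalars().all()   # [Job]   -- ORM objects

    assert rows[0][0] is jobs[0]  # same instance, but callers of scalars() skip the indexing
```
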
diff --git a/tests/app/dao/notification_dao/test_notification_dao.py b/tests/app/dao/notification_dao/test_notification_dao.py
index 6e09f182a..f6905a749 100644
--- a/tests/app/dao/notification_dao/test_notification_dao.py
+++ b/tests/app/dao/notification_dao/test_notification_dao.py
@@ -11,6 +11,7 @@

 from app import db
 from app.dao.notifications_dao import (
+    dao_close_out_delivery_receipts,
     dao_create_notification,
     dao_delete_notifications_by_id,
     dao_get_last_notification_added_for_job_id,
@@ -2026,6 +2027,23 @@ def test_update_delivery_receipts(mocker):
     assert "provider_response" in kwargs


+def test_close_out_delivery_receipts(mocker):
+    mock_session = mocker.patch("app.dao.notifications_dao.db.session")
+    mock_update = MagicMock()
+    mock_where = MagicMock()
+    mock_values = MagicMock()
+    mock_update.where.return_value = mock_where
+    mock_where.values.return_value = mock_values
+
+    mock_session.execute.return_value = None
+    with patch("app.dao.notifications_dao.update", return_value=mock_update):
+        dao_close_out_delivery_receipts()
+    mock_update.where.assert_called_once()
+    mock_where.values.assert_called_once()
+    mock_session.execute.assert_called_once_with(mock_values)
+    mock_session.commit.assert_called_once()
+
+
 @pytest.mark.parametrize(
     "created_at_utc,date_to_check,expected_count",
     [

From a2fc97000b4c36db69ea9411fa1120a88e27c662 Mon Sep 17 00:00:00 2001
From: Kenneth Kehl <@kkehl@flexion.us>
Date: Wed, 8 Jan 2025 08:58:24 -0800
Subject: [PATCH 11/15] cleanup pending notifications

---
 app/dao/notifications_dao.py | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/app/dao/notifications_dao.py b/app/dao/notifications_dao.py
index 36eeafa92..c8f2797a0 100644
--- a/app/dao/notifications_dao.py
+++ b/app/dao/notifications_dao.py
@@ -793,7 +793,9 @@ def dao_close_out_delivery_receipts():
         .values(status=NotificationStatus.FAILED, provider_response="Technical Failure")
     )
     result = db.session.execute(stmt)
-    current_app.logger.info(
-        f"Marked {result.rowcount} notifications as technical failures"
-    )
+    db.session.commit()
+    if result:
+        current_app.logger.info(
+            f"Marked {result.rowcount} notifications as technical failures"
+        )

From da19e7c81c50f071058ab622302b633051698caf Mon Sep 17 00:00:00 2001
From: Kenneth Kehl <@kkehl@flexion.us>
Date: Wed, 8 Jan 2025 11:12:08 -0800
Subject: [PATCH 12/15] set prefetch multiplier to 2 and increase concurrency to 15

---
 manifest.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/manifest.yml b/manifest.yml
index 39e842730..9d39c7d84 100644
--- a/manifest.yml
+++ b/manifest.yml
@@ -26,7 +26,7 @@ applications:
   - type: worker
     instances: ((worker_instances))
     memory: ((worker_memory))
-    command: newrelic-admin run-program celery -A run_celery.notify_celery worker --loglevel=INFO --pool=threads --concurrency=10
+    command: newrelic-admin run-program celery -A run_celery.notify_celery worker --loglevel=INFO --pool=threads --concurrency=15 --prefetch-multiplier=2
   - type: scheduler
     instances: 1
     memory: ((scheduler_memory))
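Patch 12 raises concurrency to 15 and sets the prefetch multiplier to 2. In Celery, a worker reserves roughly `concurrency × prefetch_multiplier` messages from the broker at a time, so these two flags together control how far a worker reads ahead of what it is actually executing. A small illustrative calculation (values from this patch; the repo sets them via CLI flags rather than config keys):

```python
# Sketch: how --concurrency and --prefetch-multiplier interact.
worker_concurrency = 15
worker_prefetch_multiplier = 2

# Approximate number of messages a single worker reserves from the broker
# ahead of execution.
reserved_messages = worker_concurrency * worker_prefetch_multiplier
print(f"each worker prefetches up to ~{reserved_messages} messages")
```
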
From 6aae2c7aae7ea770fe904c1bd3dc5cc5d1b385f3 Mon Sep 17 00:00:00 2001
From: Kenneth Kehl <@kkehl@flexion.us>
Date: Thu, 9 Jan 2025 10:53:33 -0800
Subject: [PATCH 13/15] fix db connection pool

---
 app/__init__.py               | 18 +++++++++++++++++-
 app/celery/scheduled_tasks.py |  2 ++
 app/clients/__init__.py       |  3 +--
 app/config.py                 |  2 +-
 4 files changed, 21 insertions(+), 4 deletions(-)

diff --git a/app/__init__.py b/app/__init__.py
index 23c2399e1..0d617ee0c 100644
--- a/app/__init__.py
+++ b/app/__init__.py
@@ -18,6 +18,7 @@
 from werkzeug.exceptions import HTTPException as WerkzeugHTTPException
 from werkzeug.local import LocalProxy

+from app import config
 from app.clients import NotificationProviderClients
 from app.clients.cloudwatch.aws_cloudwatch import AwsCloudwatchClient
 from app.clients.document_download import DocumentDownloadClient
@@ -25,6 +26,7 @@
 from app.clients.email.aws_ses_stub import AwsSesStubClient
 from app.clients.pinpoint.aws_pinpoint import AwsPinpointClient
 from app.clients.sms.aws_sns import AwsSnsClient
+from app.utils import hilite
 from notifications_utils import logging, request_helper
 from notifications_utils.clients.encryption.encryption_client import Encryption
 from notifications_utils.clients.redis.redis_client import RedisClient
@@ -58,15 +60,29 @@ class SQLAlchemy(_SQLAlchemy):
     def apply_driver_hacks(self, app, info, options):
         sa_url, options = super().apply_driver_hacks(app, info, options)

+        print(hilite(f"OPTIONS {options}"))
+
         if "connect_args" not in options:
             options["connect_args"] = {}
         options["connect_args"]["options"] = "-c statement_timeout={}".format(
             int(app.config["SQLALCHEMY_STATEMENT_TIMEOUT"]) * 1000
         )
+
         return (sa_url, options)


-db = SQLAlchemy()
+# Set db engine settings here for now.
+# They were not being set previously (despite environment variables with
+# appropriate-sounding names) and were defaulting to low values
+db = SQLAlchemy(
+    engine_options={
+        "pool_size": config.Config.SQLALCHEMY_POOL_SIZE,
+        "max_overflow": 10,
+        "pool_timeout": config.Config.SQLALCHEMY_POOL_TIMEOUT,
+        "pool_recycle": config.Config.SQLALCHEMY_POOL_RECYCLE,
+        "pool_pre_ping": True,
+    }
+)
 migrate = Migrate()
 ma = Marshmallow()
 notify_celery = NotifyCelery()
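The `engine_options` block above is the substance of the fix: explicit `pool_size`, `max_overflow`, `pool_timeout`, `pool_recycle`, and `pool_pre_ping` instead of silently falling back to SQLAlchemy's defaults. One way to confirm the values actually took effect at runtime is to inspect the engine's pool, which is what the commented-out hint in the next hunk gestures at. A minimal sketch, assuming `app` is the Flask app and `db` is the handle defined in `app/__init__.py`:

```python
# Sketch: confirming the configured QueuePool values on a live engine.
with app.app_context():
    pool = db.engine.pool
    app.logger.info(
        "db pool: size=%s checked_out=%s overflow=%s",
        pool.size(),        # configured pool_size
        pool.checkedout(),  # connections currently in use
        pool.overflow(),    # connections open beyond pool_size
    )
```
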
diff --git a/app/celery/scheduled_tasks.py b/app/celery/scheduled_tasks.py
index cb0e0886e..72806aa58 100644
--- a/app/celery/scheduled_tasks.py
+++ b/app/celery/scheduled_tasks.py
@@ -243,6 +243,8 @@ def check_for_services_with_high_failure_rates_or_sending_to_tv_numbers():
     bind=True, max_retries=7, default_retry_delay=3600, name="process-delivery-receipts"
 )
 def process_delivery_receipts(self):
+    # If we need to check db settings do it here for convenience
+    # current_app.logger.info(f"POOL SIZE {app.db.engine.pool.size()}")
     """
     Every eight minutes or so (see config.py) we run this task, which searches the
     last ten minutes of logs for delivery receipts and batch updates the db with the results.  The overlap
diff --git a/app/clients/__init__.py b/app/clients/__init__.py
index 3392928e4..f185e45e2 100644
--- a/app/clients/__init__.py
+++ b/app/clients/__init__.py
@@ -13,8 +13,7 @@
         "addressing_style": "virtual",
     },
     use_fips_endpoint=True,
-    # This is the default but just for doc sake
-    max_pool_connections=10,
+    max_pool_connections=50,  # This should be equal to or greater than our celery concurrency
 )
diff --git a/app/config.py b/app/config.py
index 580495731..9ae731290 100644
--- a/app/config.py
+++ b/app/config.py
@@ -81,7 +81,7 @@ class Config(object):
     SQLALCHEMY_DATABASE_URI = cloud_config.database_url
     SQLALCHEMY_RECORD_QUERIES = False
     SQLALCHEMY_TRACK_MODIFICATIONS = False
-    SQLALCHEMY_POOL_SIZE = int(getenv("SQLALCHEMY_POOL_SIZE", 5))
+    SQLALCHEMY_POOL_SIZE = int(getenv("SQLALCHEMY_POOL_SIZE", 20))
     SQLALCHEMY_POOL_TIMEOUT = 30
     SQLALCHEMY_POOL_RECYCLE = 300
     SQLALCHEMY_STATEMENT_TIMEOUT = 1200

From 2770f76431c2c5ebde3f461002f9a9d22d0e6adb Mon Sep 17 00:00:00 2001
From: Kenneth Kehl <@kkehl@flexion.us>
Date: Thu, 9 Jan 2025 11:14:51 -0800
Subject: [PATCH 14/15] cleanup

---
 app/__init__.py | 2 --
 app/config.py   | 2 +-
 2 files changed, 1 insertion(+), 3 deletions(-)

diff --git a/app/__init__.py b/app/__init__.py
index 0d617ee0c..add218e5d 100644
--- a/app/__init__.py
+++ b/app/__init__.py
@@ -26,7 +26,6 @@
 from app.clients.email.aws_ses_stub import AwsSesStubClient
 from app.clients.pinpoint.aws_pinpoint import AwsPinpointClient
 from app.clients.sms.aws_sns import AwsSnsClient
-from app.utils import hilite
 from notifications_utils import logging, request_helper
 from notifications_utils.clients.encryption.encryption_client import Encryption
 from notifications_utils.clients.redis.redis_client import RedisClient
@@ -60,7 +59,6 @@ class SQLAlchemy(_SQLAlchemy):
     def apply_driver_hacks(self, app, info, options):
         sa_url, options = super().apply_driver_hacks(app, info, options)

-        print(hilite(f"OPTIONS {options}"))

         if "connect_args" not in options:
             options["connect_args"] = {}
diff --git a/app/config.py b/app/config.py
index 9ae731290..f7f08a36a 100644
--- a/app/config.py
+++ b/app/config.py
@@ -81,7 +81,7 @@ class Config(object):
     SQLALCHEMY_DATABASE_URI = cloud_config.database_url
     SQLALCHEMY_RECORD_QUERIES = False
     SQLALCHEMY_TRACK_MODIFICATIONS = False
-    SQLALCHEMY_POOL_SIZE = int(getenv("SQLALCHEMY_POOL_SIZE", 20))
+    SQLALCHEMY_POOL_SIZE = int(getenv("SQLALCHEMY_POOL_SIZE", 40))
     SQLALCHEMY_POOL_TIMEOUT = 30
     SQLALCHEMY_POOL_RECYCLE = 300
     SQLALCHEMY_STATEMENT_TIMEOUT = 1200

From 7e7d43238fabc60f8c338f9e7d005c3070fcee0f Mon Sep 17 00:00:00 2001
From: Kenneth Kehl <@kkehl@flexion.us>
Date: Thu, 9 Jan 2025 11:16:53 -0800
Subject: [PATCH 15/15] cleanup

---
 manifest.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/manifest.yml b/manifest.yml
index 9d39c7d84..0763a1911 100644
--- a/manifest.yml
+++ b/manifest.yml
@@ -26,7 +26,7 @@ applications:
   - type: worker
     instances: ((worker_instances))
     memory: ((worker_memory))
-    command: newrelic-admin run-program celery -A run_celery.notify_celery worker --loglevel=INFO --pool=threads --concurrency=15 --prefetch-multiplier=2
+    command: newrelic-admin run-program celery -A run_celery.notify_celery worker --loglevel=INFO --pool=threads --concurrency=10 --prefetch-multiplier=2
   - type: scheduler
     instances: 1
     memory: ((scheduler_memory))
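Taken together, the series lands on a thread pool with `--concurrency=10` and `--prefetch-multiplier=2`, a default `SQLALCHEMY_POOL_SIZE` of 40 with 10 overflow connections, and 50 boto connections. A hedged sanity-check sketch of how those numbers relate; the values are copied from the patches, but the thresholds are one reading of the comments in the series, not project policy:

```python
# Sketch: cross-checking the final settings in this series against each other.
celery_concurrency = 10          # --concurrency in manifest.yml / Makefile
prefetch_multiplier = 2          # --prefetch-multiplier in manifest.yml
boto_max_pool_connections = 50   # app/clients/__init__.py
sqlalchemy_pool_size = 40        # SQLALCHEMY_POOL_SIZE default in app/config.py
sqlalchemy_max_overflow = 10     # engine_options in app/__init__.py

# Per the comment in app/clients/__init__.py, boto's HTTP pool should cover
# every worker thread.
assert boto_max_pool_connections >= celery_concurrency

# Each worker thread may hold a DB connection; pool plus overflow should cover that too.
assert sqlalchemy_pool_size + sqlalchemy_max_overflow >= celery_concurrency

print(f"pool sizing is consistent with a {celery_concurrency}-thread worker")
```
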