From a85eecf1ee1e195da8f14bdfe3c2bd41a762a515 Mon Sep 17 00:00:00 2001 From: Kyle Neale Date: Thu, 26 Dec 2024 12:20:31 -0500 Subject: [PATCH] Supabase integration (#19307) * Supabase integration * Add initial release changelog * ddev validate ci --sync * add metric check * sync metadata * validate labeler * Overwrite default process start time metric * Added missed metrics * Override another default go metric --- .codecov.yml | 9 + .github/workflows/config/labeler.yml | 2 + .github/workflows/test-all.yml | 20 + supabase/CHANGELOG.md | 4 + supabase/README.md | 60 ++ supabase/assets/configuration/spec.yaml | 28 + .../assets/dashboards/supabase_overview.json | 77 ++ supabase/assets/service_checks.json | 32 + supabase/changelog.d/19307.added | 1 + supabase/datadog_checks/__init__.py | 4 + supabase/datadog_checks/supabase/__about__.py | 4 + supabase/datadog_checks/supabase/__init__.py | 7 + supabase/datadog_checks/supabase/check.py | 57 ++ .../supabase/config_models/__init__.py | 24 + .../supabase/config_models/defaults.py | 124 +++ .../supabase/config_models/instance.py | 173 ++++ .../supabase/config_models/shared.py | 45 + .../supabase/config_models/validators.py | 13 + .../supabase/data/conf.yaml.example | 600 ++++++++++++ supabase/datadog_checks/supabase/metrics.py | 286 ++++++ supabase/hatch.toml | 4 + supabase/manifest.json | 55 ++ supabase/metadata.csv | 266 +++++ supabase/pyproject.toml | 60 ++ supabase/tests/__init__.py | 3 + supabase/tests/common.py | 313 ++++++ supabase/tests/conftest.py | 30 + supabase/tests/docker/Caddyfile | 15 + supabase/tests/docker/docker-compose.yaml | 12 + .../tests/fixtures/privileged_metrics.txt | 924 ++++++++++++++++++ .../tests/fixtures/storage_api_metrics.txt | 274 ++++++ supabase/tests/test_e2e.py | 13 + supabase/tests/test_unit.py | 50 + 33 files changed, 3589 insertions(+) create mode 100644 supabase/CHANGELOG.md create mode 100644 supabase/README.md create mode 100644 supabase/assets/configuration/spec.yaml create mode 
100644 supabase/assets/dashboards/supabase_overview.json create mode 100644 supabase/assets/service_checks.json create mode 100644 supabase/changelog.d/19307.added create mode 100644 supabase/datadog_checks/__init__.py create mode 100644 supabase/datadog_checks/supabase/__about__.py create mode 100644 supabase/datadog_checks/supabase/__init__.py create mode 100644 supabase/datadog_checks/supabase/check.py create mode 100644 supabase/datadog_checks/supabase/config_models/__init__.py create mode 100644 supabase/datadog_checks/supabase/config_models/defaults.py create mode 100644 supabase/datadog_checks/supabase/config_models/instance.py create mode 100644 supabase/datadog_checks/supabase/config_models/shared.py create mode 100644 supabase/datadog_checks/supabase/config_models/validators.py create mode 100644 supabase/datadog_checks/supabase/data/conf.yaml.example create mode 100644 supabase/datadog_checks/supabase/metrics.py create mode 100644 supabase/hatch.toml create mode 100644 supabase/manifest.json create mode 100644 supabase/metadata.csv create mode 100644 supabase/pyproject.toml create mode 100644 supabase/tests/__init__.py create mode 100644 supabase/tests/common.py create mode 100644 supabase/tests/conftest.py create mode 100644 supabase/tests/docker/Caddyfile create mode 100644 supabase/tests/docker/docker-compose.yaml create mode 100644 supabase/tests/fixtures/privileged_metrics.txt create mode 100644 supabase/tests/fixtures/storage_api_metrics.txt create mode 100644 supabase/tests/test_e2e.py create mode 100644 supabase/tests/test_unit.py diff --git a/.codecov.yml b/.codecov.yml index 631efd9665f12..b3da4ec81667d 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -590,6 +590,10 @@ coverage: target: 75 flags: - strimzi + Supabase: + target: 75 + flags: + - supabase Supervisord: target: 75 flags: @@ -1500,6 +1504,11 @@ flags: paths: - strimzi/datadog_checks/strimzi - strimzi/tests + supabase: + carryforward: true + paths: + - supabase/datadog_checks/supabase 
+ - supabase/tests supervisord: carryforward: true paths: diff --git a/.github/workflows/config/labeler.yml b/.github/workflows/config/labeler.yml index c552e82a1258a..b548315a04731 100644 --- a/.github/workflows/config/labeler.yml +++ b/.github/workflows/config/labeler.yml @@ -525,6 +525,8 @@ integration/streamnative: - streamnative/**/* integration/strimzi: - strimzi/**/* +integration/supabase: +- supabase/**/* integration/supervisord: - supervisord/**/* integration/suricata: diff --git a/.github/workflows/test-all.yml b/.github/workflows/test-all.yml index ce300bf301eb0..bf58388e435be 100644 --- a/.github/workflows/test-all.yml +++ b/.github/workflows/test-all.yml @@ -3494,6 +3494,26 @@ jobs: minimum-base-package: ${{ inputs.minimum-base-package }} pytest-args: ${{ inputs.pytest-args }} secrets: inherit + jfe7ceb7: + uses: ./.github/workflows/test-target.yml + with: + job-name: Supabase + target: supabase + platform: linux + runner: '["ubuntu-22.04"]' + repo: "${{ inputs.repo }}" + python-version: "${{ inputs.python-version }}" + standard: ${{ inputs.standard }} + latest: ${{ inputs.latest }} + agent-image: "${{ inputs.agent-image }}" + agent-image-py2: "${{ inputs.agent-image-py2 }}" + agent-image-windows: "${{ inputs.agent-image-windows }}" + agent-image-windows-py2: "${{ inputs.agent-image-windows-py2 }}" + test-py2: ${{ inputs.test-py2 }} + test-py3: ${{ inputs.test-py3 }} + minimum-base-package: ${{ inputs.minimum-base-package }} + pytest-args: ${{ inputs.pytest-args }} + secrets: inherit jf04a052: uses: ./.github/workflows/test-target.yml with: diff --git a/supabase/CHANGELOG.md b/supabase/CHANGELOG.md new file mode 100644 index 0000000000000..59c3ebcf6b575 --- /dev/null +++ b/supabase/CHANGELOG.md @@ -0,0 +1,4 @@ +# CHANGELOG - supabase + + + diff --git a/supabase/README.md b/supabase/README.md new file mode 100644 index 0000000000000..e7bb5b92dcea0 --- /dev/null +++ b/supabase/README.md @@ -0,0 +1,60 @@ +# Agent Check: supabase + +## Overview + +This 
check monitors [supabase][1] through the Datadog Agent. + +Include a high level overview of what this integration does: +- What does your product do (in 1-2 sentences)? +- What value will customers get from this integration, and why is it valuable to them? +- What specific data will your integration monitor, and what's the value of that data? + +## Setup + +Follow the instructions below to install and configure this check for an Agent running on a host. For containerized environments, see the [Autodiscovery Integration Templates][3] for guidance on applying these instructions. + +### Installation + +The supabase check is included in the [Datadog Agent][2] package. +No additional installation is needed on your server. + +### Configuration + +1. Edit the `supabase.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your supabase performance data. See the [sample supabase.d/conf.yaml][4] for all available configuration options. + +2. [Restart the Agent][5]. + +### Validation + +[Run the Agent's status subcommand][6] and look for `supabase` under the Checks section. + +## Data Collected + +### Metrics + +See [metadata.csv][7] for a list of metrics provided by this integration. + +### Events + +The supabase integration does not include any events. + +### Service Checks + +The supabase integration does not include any service checks. + +See [service_checks.json][8] for a list of service checks provided by this integration. + +## Troubleshooting + +Need help? Contact [Datadog support][9]. 
+ + [1]: https://supabase.com/ +[2]: https://app.datadoghq.com/account/settings/agent/latest +[3]: https://docs.datadoghq.com/agent/kubernetes/integrations/ +[4]: https://github.com/DataDog/integrations-core/blob/master/supabase/datadog_checks/supabase/data/conf.yaml.example +[5]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent +[6]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information +[7]: https://github.com/DataDog/integrations-core/blob/master/supabase/metadata.csv +[8]: https://github.com/DataDog/integrations-core/blob/master/supabase/assets/service_checks.json +[9]: https://docs.datadoghq.com/help/ diff --git a/supabase/assets/configuration/spec.yaml b/supabase/assets/configuration/spec.yaml new file mode 100644 index 0000000000000..14ceebef476cc --- /dev/null +++ b/supabase/assets/configuration/spec.yaml @@ -0,0 +1,28 @@ +name: Supabase +files: +- name: supabase.yaml + options: + - template: init_config + options: + - template: init_config/default + - template: instances + options: + - name: privileged_metrics_endpoint + description: | + Endpoint exposing Supabase customer metrics + https://supabase.com/docs/guides/monitoring-troubleshooting/metrics#accessing-the-metrics-endpoint + value: + display_default: null + example: https://.supabase.co/customer/v1/privileged/metrics + type: string + - name: storage_api_endpoint + description: | + Endpoint exposing the S3 Storage API Prometheus metrics. 
+ value: + display_default: null + example: http://%%host%%:5000/metrics + type: string + - template: instances/openmetrics + overrides: + openmetrics_endpoint.required: false + openmetrics_endpoint.hidden: true \ No newline at end of file diff --git a/supabase/assets/dashboards/supabase_overview.json b/supabase/assets/dashboards/supabase_overview.json new file mode 100644 index 0000000000000..eac5ccdf59ebc --- /dev/null +++ b/supabase/assets/dashboards/supabase_overview.json @@ -0,0 +1,77 @@ +{ + "title": "Supabase Overview", + "description": "## Supabase\n", + "widgets": [ + { + "id": 4717263751542750, + "definition": { + "title": "", + "banner_img": "/static/images/logos/supabase_large.svg", + "show_title": true, + "type": "group", + "layout_type": "ordered", + "widgets": [ + { + "id": 5685022835071772, + "definition": { + "type": "note", + "content": "## Supabase\n", + "background_color": "white", + "font_size": "14", + "text_align": "left", + "vertical_align": "center", + "show_tick": false, + "tick_pos": "50%", + "tick_edge": "left", + "has_padding": true + }, + "layout": { + "x": 0, + "y": 0, + "width": 3, + "height": 3 + } + }, + { + "id": 8921963557059570, + "definition": { + "type": "note", + "content": "", + "background_color": "white", + "font_size": "14", + "text_align": "center", + "vertical_align": "center", + "show_tick": false, + "tick_pos": "50%", + "tick_edge": "left", + "has_padding": true + }, + "layout": { + "x": 3, + "y": 0, + "width": 3, + "height": 3 + } + } + ] + }, + "layout": { + "x": 0, + "y": 0, + "width": 6, + "height": 6 + } + } + ], + "template_variables": [ + { + "name": "host", + "prefix": "host", + "available_values": [], + "default": "*" + } + ], + "layout_type": "ordered", + "notify_list": [], + "reflow_type": "fixed" +} \ No newline at end of file diff --git a/supabase/assets/service_checks.json b/supabase/assets/service_checks.json new file mode 100644 index 0000000000000..f1b12843fedde --- /dev/null +++ 
b/supabase/assets/service_checks.json @@ -0,0 +1,32 @@ +[ + { + "agent_version": "7.62.0", + "integration": "Supabase", + "check": "supabase.openmetrics.health", + "statuses": [ + "ok", + "critical" + ], + "groups": [ + "host", + "endpoint" + ], + "name": "Supabase OpenMetrics endpoint health", + "description": "Returns `CRITICAL` if the Agent is unable to connect to the Supabase OpenMetrics endpoint, otherwise returns `OK`." + }, + { + "agent_version": "7.62.0", + "integration": "Supabase", + "check": "supabase.storage_api.openmetrics.health", + "statuses": [ + "ok", + "critical" + ], + "groups": [ + "host", + "endpoint" + ], + "name": "Supabase Storage API OpenMetrics endpoint health", + "description": "Returns `CRITICAL` if the Agent is unable to connect to the Supabase Storage API OpenMetrics endpoint, otherwise returns `OK`." + } +] \ No newline at end of file diff --git a/supabase/changelog.d/19307.added b/supabase/changelog.d/19307.added new file mode 100644 index 0000000000000..aa949b47b7b41 --- /dev/null +++ b/supabase/changelog.d/19307.added @@ -0,0 +1 @@ +Initial Release \ No newline at end of file diff --git a/supabase/datadog_checks/__init__.py b/supabase/datadog_checks/__init__.py new file mode 100644 index 0000000000000..1517d901c0aae --- /dev/null +++ b/supabase/datadog_checks/__init__.py @@ -0,0 +1,4 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +__path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore diff --git a/supabase/datadog_checks/supabase/__about__.py b/supabase/datadog_checks/supabase/__about__.py new file mode 100644 index 0000000000000..e9541ce83e9e5 --- /dev/null +++ b/supabase/datadog_checks/supabase/__about__.py @@ -0,0 +1,4 @@ +# (C) Datadog, Inc. 
2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +__version__ = '0.0.1' diff --git a/supabase/datadog_checks/supabase/__init__.py b/supabase/datadog_checks/supabase/__init__.py new file mode 100644 index 0000000000000..e3ec3035e8ec2 --- /dev/null +++ b/supabase/datadog_checks/supabase/__init__.py @@ -0,0 +1,7 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from .__about__ import __version__ +from .check import SupabaseCheck + +__all__ = ['__version__', 'SupabaseCheck'] diff --git a/supabase/datadog_checks/supabase/check.py b/supabase/datadog_checks/supabase/check.py new file mode 100644 index 0000000000000..5e1cd85b581eb --- /dev/null +++ b/supabase/datadog_checks/supabase/check.py @@ -0,0 +1,57 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from typing import Any # noqa: F401 + +from datadog_checks.base import ConfigurationError, OpenMetricsBaseCheckV2 # noqa: F401 + +from .config_models import ConfigMixin +from .metrics import RENAME_LABELS_MAP, STORAGE_API_METRICS, SUPABASE_METRICS + +( + PRIVILEGED_METRICS_NAMESPACE, + STORAGE_API_METRICS_NAMESPACE, +) = [ + 'supabase', + 'supabase.storage_api', +] + + +class SupabaseCheck(OpenMetricsBaseCheckV2, ConfigMixin): + + DEFAULT_METRIC_LIMIT = 0 + + def __init__(self, name, init_config, instances=None): + super(SupabaseCheck, self).__init__(name, init_config, instances) + self.check_initializations.appendleft(self.parse_config) + # Use self.instance to read the check configuration + # self.url = self.instance.get("url") + + def parse_config(self): + self.scraper_configs = [] + privileged_metrics_endpoint = self.instance.get("privileged_metrics_endpoint") + storage_api_endpoint = self.instance.get("storage_api_endpoint") + + if not privileged_metrics_endpoint and not storage_api_endpoint: + raise ConfigurationError( + "Must 
specify at least one of the following: " "`privileged_metrics_endpoint` or `storage_api_endpoint`." + ) + + if privileged_metrics_endpoint: + self.scraper_configs.append( + self.generate_config(privileged_metrics_endpoint, PRIVILEGED_METRICS_NAMESPACE, SUPABASE_METRICS) + ) + if storage_api_endpoint: + self.scraper_configs.append( + self.generate_config(storage_api_endpoint, STORAGE_API_METRICS_NAMESPACE, STORAGE_API_METRICS) + ) + + def generate_config(self, endpoint, namespace, metrics): + config = { + 'openmetrics_endpoint': endpoint, + 'metrics': metrics, + 'namespace': namespace, + 'rename_labels': RENAME_LABELS_MAP, + } + config.update(self.instance) + return config diff --git a/supabase/datadog_checks/supabase/config_models/__init__.py b/supabase/datadog_checks/supabase/config_models/__init__.py new file mode 100644 index 0000000000000..106fff2032f68 --- /dev/null +++ b/supabase/datadog_checks/supabase/config_models/__init__.py @@ -0,0 +1,24 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +# This file is autogenerated. +# To change this file you should edit assets/configuration/spec.yaml and then run the following commands: +# ddev -x validate config -s +# ddev -x validate models -s + +from .instance import InstanceConfig +from .shared import SharedConfig + + +class ConfigMixin: + _config_model_instance: InstanceConfig + _config_model_shared: SharedConfig + + @property + def config(self) -> InstanceConfig: + return self._config_model_instance + + @property + def shared_config(self) -> SharedConfig: + return self._config_model_shared diff --git a/supabase/datadog_checks/supabase/config_models/defaults.py b/supabase/datadog_checks/supabase/config_models/defaults.py new file mode 100644 index 0000000000000..0138cd77a5ea8 --- /dev/null +++ b/supabase/datadog_checks/supabase/config_models/defaults.py @@ -0,0 +1,124 @@ +# (C) Datadog, Inc. 
2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +# This file is autogenerated. +# To change this file you should edit assets/configuration/spec.yaml and then run the following commands: +# ddev -x validate config -s +# ddev -x validate models -s + + +def instance_allow_redirects(): + return True + + +def instance_auth_type(): + return 'basic' + + +def instance_cache_metric_wildcards(): + return True + + +def instance_cache_shared_labels(): + return True + + +def instance_collect_counters_with_distributions(): + return False + + +def instance_collect_histogram_buckets(): + return True + + +def instance_disable_generic_tags(): + return False + + +def instance_empty_default_hostname(): + return False + + +def instance_enable_health_service_check(): + return True + + +def instance_histogram_buckets_as_distributions(): + return False + + +def instance_ignore_connection_errors(): + return False + + +def instance_kerberos_auth(): + return 'disabled' + + +def instance_kerberos_delegate(): + return False + + +def instance_kerberos_force_initiate(): + return False + + +def instance_log_requests(): + return False + + +def instance_min_collection_interval(): + return 15 + + +def instance_non_cumulative_histogram_buckets(): + return False + + +def instance_persist_connections(): + return False + + +def instance_request_size(): + return 16 + + +def instance_skip_proxy(): + return False + + +def instance_tag_by_endpoint(): + return True + + +def instance_telemetry(): + return False + + +def instance_timeout(): + return 10 + + +def instance_tls_ignore_warning(): + return False + + +def instance_tls_use_host_header(): + return False + + +def instance_tls_verify(): + return True + + +def instance_use_latest_spec(): + return False + + +def instance_use_legacy_auth_encoding(): + return True + + +def instance_use_process_start_time(): + return False diff --git a/supabase/datadog_checks/supabase/config_models/instance.py 
b/supabase/datadog_checks/supabase/config_models/instance.py new file mode 100644 index 0000000000000..517705d04bb9f --- /dev/null +++ b/supabase/datadog_checks/supabase/config_models/instance.py @@ -0,0 +1,173 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +# This file is autogenerated. +# To change this file you should edit assets/configuration/spec.yaml and then run the following commands: +# ddev -x validate config -s +# ddev -x validate models -s + +from __future__ import annotations + +from types import MappingProxyType +from typing import Any, Optional, Union + +from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator + +from datadog_checks.base.utils.functions import identity +from datadog_checks.base.utils.models import validation + +from . import defaults, validators + + +class AuthToken(BaseModel): + model_config = ConfigDict( + arbitrary_types_allowed=True, + frozen=True, + ) + reader: Optional[MappingProxyType[str, Any]] = None + writer: Optional[MappingProxyType[str, Any]] = None + + +class ExtraMetrics(BaseModel): + model_config = ConfigDict( + arbitrary_types_allowed=True, + extra='allow', + frozen=True, + ) + name: Optional[str] = None + type: Optional[str] = None + + +class MetricPatterns(BaseModel): + model_config = ConfigDict( + arbitrary_types_allowed=True, + frozen=True, + ) + exclude: Optional[tuple[str, ...]] = None + include: Optional[tuple[str, ...]] = None + + +class Metrics(BaseModel): + model_config = ConfigDict( + arbitrary_types_allowed=True, + extra='allow', + frozen=True, + ) + name: Optional[str] = None + type: Optional[str] = None + + +class Proxy(BaseModel): + model_config = ConfigDict( + arbitrary_types_allowed=True, + frozen=True, + ) + http: Optional[str] = None + https: Optional[str] = None + no_proxy: Optional[tuple[str, ...]] = None + + +class ShareLabels(BaseModel): + model_config = ConfigDict( + 
arbitrary_types_allowed=True, + frozen=True, + ) + labels: Optional[tuple[str, ...]] = None + match: Optional[tuple[str, ...]] = None + + +class InstanceConfig(BaseModel): + model_config = ConfigDict( + validate_default=True, + arbitrary_types_allowed=True, + frozen=True, + ) + allow_redirects: Optional[bool] = None + auth_token: Optional[AuthToken] = None + auth_type: Optional[str] = None + aws_host: Optional[str] = None + aws_region: Optional[str] = None + aws_service: Optional[str] = None + cache_metric_wildcards: Optional[bool] = None + cache_shared_labels: Optional[bool] = None + collect_counters_with_distributions: Optional[bool] = None + collect_histogram_buckets: Optional[bool] = None + connect_timeout: Optional[float] = None + disable_generic_tags: Optional[bool] = None + empty_default_hostname: Optional[bool] = None + enable_health_service_check: Optional[bool] = None + exclude_labels: Optional[tuple[str, ...]] = None + exclude_metrics: Optional[tuple[str, ...]] = None + exclude_metrics_by_labels: Optional[MappingProxyType[str, Union[bool, tuple[str, ...]]]] = None + extra_headers: Optional[MappingProxyType[str, Any]] = None + extra_metrics: Optional[tuple[Union[str, MappingProxyType[str, Union[str, ExtraMetrics]]], ...]] = None + headers: Optional[MappingProxyType[str, Any]] = None + histogram_buckets_as_distributions: Optional[bool] = None + hostname_format: Optional[str] = None + hostname_label: Optional[str] = None + ignore_connection_errors: Optional[bool] = None + ignore_tags: Optional[tuple[str, ...]] = None + include_labels: Optional[tuple[str, ...]] = None + kerberos_auth: Optional[str] = None + kerberos_cache: Optional[str] = None + kerberos_delegate: Optional[bool] = None + kerberos_force_initiate: Optional[bool] = None + kerberos_hostname: Optional[str] = None + kerberos_keytab: Optional[str] = None + kerberos_principal: Optional[str] = None + log_requests: Optional[bool] = None + metric_patterns: Optional[MetricPatterns] = None + metrics: 
Optional[tuple[Union[str, MappingProxyType[str, Union[str, Metrics]]], ...]] = None + min_collection_interval: Optional[float] = None + namespace: Optional[str] = Field(None, pattern='\\w*') + non_cumulative_histogram_buckets: Optional[bool] = None + ntlm_domain: Optional[str] = None + openmetrics_endpoint: Optional[str] = None + password: Optional[str] = None + persist_connections: Optional[bool] = None + privileged_metrics_endpoint: Optional[str] = None + proxy: Optional[Proxy] = None + raw_line_filters: Optional[tuple[str, ...]] = None + raw_metric_prefix: Optional[str] = None + read_timeout: Optional[float] = None + rename_labels: Optional[MappingProxyType[str, Any]] = None + request_size: Optional[float] = None + service: Optional[str] = None + share_labels: Optional[MappingProxyType[str, Union[bool, ShareLabels]]] = None + skip_proxy: Optional[bool] = None + storage_api_endpoint: Optional[str] = None + tag_by_endpoint: Optional[bool] = None + tags: Optional[tuple[str, ...]] = None + telemetry: Optional[bool] = None + timeout: Optional[float] = None + tls_ca_cert: Optional[str] = None + tls_cert: Optional[str] = None + tls_ignore_warning: Optional[bool] = None + tls_private_key: Optional[str] = None + tls_protocols_allowed: Optional[tuple[str, ...]] = None + tls_use_host_header: Optional[bool] = None + tls_verify: Optional[bool] = None + use_latest_spec: Optional[bool] = None + use_legacy_auth_encoding: Optional[bool] = None + use_process_start_time: Optional[bool] = None + username: Optional[str] = None + + @model_validator(mode='before') + def _initial_validation(cls, values): + return validation.core.initialize_config(getattr(validators, 'initialize_instance', identity)(values)) + + @field_validator('*', mode='before') + def _validate(cls, value, info): + field = cls.model_fields[info.field_name] + field_name = field.alias or info.field_name + if field_name in info.context['configured_fields']: + value = getattr(validators, f'instance_{info.field_name}', 
identity)(value, field=field) + else: + value = getattr(defaults, f'instance_{info.field_name}', lambda: value)() + + return validation.utils.make_immutable(value) + + @model_validator(mode='after') + def _final_validation(cls, model): + return validation.core.check_model(getattr(validators, 'check_instance', identity)(model)) diff --git a/supabase/datadog_checks/supabase/config_models/shared.py b/supabase/datadog_checks/supabase/config_models/shared.py new file mode 100644 index 0000000000000..e39d447dfc4b9 --- /dev/null +++ b/supabase/datadog_checks/supabase/config_models/shared.py @@ -0,0 +1,45 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +# This file is autogenerated. +# To change this file you should edit assets/configuration/spec.yaml and then run the following commands: +# ddev -x validate config -s +# ddev -x validate models -s + +from __future__ import annotations + +from typing import Optional + +from pydantic import BaseModel, ConfigDict, field_validator, model_validator + +from datadog_checks.base.utils.functions import identity +from datadog_checks.base.utils.models import validation + +from . 
import validators + + +class SharedConfig(BaseModel): + model_config = ConfigDict( + validate_default=True, + arbitrary_types_allowed=True, + frozen=True, + ) + service: Optional[str] = None + + @model_validator(mode='before') + def _initial_validation(cls, values): + return validation.core.initialize_config(getattr(validators, 'initialize_shared', identity)(values)) + + @field_validator('*', mode='before') + def _validate(cls, value, info): + field = cls.model_fields[info.field_name] + field_name = field.alias or info.field_name + if field_name in info.context['configured_fields']: + value = getattr(validators, f'shared_{info.field_name}', identity)(value, field=field) + + return validation.utils.make_immutable(value) + + @model_validator(mode='after') + def _final_validation(cls, model): + return validation.core.check_model(getattr(validators, 'check_shared', identity)(model)) diff --git a/supabase/datadog_checks/supabase/config_models/validators.py b/supabase/datadog_checks/supabase/config_models/validators.py new file mode 100644 index 0000000000000..70150e85e6124 --- /dev/null +++ b/supabase/datadog_checks/supabase/config_models/validators.py @@ -0,0 +1,13 @@ +# (C) Datadog, Inc. 
2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +# Here you can include additional config validators or transformers +# +# def initialize_instance(values, **kwargs): +#     if 'my_option' not in values and 'my_legacy_option' in values: +#         values['my_option'] = values['my_legacy_option'] +#     if values.get('my_number') > 10: +#         raise ValueError('my_number max value is 10, got %s' % str(values.get('my_number'))) +# +#     return values diff --git a/supabase/datadog_checks/supabase/data/conf.yaml.example b/supabase/datadog_checks/supabase/data/conf.yaml.example new file mode 100644 index 0000000000000..113f1e13c4d5c --- /dev/null +++ b/supabase/datadog_checks/supabase/data/conf.yaml.example @@ -0,0 +1,600 @@ +## All options defined here are available to all instances. +# +init_config: + + ## @param service - string - optional + ## Attach the tag `service:` to every metric, event, and service check emitted by this integration. + ## + ## Additionally, this sets the default `service` for every log source. + # + # service: + +## Every instance is scheduled independently of the others. +# +instances: + + - + ## @param privileged_metrics_endpoint - string - optional + ## Endpoint exposing Supabase customer metrics + ## https://supabase.com/docs/guides/monitoring-troubleshooting/metrics#accessing-the-metrics-endpoint + # + # privileged_metrics_endpoint: https://.supabase.co/customer/v1/privileged/metrics + + ## @param storage_api_endpoint - string - optional + ## Endpoint exposing the S3 Storage API Prometheus metrics. + # + # storage_api_endpoint: http://%%host%%:5000/metrics + + ## @param raw_metric_prefix - string - optional + ## A prefix that is removed from all exposed metric names, if present. + ## All configuration options will use the prefix-less name. 
+ # + # raw_metric_prefix: _ + + ## @param extra_metrics - (list of string or mapping) - optional + ## This list defines metrics to collect from the `openmetrics_endpoint`, in addition to + ## what the check collects by default. If the check already collects a metric, then + ## metric definitions here take precedence. Metrics may be defined in 3 ways: + ## + ## 1. If the item is a string, then it represents the exposed metric name, and + ## the sent metric name will be identical. For example: + ## + ## extra_metrics: + ## - + ## - + ## 2. If the item is a mapping, then the keys represent the exposed metric names. + ## + ## a. If a value is a string, then it represents the sent metric name. For example: + ## + ## extra_metrics: + ## - : + ## - : + ## b. If a value is a mapping, then it must have a `name` and/or `type` key. + ## The `name` represents the sent metric name, and the `type` represents how + ## the metric should be handled, overriding any type information the endpoint + ## may provide. For example: + ## + ## extra_metrics: + ## - : + ## name: + ## type: + ## - : + ## name: + ## type: + ## + ## The supported native types are `gauge`, `counter`, `histogram`, and `summary`. + ## + ## Note: To collect counter metrics with names ending in `_total`, specify the metric name without the `_total` + ## suffix. For example, to collect the counter metric `promhttp_metric_handler_requests_total`, specify + ## `promhttp_metric_handler_requests`. This submits to Datadog the metric name appended with `.count`. 
+ ## For more information, see: + ## https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#suffixes + ## + ## Regular expressions may be used to match the exposed metric names, for example: + ## + ## extra_metrics: + ## - ^network_(ingress|egress)_.+ + ## - .+: + ## type: gauge + # + # extra_metrics: [] + + ## @param exclude_metrics - list of strings - optional + ## A list of metrics to exclude, with each entry being either + ## the exact metric name or a regular expression. + ## In order to exclude all metrics but the ones matching a specific filter, + ## you can use a negative lookahead regex like: + ## - ^(?!foo).*$ + # + # exclude_metrics: [] + + ## @param exclude_metrics_by_labels - mapping - optional + ## A mapping of labels to exclude metrics with matching label name and their corresponding metric values. To match + ## all values of a label, set it to `true`. + ## + ## Note: Label filtering happens before `rename_labels`. + ## + ## For example, the following configuration instructs the check to exclude all metrics with + ## a label `worker` or a label `pid` with the value of either `23` or `42`. + ## + ## exclude_metrics_by_labels: + ## worker: true + ## pid: + ## - '23' + ## - '42' + # + # exclude_metrics_by_labels: {} + + ## @param exclude_labels - list of strings - optional + ## A list of labels to exclude, useful for high cardinality values like timestamps or UUIDs. + ## May be used in conjunction with `include_labels`. + ## Labels defined in `exclude_labels` will take precedence in case of overlap. + ## + ## Note: Label filtering happens before `rename_labels`. + # + # exclude_labels: [] + + ## @param include_labels - list of strings - optional + ## A list of labels to include. May be used in conjunction with `exclude_labels`. + ## Labels defined in `exclude_labels` will take precedence in case of overlap. + ## + ## Note: Label filtering happens before `rename_labels`. 
+ # + # include_labels: [] + + ## @param rename_labels - mapping - optional + ## A mapping of label names to their new names. + # + # rename_labels: + # : + # : + + ## @param enable_health_service_check - boolean - optional - default: true + ## Whether or not to send a service check named `.openmetrics.health` which reports + ## the health of the `openmetrics_endpoint`. + # + # enable_health_service_check: true + + ## @param ignore_connection_errors - boolean - optional - default: false + ## Whether or not to ignore connection errors when scraping `openmetrics_endpoint`. + # + # ignore_connection_errors: false + + ## @param hostname_label - string - optional + ## Override the hostname for every metric submission with the value of one of its labels. + # + # hostname_label: + + ## @param hostname_format - string - optional + ## When `hostname_label` is set, this instructs the check how to format the values. The string + ## `` is replaced by the value of the label defined by `hostname_label`. + # + # hostname_format: + + ## @param collect_histogram_buckets - boolean - optional - default: true + ## Whether or not to send histogram buckets. + # + # collect_histogram_buckets: true + + ## @param non_cumulative_histogram_buckets - boolean - optional - default: false + ## Whether or not histogram buckets are non-cumulative and to come with a `lower_bound` tag. + # + # non_cumulative_histogram_buckets: false + + ## @param histogram_buckets_as_distributions - boolean - optional - default: false + ## Whether or not to send histogram buckets as Datadog distribution metrics. This implicitly + ## enables the `collect_histogram_buckets` and `non_cumulative_histogram_buckets` options. 
+ ## + ## Learn more about distribution metrics: + ## https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#metric-types + # + # histogram_buckets_as_distributions: false + + ## @param collect_counters_with_distributions - boolean - optional - default: false + ## Whether or not to also collect the observation counter metrics ending in `.sum` and `.count` + ## when sending histogram buckets as Datadog distribution metrics. This implicitly enables the + ## `histogram_buckets_as_distributions` option. + # + # collect_counters_with_distributions: false + + ## @param use_process_start_time - boolean - optional - default: false + ## Whether to enable a heuristic for reporting counter values on the first scrape. When true, + ## the first time an endpoint is scraped, check `process_start_time_seconds` to decide whether zero + ## initial value can be assumed for counters. This requires keeping metrics in memory until the entire + ## response is received. + # + # use_process_start_time: false + + ## @param share_labels - mapping - optional + ## This mapping allows for the sharing of labels across multiple metrics. The keys represent the + ## exposed metrics from which to share labels, and the values are mappings that configure the + ## sharing behavior. Each mapping must have at least one of the following keys: + ## + ## labels - This is a list of labels to share. All labels are shared if this is not set. + ## match - This is a list of labels to match on other metrics as a condition for sharing. + ## values - This is a list of allowed values as a condition for sharing. + ## + ## To unconditionally share all labels of a metric, set it to `true`. 
+ ## + ## For example, the following configuration instructs the check to apply all labels from `metric_a` + ## to all other metrics, the `node` label from `metric_b` to only those metrics that have a `pod` + ## label value that matches the `pod` label value of `metric_b`, and all labels from `metric_c` + ## to all other metrics if their value is equal to `23` or `42`. + ## + ## share_labels: + ## metric_a: true + ## metric_b: + ## labels: + ## - node + ## match: + ## - pod + ## metric_c: + ## values: + ## - 23 + ## - 42 + # + # share_labels: {} + + ## @param cache_shared_labels - boolean - optional - default: true + ## When `share_labels` is set, it instructs the check to cache labels collected from the first payload + ## for improved performance. + ## + ## Set this to `false` to compute label sharing for every payload at the risk of potentially increased memory usage. + # + # cache_shared_labels: true + + ## @param raw_line_filters - list of strings - optional + ## A list of regular expressions used to exclude lines read from the `openmetrics_endpoint` + ## from being parsed. + # + # raw_line_filters: [] + + ## @param cache_metric_wildcards - boolean - optional - default: true + ## Whether or not to cache data from metrics that are defined by regular expressions rather + ## than the full metric name. + # + # cache_metric_wildcards: true + + ## @param telemetry - boolean - optional - default: false + ## Whether or not to submit metrics prefixed by `.telemetry.` for debugging purposes. + # + # telemetry: false + + ## @param ignore_tags - list of strings - optional + ## A list of regular expressions used to ignore tags added by Autodiscovery and entries in the `tags` option. + # + # ignore_tags: + # - + # - + # - + + ## @param proxy - mapping - optional + ## This overrides the `proxy` setting in `init_config`. + ## + ## Set HTTP or HTTPS proxies for this instance. Use the `no_proxy` list + ## to specify hosts that must bypass proxies. 
+ ## + ## The SOCKS protocol is also supported, for example: + ## + ## socks5://user:pass@host:port + ## + ## Using the scheme `socks5` causes the DNS resolution to happen on the + ## client, rather than on the proxy server. This is in line with `curl`, + ## which uses the scheme to decide whether to do the DNS resolution on + ## the client or proxy. If you want to resolve the domains on the proxy + ## server, use `socks5h` as the scheme. + # + # proxy: + # http: http://: + # https: https://: + # no_proxy: + # - + # - + + ## @param skip_proxy - boolean - optional - default: false + ## This overrides the `skip_proxy` setting in `init_config`. + ## + ## If set to `true`, this makes the check bypass any proxy + ## settings enabled and attempt to reach services directly. + # + # skip_proxy: false + + ## @param auth_type - string - optional - default: basic + ## The type of authentication to use. The available types (and related options) are: + ## + ## - basic + ## |__ username + ## |__ password + ## |__ use_legacy_auth_encoding + ## - digest + ## |__ username + ## |__ password + ## - ntlm + ## |__ ntlm_domain + ## |__ password + ## - kerberos + ## |__ kerberos_auth + ## |__ kerberos_cache + ## |__ kerberos_delegate + ## |__ kerberos_force_initiate + ## |__ kerberos_hostname + ## |__ kerberos_keytab + ## |__ kerberos_principal + ## - aws + ## |__ aws_region + ## |__ aws_host + ## |__ aws_service + ## + ## The `aws` auth type relies on boto3 to automatically gather AWS credentials, for example: from `.aws/credentials`. + ## Details: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html#configuring-credentials + # + # auth_type: basic + + ## @param use_legacy_auth_encoding - boolean - optional - default: true + ## When `auth_type` is set to `basic`, this determines whether to encode as `latin1` rather than `utf-8`. 
+ # + # use_legacy_auth_encoding: true + + ## @param username - string - optional + ## The username to use if services are behind basic or digest auth. + # + # username: + + ## @param password - string - optional + ## The password to use if services are behind basic or NTLM auth. + # + # password: + + ## @param ntlm_domain - string - optional + ## If your services use NTLM authentication, specify + ## the domain used in the check. For NTLM Auth, append + ## the username to domain, not as the `username` parameter. + # + # ntlm_domain: \ + + ## @param kerberos_auth - string - optional - default: disabled + ## If your services use Kerberos authentication, you can specify the Kerberos + ## strategy to use between: + ## + ## - required + ## - optional + ## - disabled + ## + ## See https://github.com/requests/requests-kerberos#mutual-authentication + # + # kerberos_auth: disabled + + ## @param kerberos_cache - string - optional + ## Sets the KRB5CCNAME environment variable. + ## It should point to a credential cache with a valid TGT. + # + # kerberos_cache: + + ## @param kerberos_delegate - boolean - optional - default: false + ## Set to `true` to enable Kerberos delegation of credentials to a server that requests delegation. + ## + ## See https://github.com/requests/requests-kerberos#delegation + # + # kerberos_delegate: false + + ## @param kerberos_force_initiate - boolean - optional - default: false + ## Set to `true` to preemptively initiate the Kerberos GSS exchange and + ## present a Kerberos ticket on the initial request (and all subsequent). + ## + ## See https://github.com/requests/requests-kerberos#preemptive-authentication + # + # kerberos_force_initiate: false + + ## @param kerberos_hostname - string - optional + ## Override the hostname used for the Kerberos GSS exchange if its DNS name doesn't + ## match its Kerberos hostname, for example: behind a content switch or load balancer. 
+ ## + ## See https://github.com/requests/requests-kerberos#hostname-override + # + # kerberos_hostname: + + ## @param kerberos_principal - string - optional + ## Set an explicit principal, to force Kerberos to look for a + ## matching credential cache for the named user. + ## + ## See https://github.com/requests/requests-kerberos#explicit-principal + # + # kerberos_principal: + + ## @param kerberos_keytab - string - optional + ## Set the path to your Kerberos key tab file. + # + # kerberos_keytab: + + ## @param auth_token - mapping - optional + ## This allows for the use of authentication information from dynamic sources. + ## Both a reader and writer must be configured. + ## + ## The available readers are: + ## + ## - type: file + ## path (required): The absolute path for the file to read from. + ## pattern: A regular expression pattern with a single capture group used to find the + ## token rather than using the entire file, for example: Your secret is (.+) + ## - type: oauth + ## url (required): The token endpoint. + ## client_id (required): The client identifier. + ## client_secret (required): The client secret. + ## basic_auth: Whether the provider expects credentials to be transmitted in + ## an HTTP Basic Auth header. The default is: false + ## options: Mapping of additional options to pass to the provider, such as the audience + ## or the scope. For example: + ## options: + ## audience: https://example.com + ## scope: read:example + ## + ## The available writers are: + ## + ## - type: header + ## name (required): The name of the field, for example: Authorization + ## value: The template value, for example `Bearer `. The default is: + ## placeholder: The substring in `value` to replace with the token, defaults to: + # + # auth_token: + # reader: + # type: + # : + # : + # writer: + # type: + # : + # : + + ## @param aws_region - string - optional + ## If your services require AWS Signature Version 4 signing, set the region. 
+ ## + ## See https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html + # + # aws_region: + + ## @param aws_host - string - optional + ## If your services require AWS Signature Version 4 signing, set the host. + ## This only needs the hostname and does not require the protocol (HTTP, HTTPS, and more). + ## For example, if connecting to https://us-east-1.amazonaws.com/, set `aws_host` to `us-east-1.amazonaws.com`. + ## + ## Note: This setting is not necessary for official integrations. + ## + ## See https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html + # + # aws_host: + + ## @param aws_service - string - optional + ## If your services require AWS Signature Version 4 signing, set the service code. For a list + ## of available service codes, see https://docs.aws.amazon.com/general/latest/gr/rande.html + ## + ## Note: This setting is not necessary for official integrations. + ## + ## See https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html + # + # aws_service: + + ## @param tls_verify - boolean - optional - default: true + ## Instructs the check to validate the TLS certificate of services. + # + # tls_verify: true + + ## @param tls_use_host_header - boolean - optional - default: false + ## If a `Host` header is set, this enables its use for SNI (matching against the TLS certificate CN or SAN). + # + # tls_use_host_header: false + + ## @param tls_ignore_warning - boolean - optional - default: false + ## If `tls_verify` is disabled, security warnings are logged by the check. + ## Disable those by setting `tls_ignore_warning` to true. + # + # tls_ignore_warning: false + + ## @param tls_cert - string - optional + ## The path to a single file in PEM format containing a certificate as well as any + ## number of CA certificates needed to establish the certificate's authenticity for + ## use when connecting to services. It may also contain an unencrypted private key to use. 
+ # + # tls_cert: + + ## @param tls_private_key - string - optional + ## The unencrypted private key to use for `tls_cert` when connecting to services. This is + ## required if `tls_cert` is set and it does not already contain a private key. + # + # tls_private_key: + + ## @param tls_ca_cert - string - optional + ## The path to a file of concatenated CA certificates in PEM format or a directory + ## containing several CA certificates in PEM format. If a directory, the directory + ## must have been processed using the `openssl rehash` command. See: + ## https://www.openssl.org/docs/man3.2/man1/c_rehash.html + # + # tls_ca_cert: + + ## @param tls_protocols_allowed - list of strings - optional + ## The expected versions of TLS/SSL when fetching intermediate certificates. + ## Only `SSLv3`, `TLSv1.2`, `TLSv1.3` are allowed by default. The possible values are: + ## SSLv3 + ## TLSv1 + ## TLSv1.1 + ## TLSv1.2 + ## TLSv1.3 + # + # tls_protocols_allowed: + # - SSLv3 + # - TLSv1.2 + # - TLSv1.3 + + ## @param headers - mapping - optional + ## The headers parameter allows you to send specific headers with every request. + ## You can use it for explicitly specifying the host header or adding headers for + ## authorization purposes. + ## + ## This overrides any default headers. + # + # headers: + # Host: + # X-Auth-Token: + + ## @param extra_headers - mapping - optional + ## Additional headers to send with every request. + # + # extra_headers: + # Host: + # X-Auth-Token: + + ## @param timeout - number - optional - default: 10 + ## The timeout for accessing services. + ## + ## This overrides the `timeout` setting in `init_config`. + # + # timeout: 10 + + ## @param connect_timeout - number - optional + ## The connect timeout for accessing services. Defaults to `timeout`. + # + # connect_timeout: + + ## @param read_timeout - number - optional + ## The read timeout for accessing services. Defaults to `timeout`. 
+ # + # read_timeout: + + ## @param request_size - number - optional - default: 16 + ## The number of kibibytes (KiB) to read from streaming HTTP responses at a time. + # + # request_size: 16 + + ## @param log_requests - boolean - optional - default: false + ## Whether or not to debug log the HTTP(S) requests made, including the method and URL. + # + # log_requests: false + + ## @param persist_connections - boolean - optional - default: false + ## Whether or not to persist cookies and use connection pooling for improved performance. + # + # persist_connections: false + + ## @param allow_redirects - boolean - optional - default: true + ## Whether or not to allow URL redirection. + # + # allow_redirects: true + + ## @param tags - list of strings - optional + ## A list of tags to attach to every metric and service check emitted by this instance. + ## + ## Learn more about tagging at https://docs.datadoghq.com/tagging + # + # tags: + # - : + # - : + + ## @param service - string - optional + ## Attach the tag `service:` to every metric, event, and service check emitted by this integration. + ## + ## Overrides any `service` defined in the `init_config` section. + # + # service: + + ## @param min_collection_interval - number - optional - default: 15 + ## This changes the collection interval of the check. For more information, see: + ## https://docs.datadoghq.com/developers/write_agent_check/#collection-interval + # + # min_collection_interval: 15 + + ## @param empty_default_hostname - boolean - optional - default: false + ## This forces the check to send metrics with no hostname. + ## + ## This is useful for cluster-level checks. + # + # empty_default_hostname: false + + ## @param metric_patterns - mapping - optional + ## A mapping of metrics to include or exclude, with each entry being a regular expression. + ## + ## Metrics defined in `exclude` will take precedence in case of overlap. 
+ # + # metric_patterns: + # include: + # - + # exclude: + # - diff --git a/supabase/datadog_checks/supabase/metrics.py b/supabase/datadog_checks/supabase/metrics.py new file mode 100644 index 0000000000000..3b772fc0f7ee3 --- /dev/null +++ b/supabase/datadog_checks/supabase/metrics.py @@ -0,0 +1,286 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from datadog_checks.base.checks.openmetrics.v2.metrics import DEFAULT_GO_METRICS + +# https://supabase.com/docs/guides/platform/metrics (NOTE(review): constant name misspells "PRIVILEGED"; renaming requires updating all importers) +PRIVELEGED_METRICS = { + 'auth_users_user_count': 'auth_users.user_count', + 'db_sql_connection_closed_max_idle_time': 'db.sql.connection_closed_max_idle_time', + 'db_sql_connection_closed_max_idle': 'db.sql.connection_closed_max_idle', + 'db_sql_connection_closed_max_lifetime': 'db.sql.connection_closed_max_lifetime', + 'db_sql_connection_max_open': 'db.sql.connection_max_open', + 'db_sql_connection_open': 'db.sql.connection_open', + 'db_sql_connection_wait_duration_milliseconds': 'db.sql.connection_wait_duration', + 'db_sql_connection_wait': 'db.sql.connection_wait', + 'db_transmit_bytes': 'db.transmit_bytes', + 'go_memstats_last_gc_time_seconds': { + 'name': 'go.memstats.last_gc_time.seconds', + 'type': 'time_elapsed', + }, + 'http_server_duration_milliseconds': 'http.server.duration', + 'http_server_request_size_bytes': 'http.server.request.size_bytes', + 'http_server_response_size_bytes': 'http.server.response.size_bytes', + 'http_status_codes': 'http.status_codes', + 'node_cpu_guest_seconds': 'node.cpu.guest_seconds', + 'node_cpu_seconds': 'node.cpu.seconds', + 'node_disk_discard_time_seconds': 'node.disk.discard_time_seconds', + 'node_disk_discarded_sectors': 'node.disk.discarded_sectors', + 'node_disk_discards_completed': 'node.disk.discards_completed', + 'node_disk_discards_merged': 'node.disk.discards_merged', + 'node_disk_flush_requests_time_seconds': 
'node.disk.flush_requests_time_seconds', + 'node_disk_flush_requests': 'node.disk.flush_requests', + 'node_disk_io_now': 'node.disk.io_now', + 'node_disk_io_time_seconds': 'node.disk.io_time_seconds', + 'node_disk_io_time_weighted_seconds': 'node.disk.io_time_weighted_seconds', + 'node_disk_read_bytes': 'node.disk.read_bytes', + 'node_disk_read_time_seconds': 'node.disk.read_time_seconds', + 'node_disk_reads_completed': 'node.disk.reads_completed', + 'node_disk_reads_merged': 'node.disk.reads_merged', + 'node_disk_write_time_seconds': 'node.disk.write_time_seconds', + 'node_disk_writes_completed': 'node.disk.writes_completed', + 'node_disk_writes_merged': 'node.disk.writes_merged', + 'node_disk_written_bytes': 'node.disk.written_bytes', + 'node_filesystem_avail_bytes': 'node.filesystem.available_bytes', + 'node_filesystem_device_error': 'node.filesystem.device_error', + 'node_filesystem_files': 'node.filesystem.files', + 'node_filesystem_files_free': 'node.filesystem.files_free', + 'node_filesystem_free_bytes': 'node.filesystem.free_bytes', + 'node_filesystem_readonly': 'node.filesystem.readonly', + 'node_filesystem_size_bytes': 'node.filesystem.size_bytes', + 'node_load1': 'node.load1', + 'node_load15': 'node.load15', + 'node_load5': 'node.load5', + 'node_memory_Active_anon_bytes': 'node.memory.active_anon_bytes', + 'node_memory_Active_bytes': 'node.memory.active_bytes', + 'node_memory_Active_file_bytes': 'node.memory.active_file_bytes', + 'node_memory_AnonHugePages_bytes': 'node.memory.anon_huge_pages_bytes', + 'node_memory_AnonPages_bytes': 'node.memory.anon_pages_bytes', + 'node_memory_Bounce_bytes': 'node.memory.bounce_bytes', + 'node_memory_Buffers_bytes': 'node.memory.buffers_bytes', + 'node_memory_Cached_bytes': 'node.memory.cached_bytes', + 'node_memory_CommitLimit_bytes': 'node.memory.commit_limit_bytes', + 'node_memory_Committed_AS_bytes': 'node.memory.committed_as_bytes', + 'node_memory_Dirty_bytes': 'node.memory.dirty_bytes', + 
'node_memory_FileHugePages_bytes': 'node.memory.file_huge_pages_bytes', + 'node_memory_FilePmdMapped_bytes': 'node.memory.file_pmd_mapped_bytes', + 'node_memory_HardwareCorrupted_bytes': 'node.memory.hardware_corrupted_bytes', + 'node_memory_HugePages_Free': 'node.memory.huge_pages_free', + 'node_memory_HugePages_Rsvd': 'node.memory.huge_pages_reserved', + 'node_memory_HugePages_Surp': 'node.memory.huge_pages_surp', + 'node_memory_HugePages_Total': 'node.memory.huge_pages_total', + 'node_memory_Hugepagesize_bytes': 'node.memory.huge_page_size_bytes', + 'node_memory_Hugetlb_bytes': 'node.memory.hugetlb_bytes', + 'node_memory_Inactive_anon_bytes': 'node.memory.inactive_anon_bytes', + 'node_memory_Inactive_bytes': 'node.memory.inactive_bytes', + 'node_memory_Inactive_file_bytes': 'node.memory.inactive_file_bytes', + 'node_memory_KReclaimable_bytes': 'node.memory.kreclaimable_bytes', + 'node_memory_KernelStack_bytes': 'node.memory.kernel_stack_bytes', + 'node_memory_Mapped_bytes': 'node.memory.mapped_bytes', + 'node_memory_MemAvailable_bytes': 'node.memory.mem_available_bytes', + 'node_memory_MemFree_bytes': 'node.memory.mem_free_bytes', + 'node_memory_MemTotal_bytes': 'node.memory.mem_total_bytes', + 'node_memory_Mlocked_bytes': 'node.memory.mlocked_bytes', + 'node_memory_NFS_Unstable_bytes': 'node.memory.nfs_unstable_bytes', + 'node_memory_PageTables_bytes': 'node.memory.page_tables_bytes', + 'node_memory_Percpu_bytes': 'node.memory.percpu_bytes', + 'node_memory_SReclaimable_bytes': 'node.memory.sreclaimable_bytes', + 'node_memory_SUnreclaim_bytes': 'node.memory.sunreclaim_bytes', + 'node_memory_ShmemHugePages_bytes': 'node.memory.shmem_huge_pages_bytes', + 'node_memory_ShmemPmdMapped_bytes': 'node.memory.shmem_pmd_mapped_bytes', + 'node_memory_Shmem_bytes': 'node.memory.shmem_bytes', + 'node_memory_Slab_bytes': 'node.memory.slab_bytes', + 'node_memory_SwapCached_bytes': 'node.memory.swap_cached_bytes', + 'node_memory_SwapFree_bytes': 'node.memory.swap_free_bytes', + 
'node_memory_SwapTotal_bytes': 'node.memory.swap_total_bytes', + 'node_memory_Unevictable_bytes': 'node.memory.unevictable_bytes', + 'node_memory_VmallocChunk_bytes': 'node.memory.vm_alloc_chunk_bytes', + 'node_memory_VmallocTotal_bytes': 'node.memory.vm_alloc_total_bytes', + 'node_memory_VmallocUsed_bytes': 'node.memory.vm_alloc_used_bytes', + 'node_memory_WritebackTmp_bytes': 'node.memory.writeback_tmp_bytes', + 'node_memory_Writeback_bytes': 'node.memory.writeback_bytes', + 'node_network_receive_bytes': 'node.network.receive_bytes', + 'node_network_receive_compressed': 'node.network.receive_compressed', + 'node_network_receive_drop': 'node.network.receive_drop', + 'node_network_receive_errs': 'node.network.receive_errors', + 'node_network_receive_fifo': 'node.network.receive_fifo', + 'node_network_receive_frame': 'node.network.receive_frame', + 'node_network_receive_multicast': 'node.network.receive_multicast', + 'node_network_receive_packets': 'node.network.receive_packets', + 'node_network_transmit_bytes': 'node.network.transmit_bytes', + 'node_network_transmit_carrier': 'node.network.transmit_carrier', + 'node_network_transmit_colls': 'node.network.transmit_colls', + 'node_network_transmit_compressed': 'node.network.transmit_compressed', + 'node_network_transmit_drop': 'node.network.transmit_drop', + 'node_network_transmit_errs': 'node.network.transmit_errors', + 'node_network_transmit_fifo': 'node.network.transmit_fifo', + 'node_network_transmit_packets': 'node.network.transmit_packets', + 'node_scrape_collector_duration_seconds': 'node.scrape.collector_duration_seconds', + 'node_scrape_collector_success': 'node.scrape.collector_success', + # We force type since node.vmstat.* metrics are untyped + 'node_vmstat_oom_kill': { + 'name': 'node.vmstat.oom_kill', + 'type': 'counter', + }, + 'node_vmstat_pgfault': { + 'name': 'node.vmstat.pgfault', + 'type': 'counter', + }, + 'node_vmstat_pgmajfault': { + 'name': 'node.vmstat.pgmajfault', + 'type': 'counter', + }, + 
'node_vmstat_pgpgin': { + 'name': 'node.vmstat.pgpgin', + 'type': 'counter', + }, + 'node_vmstat_pgpgout': { + 'name': 'node.vmstat.pgpgout', + 'type': 'counter', + }, + 'node_vmstat_pswpin': { + 'name': 'node.vmstat.pswpin', + 'type': 'counter', + }, + 'node_vmstat_pswpout': { + 'name': 'node.vmstat.pswpout', + 'type': 'counter', + }, + 'pg_database_size_bytes': 'pg_database_size.bytes', + 'pg_database_size_mb': 'pg_database_size.mb', + 'pg_exporter_last_scrape_duration_seconds': 'pg_exporter.last_scrape_duration_seconds', + 'pg_exporter_last_scrape_error': 'pg_exporter.last_scrape_error', + 'pg_exporter_scrapes': 'pg_exporter.scrapes', + 'pg_exporter_user_queries_load_error': 'pg_exporter.user_queries_load_error', + 'pg_ls_archive_statusdir_wal_pending_count': 'pg_ls.archive_statusdir_wal_pending_count', + 'pg_scrape_collector_duration_seconds': 'pg_scrape_collector.duration_seconds', + 'pg_scrape_collector_success': 'pg_scrape_collector.success', + 'pg_settings_default_transaction_read_only': 'pg_settings.default_transaction_read_only', + 'pg_stat_activity_xact_runtime': 'pg_stat_activity.xact_runtime', + 'pg_stat_bgwriter_buffers_alloc': 'pg_stat_bgwriter.buffers_alloc', + 'pg_stat_bgwriter_buffers_backend_fsync': 'pg_stat_bgwriter.buffers_backend_fsync', + 'pg_stat_bgwriter_buffers_backend': 'pg_stat_bgwriter.buffers_backend', + 'pg_stat_bgwriter_buffers_checkpoint': 'pg_stat_bgwriter.buffers_checkpoint', + 'pg_stat_bgwriter_buffers_clean': 'pg_stat_bgwriter.buffers_clean', + 'pg_stat_bgwriter_checkpoint_sync_time': 'pg_stat_bgwriter.checkpoint_sync_time', + 'pg_stat_bgwriter_checkpoint_write_time': 'pg_stat_bgwriter.checkpoint_write_time', + 'pg_stat_bgwriter_checkpoints_req': 'pg_stat_bgwriter.checkpoints_req', + 'pg_stat_bgwriter_checkpoints_timed': 'pg_stat_bgwriter.checkpoints_timed', + 'pg_stat_bgwriter_maxwritten_clean': 'pg_stat_bgwriter.maxwritten_clean', + 'pg_stat_bgwriter_stats_reset': 'pg_stat_bgwriter.stats_reset', + 'pg_stat_database_blks_hit': 
'pg_stat_database.blks_hit', + 'pg_stat_database_blks_read': 'pg_stat_database.blks_read', + 'pg_stat_database_conflicts_confl_bufferpin': 'pg_stat_database_conflicts.confl_bufferpin', + 'pg_stat_database_conflicts_confl_deadlock': 'pg_stat_database_conflicts.confl_deadlock', + 'pg_stat_database_conflicts_confl_lock': 'pg_stat_database_conflicts.confl_lock', + 'pg_stat_database_conflicts_confl_snapshot': 'pg_stat_database_conflicts.confl_snapshot', + 'pg_stat_database_conflicts_confl_tablespace': 'pg_stat_database_conflicts.confl_tablespace', + 'pg_stat_database_conflicts': 'pg_stat_database.conflicts', + 'pg_stat_database_deadlocks': 'pg_stat_database.deadlocks', + 'pg_stat_database_most_recent_reset': 'pg_stat_database.most_recent_reset', + 'pg_stat_database_num_backends': 'pg_stat_database.num_backends', + 'pg_stat_database_temp_bytes': 'pg_stat_database.temp_bytes', + 'pg_stat_database_temp_files': 'pg_stat_database.temp_files', + 'pg_stat_database_tup_deleted': 'pg_stat_database.tup_deleted', + 'pg_stat_database_tup_fetched': 'pg_stat_database.tup_fetched', + 'pg_stat_database_tup_inserted': 'pg_stat_database.tup_inserted', + 'pg_stat_database_tup_returned': 'pg_stat_database.tup_returned', + 'pg_stat_database_tup_updated': 'pg_stat_database.tup_updated', + 'pg_stat_database_xact_commit': 'pg_stat_database.xact_commit', + 'pg_stat_database_xact_rollback': 'pg_stat_database.xact_rollback', + 'pg_stat_replication_replay_lag': 'pg_stat_replication.replay_lag', + 'pg_stat_replication_send_lag': 'pg_stat_replication.send_lag', + 'pg_stat_statements_total_queries': 'pg_stat_statements.total_queries', + 'pg_stat_statements_total_time_seconds': 'pg_stat_statements.total_time_seconds', + 'pg_status_in_recovery': 'pg_status.in_recovery', + 'pg_up': 'pg.up', + 'pg_wal_size_mb': 'pg_wal.size', + 'pgrst_db_pool_available': 'pgrst.db_pool.available_connections', + 'pgrst_db_pool_max': 'pgrst.db_pool.max_connections', + 'pgrst_db_pool_timeouts': 
'pgrst.db_pool.connection_timeouts', + 'pgrst_db_pool_waiting': 'pgrst.db_pool.requests_waiting', + 'pgrst_schema_cache_loads': 'pgrst.schema_cache.loads', + 'pgrst_schema_cache_query_time_seconds': 'pgrst.schema_cache.query_time_seconds', + 'physical_replication_lag_is_connected_to_primary': 'physical_replication_lag.is_connected_to_primary', + 'physical_replication_lag_is_wal_replay_paused': 'physical_replication_lag.is_wal_replay_paused', + 'physical_replication_lag_physical_replication_lag_seconds': 'physical_replication_lag.seconds', + 'postgres_exporter_build_info': 'postgres_exporter.build_info', + 'postgres_exporter_config_last_reload_success_timestamp_seconds': 'postgres_exporter.config_last_reload_success_timestamp_seconds', # noqa: E501 + 'postgres_exporter_config_last_reload_successful': 'postgres_exporter.config_last_reload_successful', + 'postgresql_restarts': 'postgresql.restarts', + 'process_start_time_seconds': { + 'name': 'process.start_time.seconds', + 'type': 'time_elapsed', + }, + 'process_runtime_go_mem_live_objects': 'process.runtime.go_mem_live_objects', + 'promhttp_metric_handler_requests_in_flight': 'promhttp_metric_handler.requests_in_flight', + 'promhttp_metric_handler_requests': 'promhttp_metric_handler.requests', + 'realtime_postgres_changes_client_subscriptions': 'realtime_postgres_changes.client_subscriptions', + 'realtime_postgres_changes_total_subscriptions': 'realtime_postgres_changes.total_subscriptions', + 'replication_slots_max_lag_bytes': 'pg_replication_slots.max_lag_bytes', + 'runtime_uptime_milliseconds': {'name': 'runtime.uptime_milliseconds', 'type': 'time_elapsed'}, + 'storage_storage_size_mb': 'storage.storage_size', + 'supabase_usage_metrics_user_queries': 'usage_metrics.user_queries', +} + +STORAGE_API_METRICS = [ + { + 'storage_api_upload_started': 'upload_started', + 'storage_api_upload_success': 'upload_success', + 'storage_api_database_query_performance': 'database_query_performance', + 
'storage_api_queue_job_scheduled': 'queue.job_scheduled', + 'storage_api_queue_job_scheduled_time': 'queue.job_scheduled_time', + 'storage_api_queue_job_completed': 'queue.job_completed', + 'storage_api_queue_job_retry_failed': 'queue.job_retry_failed', + 'storage_api_queue_job_error': 'queue.job_error', + 'storage_api_s3_upload_part': 's3_upload_part', + 'storage_api_db_pool': 'db_pool', + 'storage_api_db_connections': 'db_connections', + 'storage_api_http_pool_busy_sockets': 'http_pool.busy_sockets', + 'storage_api_http_pool_free_sockets': 'http_pool.free_sockets', + 'storage_api_http_pool_requests': 'http_pool.requests', + 'storage_api_http_pool_errors': 'http_pool.errors', + 'storage_api_http_request_summary_seconds': 'http_request.summary_seconds', + 'storage_api_http_request_duration_seconds': 'http_request.duration_seconds', + 'storage_api_process_cpu_seconds': 'process_cpu.seconds', + 'storage_api_process_cpu_system_seconds': 'process_cpu.system.seconds', + 'storage_api_process_cpu_user_seconds': 'process_cpu.user.seconds', + 'storage_api_process_start_time_seconds': { + 'name': 'process.uptime.seconds', + 'type': 'time_elapsed', + }, + 'storage_api_process_resident_memory_bytes': 'process.resident_memory.bytes', + 'storage_api_process_virtual_memory_bytes': 'process.virtual_memory.bytes', + 'storage_api_process_heap_bytes': 'process.heap_bytes', + 'storage_api_process_open_fds': 'process.open_fds', + 'storage_api_process_max_fds': 'process.max_fds', + 'storage_api_nodejs_eventloop_lag_seconds': 'nodejs.eventloop_lag.seconds', + 'storage_api_nodejs_eventloop_lag_min_seconds': 'nodejs_eventloop_lag.min_seconds', + 'storage_api_nodejs_eventloop_lag_max_seconds': 'nodejs.eventloop_lag.max_seconds', + 'storage_api_nodejs_eventloop_lag_mean_seconds': 'nodejs.eventloop_lag.mean_seconds', + 'storage_api_nodejs_eventloop_lag_stddev_seconds': 'nodejs.eventloop_lag.stddev_seconds', + 'storage_api_nodejs_eventloop_lag_p50_seconds': 'nodejs.eventloop_lag.p50_seconds', 
+ 'storage_api_nodejs_eventloop_lag_p90_seconds': 'nodejs.eventloop_lag.p90_seconds', + 'storage_api_nodejs_eventloop_lag_p99_seconds': 'nodejs.eventloop_lag.p99_seconds', + 'storage_api_nodejs_active_resources': 'nodejs.active_resources', + 'storage_api_nodejs_active_resources_total': 'nodejs.active_resources.total', + 'storage_api_nodejs_active_handles': 'nodejs.active_handles', + 'storage_api_nodejs_active_handles_total': 'nodejs.active_handles.total', + 'storage_api_nodejs_active_requests': 'nodejs.active_requests', + 'storage_api_nodejs_active_requests_total': 'nodejs.active_requests.total', + 'storage_api_nodejs_gc_duration_seconds': 'nodejs.gc_duration.seconds', + 'storage_api_nodejs_heap_size_total_bytes': 'nodejs.heap_size.total_bytes', + 'storage_api_nodejs_heap_size_used_bytes': 'nodejs.heap_size.used_bytes', + 'storage_api_nodejs_external_memory_bytes': 'nodejs.external_memory.bytes', + 'storage_api_nodejs_heap_space_size_total_bytes': 'nodejs.heap_space_size.total_bytes', + 'storage_api_nodejs_heap_space_size_used_bytes': 'nodejs.heap_space_size.used_bytes', + 'storage_api_nodejs_heap_space_size_available_bytes': 'nodejs.heap_space_size.available_bytes', + 'storage_api_nodejs_version_info': 'nodejs.version_info', + } +] + +RENAME_LABELS_MAP = { + 'version': 'component_version', +} + +SUPABASE_METRICS = [{**DEFAULT_GO_METRICS, **PRIVELEGED_METRICS}] diff --git a/supabase/hatch.toml b/supabase/hatch.toml new file mode 100644 index 0000000000000..c85c5f07a7df2 --- /dev/null +++ b/supabase/hatch.toml @@ -0,0 +1,4 @@ +[env.collectors.datadog-checks] + +[[envs.default.matrix]] +python = ["3.12"] diff --git a/supabase/manifest.json b/supabase/manifest.json new file mode 100644 index 0000000000000..8f0168564f6d4 --- /dev/null +++ b/supabase/manifest.json @@ -0,0 +1,55 @@ +{ + "manifest_version": "2.0.0", + "app_uuid": "f22fec2a-ff0a-4380-8ddf-3348f1e7ff15", + "app_id": "supabase", + "display_on_public_website": false, + "tile": { + "overview": 
"README.md#Overview", + "configuration": "README.md#Setup", + "support": "README.md#Support", + "changelog": "CHANGELOG.md", + "description": "", + "title": "Supabase", + "media": [], + "classifier_tags": [ + "Supported OS::Linux", + "Supported OS::Windows", + "Supported OS::macOS", + "Category::Metrics", + "Category::Kubernetes", + "Category::Security", + "Submitted Data Type::Metrics", + "Offering::Integration" + ] + }, + "assets": { + "integration": { + "auto_install": true, + "source_type_id": 34976974, + "source_type_name": "Supabase", + "configuration": { + "spec": "assets/configuration/spec.yaml" + }, + "events": { + "creates_events": false + }, + "metrics": { + "prefix": "supabase.", + "check": "supabase.pg.up", + "metadata_path": "metadata.csv" + }, + "service_checks": { + "metadata_path": "assets/service_checks.json" + } + }, + "dashboards": { + "Supabase Overview": "assets/dashboards/supabase_overview.json" + } + }, + "author": { + "support_email": "help@datadoghq.com", + "name": "Datadog", + "homepage": "https://www.datadoghq.com", + "sales_email": "info@datadoghq.com" + } +} diff --git a/supabase/metadata.csv b/supabase/metadata.csv new file mode 100644 index 0000000000000..a89e4368777d1 --- /dev/null +++ b/supabase/metadata.csv @@ -0,0 +1,266 @@ +metric_name,metric_type,interval,unit_name,per_unit_name,description,orientation,integration,short_name,curated_metric,sample_tags +supabase.auth_users.user_count,gauge,,,,,0,supabase,,, +supabase.db.sql.connection_closed_max_idle.count,count,,,,,0,supabase,,, +supabase.db.sql.connection_closed_max_idle_time.count,count,,,,,0,supabase,,, +supabase.db.sql.connection_closed_max_lifetime.count,count,,,,,0,supabase,,, +supabase.db.sql.connection_max_open,gauge,,,,,0,supabase,,, +supabase.db.sql.connection_open,gauge,,,,,0,supabase,,, +supabase.db.sql.connection_wait.count,count,,,,,0,supabase,,, +supabase.db.sql.connection_wait_duration.count,count,,,,,0,supabase,,, 
+supabase.db.transmit_bytes.count,count,,,,,0,supabase,,, +supabase.go.gc.duration.seconds.count,count,,,,,0,supabase,,, +supabase.go.gc.duration.seconds.quantile,gauge,,,,,0,supabase,,, +supabase.go.gc.duration.seconds.sum,count,,,,,0,supabase,,, +supabase.go.goroutines,gauge,,,,,0,supabase,,, +supabase.go.memstats.buck_hash.sys_bytes,gauge,,,,,0,supabase,,, +supabase.go.memstats.frees.count,count,,,,,0,supabase,,, +supabase.go.memstats.gc.sys_bytes,gauge,,,,,0,supabase,,, +supabase.go.memstats.heap.alloc_bytes,gauge,,,,,0,supabase,,, +supabase.go.memstats.heap.idle_bytes,gauge,,,,,0,supabase,,, +supabase.go.memstats.heap.inuse_bytes,gauge,,,,,0,supabase,,, +supabase.go.memstats.heap.objects,gauge,,,,,0,supabase,,, +supabase.go.memstats.heap.released_bytes,gauge,,,,,0,supabase,,, +supabase.go.memstats.heap.sys_bytes,gauge,,,,,0,supabase,,, +supabase.go.memstats.last_gc_time.seconds,gauge,,,,,0,supabase,,, +supabase.go.memstats.lookups.count,count,,,,,0,supabase,,, +supabase.go.memstats.mallocs.count,count,,,,,0,supabase,,, +supabase.go.memstats.mcache.inuse_bytes,gauge,,,,,0,supabase,,, +supabase.go.memstats.mcache.sys_bytes,gauge,,,,,0,supabase,,, +supabase.go.memstats.mspan.inuse_bytes,gauge,,,,,0,supabase,,, +supabase.go.memstats.mspan.sys_bytes,gauge,,,,,0,supabase,,, +supabase.go.memstats.next.gc_bytes,gauge,,,,,0,supabase,,, +supabase.go.memstats.other.sys_bytes,gauge,,,,,0,supabase,,, +supabase.go.memstats.stack.inuse_bytes,gauge,,,,,0,supabase,,, +supabase.go.memstats.stack.sys_bytes,gauge,,,,,0,supabase,,, +supabase.go.memstats.sys_bytes,gauge,,,,,0,supabase,,, +supabase.go.threads,gauge,,,,,0,supabase,,, +supabase.http.server.duration.bucket,count,,,,,0,supabase,,, +supabase.http.server.duration.count,count,,,,,0,supabase,,, +supabase.http.server.duration.sum,count,,,,,0,supabase,,, +supabase.http.server.request.size_bytes.count,count,,,,,0,supabase,,, +supabase.http.server.response.size_bytes.count,count,,,,,0,supabase,,, 
+supabase.http.status_codes.count,count,,,,,0,supabase,,, +supabase.node.cpu.guest_seconds.count,count,,,,,0,supabase,,, +supabase.node.cpu.seconds.count,count,,,,,0,supabase,,, +supabase.node.disk.discard_time_seconds.count,count,,,,,0,supabase,,, +supabase.node.disk.discarded_sectors.count,count,,,,,0,supabase,,, +supabase.node.disk.discards_completed.count,count,,,,,0,supabase,,, +supabase.node.disk.discards_merged.count,count,,,,,0,supabase,,, +supabase.node.disk.flush_requests.count,count,,,,,0,supabase,,, +supabase.node.disk.flush_requests_time_seconds.count,count,,,,,0,supabase,,, +supabase.node.disk.io_now,gauge,,,,,0,supabase,,, +supabase.node.disk.io_time_seconds.count,count,,,,,0,supabase,,, +supabase.node.disk.io_time_weighted_seconds.count,count,,,,,0,supabase,,, +supabase.node.disk.read_bytes.count,count,,,,,0,supabase,,, +supabase.node.disk.read_time_seconds.count,count,,,,,0,supabase,,, +supabase.node.disk.reads_completed.count,count,,,,,0,supabase,,, +supabase.node.disk.reads_merged.count,count,,,,,0,supabase,,, +supabase.node.disk.write_time_seconds.count,count,,,,,0,supabase,,, +supabase.node.disk.writes_completed.count,count,,,,,0,supabase,,, +supabase.node.disk.writes_merged.count,count,,,,,0,supabase,,, +supabase.node.disk.written_bytes.count,count,,,,,0,supabase,,, +supabase.node.filesystem.available_bytes,gauge,,,,,0,supabase,,, +supabase.node.filesystem.device_error,gauge,,,,,0,supabase,,, +supabase.node.filesystem.files,gauge,,,,,0,supabase,,, +supabase.node.filesystem.files_free,gauge,,,,,0,supabase,,, +supabase.node.filesystem.free_bytes,gauge,,,,,0,supabase,,, +supabase.node.filesystem.readonly,gauge,,,,,0,supabase,,, +supabase.node.filesystem.size_bytes,gauge,,,,,0,supabase,,, +supabase.node.load1,gauge,,,,,0,supabase,,, +supabase.node.load15,gauge,,,,,0,supabase,,, +supabase.node.load5,gauge,,,,,0,supabase,,, +supabase.node.memory.active_anon_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.active_bytes,gauge,,,,,0,supabase,,, 
+supabase.node.memory.active_file_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.anon_huge_pages_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.anon_pages_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.bounce_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.buffers_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.cached_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.commit_limit_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.committed_as_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.dirty_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.file_huge_pages_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.file_pmd_mapped_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.hardware_corrupted_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.huge_page_size_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.huge_pages_free,gauge,,,,,0,supabase,,, +supabase.node.memory.huge_pages_reserved,gauge,,,,,0,supabase,,, +supabase.node.memory.huge_pages_surp,gauge,,,,,0,supabase,,, +supabase.node.memory.huge_pages_total,gauge,,,,,0,supabase,,, +supabase.node.memory.hugetlb_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.inactive_anon_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.inactive_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.inactive_file_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.kernel_stack_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.kreclaimable_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.mapped_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.mem_available_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.mem_free_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.mem_total_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.mlocked_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.nfs_unstable_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.page_tables_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.percpu_bytes,gauge,,,,,0,supabase,,, 
+supabase.node.memory.shmem_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.shmem_huge_pages_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.shmem_pmd_mapped_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.slab_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.sreclaimable_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.sunreclaim_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.swap_cached_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.swap_free_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.swap_total_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.unevictable_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.vm_alloc_chunk_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.vm_alloc_total_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.vm_alloc_used_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.writeback_bytes,gauge,,,,,0,supabase,,, +supabase.node.memory.writeback_tmp_bytes,gauge,,,,,0,supabase,,, +supabase.node.network.receive_bytes.count,count,,,,,0,supabase,,, +supabase.node.network.receive_compressed.count,count,,,,,0,supabase,,, +supabase.node.network.receive_drop.count,count,,,,,0,supabase,,, +supabase.node.network.receive_errors.count,count,,,,,0,supabase,,, +supabase.node.network.receive_fifo.count,count,,,,,0,supabase,,, +supabase.node.network.receive_frame.count,count,,,,,0,supabase,,, +supabase.node.network.receive_multicast.count,count,,,,,0,supabase,,, +supabase.node.network.receive_packets.count,count,,,,,0,supabase,,, +supabase.node.network.transmit_bytes.count,count,,,,,0,supabase,,, +supabase.node.network.transmit_carrier.count,count,,,,,0,supabase,,, +supabase.node.network.transmit_colls.count,count,,,,,0,supabase,,, +supabase.node.network.transmit_compressed.count,count,,,,,0,supabase,,, +supabase.node.network.transmit_drop.count,count,,,,,0,supabase,,, +supabase.node.network.transmit_errors.count,count,,,,,0,supabase,,, +supabase.node.network.transmit_fifo.count,count,,,,,0,supabase,,, 
+supabase.node.network.transmit_packets.count,count,,,,,0,supabase,,, +supabase.node.scrape.collector_duration_seconds,gauge,,,,,0,supabase,,, +supabase.node.scrape.collector_success,gauge,,,,,0,supabase,,, +supabase.node.vmstat.oom_kill.count,count,,,,,0,supabase,,, +supabase.node.vmstat.pgfault.count,count,,,,,0,supabase,,, +supabase.node.vmstat.pgmajfault.count,count,,,,,0,supabase,,, +supabase.node.vmstat.pgpgin.count,count,,,,,0,supabase,,, +supabase.node.vmstat.pgpgout.count,count,,,,,0,supabase,,, +supabase.node.vmstat.pswpin.count,count,,,,,0,supabase,,, +supabase.node.vmstat.pswpout.count,count,,,,,0,supabase,,, +supabase.pg.up,gauge,,,,,0,supabase,,, +supabase.pg_database_size.bytes,gauge,,,,,0,supabase,,, +supabase.pg_database_size.mb,gauge,,,,,0,supabase,,, +supabase.pg_exporter.last_scrape_duration_seconds,gauge,,,,,0,supabase,,, +supabase.pg_exporter.last_scrape_error,gauge,,,,,0,supabase,,, +supabase.pg_exporter.scrapes.count,count,,,,,0,supabase,,, +supabase.pg_exporter.user_queries_load_error,gauge,,,,,0,supabase,,, +supabase.pg_ls.archive_statusdir_wal_pending_count.count,count,,,,,0,supabase,,, +supabase.pg_replication_slots.max_lag_bytes,gauge,,,,,0,supabase,,, +supabase.pg_scrape_collector.duration_seconds,gauge,,,,,0,supabase,,, +supabase.pg_scrape_collector.success,gauge,,,,,0,supabase,,, +supabase.pg_settings.default_transaction_read_only,gauge,,,,,0,supabase,,, +supabase.pg_stat_activity.xact_runtime,gauge,,,,,0,supabase,,, +supabase.pg_stat_bgwriter.buffers_alloc.count,count,,,,,0,supabase,,, +supabase.pg_stat_bgwriter.buffers_backend.count,count,,,,,0,supabase,,, +supabase.pg_stat_bgwriter.buffers_backend_fsync.count,count,,,,,0,supabase,,, +supabase.pg_stat_bgwriter.buffers_checkpoint.count,count,,,,,0,supabase,,, +supabase.pg_stat_bgwriter.buffers_clean.count,count,,,,,0,supabase,,, +supabase.pg_stat_bgwriter.checkpoint_sync_time.count,count,,,,,0,supabase,,, +supabase.pg_stat_bgwriter.checkpoint_write_time.count,count,,,,,0,supabase,,, 
+supabase.pg_stat_bgwriter.checkpoints_req.count,count,,,,,0,supabase,,, +supabase.pg_stat_bgwriter.checkpoints_timed.count,count,,,,,0,supabase,,, +supabase.pg_stat_bgwriter.maxwritten_clean.count,count,,,,,0,supabase,,, +supabase.pg_stat_bgwriter.stats_reset.count,count,,,,,0,supabase,,, +supabase.pg_stat_database.blks_hit.count,count,,,,,0,supabase,,, +supabase.pg_stat_database.blks_read.count,count,,,,,0,supabase,,, +supabase.pg_stat_database.conflicts.count,count,,,,,0,supabase,,, +supabase.pg_stat_database.deadlocks.count,count,,,,,0,supabase,,, +supabase.pg_stat_database.num_backends,gauge,,,,,0,supabase,,, +supabase.pg_stat_database.temp_bytes.count,count,,,,,0,supabase,,, +supabase.pg_stat_database.temp_files.count,count,,,,,0,supabase,,, +supabase.pg_stat_database.tup_deleted.count,count,,,,,0,supabase,,, +supabase.pg_stat_database.tup_fetched.count,count,,,,,0,supabase,,, +supabase.pg_stat_database.tup_inserted.count,count,,,,,0,supabase,,, +supabase.pg_stat_database.tup_returned.count,count,,,,,0,supabase,,, +supabase.pg_stat_database.tup_updated.count,count,,,,,0,supabase,,, +supabase.pg_stat_database.xact_commit.count,count,,,,,0,supabase,,, +supabase.pg_stat_database.xact_rollback.count,count,,,,,0,supabase,,, +supabase.pg_stat_database_conflicts.confl_bufferpin.count,count,,,,,0,supabase,,, +supabase.pg_stat_database_conflicts.confl_deadlock.count,count,,,,,0,supabase,,, +supabase.pg_stat_database_conflicts.confl_lock.count,count,,,,,0,supabase,,, +supabase.pg_stat_database_conflicts.confl_snapshot.count,count,,,,,0,supabase,,, +supabase.pg_stat_database_conflicts.confl_tablespace.count,count,,,,,0,supabase,,, +supabase.pg_stat_statements.total_queries.count,count,,,,,0,supabase,,, +supabase.pg_stat_statements.total_time_seconds.count,count,,,,,0,supabase,,, +supabase.pg_status.in_recovery,gauge,,,,,0,supabase,,, +supabase.pg_wal.size,gauge,,,,,0,supabase,,, +supabase.pgrst.db_pool.available_connections,gauge,,,,,0,supabase,,, 
+supabase.pgrst.db_pool.connection_timeouts.count,count,,,,,0,supabase,,, +supabase.pgrst.db_pool.max_connections,gauge,,,,,0,supabase,,, +supabase.pgrst.db_pool.requests_waiting,gauge,,,,,0,supabase,,, +supabase.pgrst.schema_cache.loads.count,count,,,,,0,supabase,,, +supabase.pgrst.schema_cache.query_time_seconds,gauge,,,,,0,supabase,,, +supabase.physical_replication_lag.is_connected_to_primary,gauge,,,,,0,supabase,,, +supabase.physical_replication_lag.is_wal_replay_paused,gauge,,,,,0,supabase,,, +supabase.physical_replication_lag.seconds,gauge,,,,,0,supabase,,, +supabase.postgres_exporter.build_info,gauge,,,,,0,supabase,,, +supabase.postgres_exporter.config_last_reload_success_timestamp_seconds,gauge,,,,,0,supabase,,, +supabase.postgres_exporter.config_last_reload_successful,gauge,,,,,0,supabase,,, +supabase.postgresql.restarts.count,count,,,,,0,supabase,,, +supabase.process.cpu.seconds.count,count,,,,,0,supabase,,, +supabase.process.max_fds,gauge,,,,,0,supabase,,, +supabase.process.open_fds,gauge,,,,,0,supabase,,, +supabase.process.resident_memory.bytes,gauge,,,,,0,supabase,,, +supabase.process.runtime.go_mem_live_objects,gauge,,,,,0,supabase,,, +supabase.process.start_time.seconds,gauge,,,,,0,supabase,,, +supabase.process.virtual_memory.bytes,gauge,,,,,0,supabase,,, +supabase.process.virtual_memory.max_bytes,gauge,,,,,0,supabase,,, +supabase.promhttp_metric_handler.requests.count,count,,,,,0,supabase,,, +supabase.promhttp_metric_handler.requests_in_flight,gauge,,,,,0,supabase,,, +supabase.realtime_postgres_changes.client_subscriptions,gauge,,,,,0,supabase,,, +supabase.realtime_postgres_changes.total_subscriptions,gauge,,,,,0,supabase,,, +supabase.runtime.uptime_milliseconds,gauge,,,,,0,supabase,,, +supabase.storage.storage_size,gauge,,,,,0,supabase,,, +supabase.storage_api.database_query_performance.bucket,count,,,,,0,supabase,,, +supabase.storage_api.database_query_performance.count,count,,,,,0,supabase,,, 
+supabase.storage_api.database_query_performance.sum,count,,,,,0,supabase,,, +supabase.storage_api.db_connections,gauge,,,,,0,supabase,,, +supabase.storage_api.db_pool,gauge,,,,,0,supabase,,, +supabase.storage_api.http_pool.busy_sockets,gauge,,,,,0,supabase,,, +supabase.storage_api.http_pool.errors,gauge,,,,,0,supabase,,, +supabase.storage_api.http_pool.free_sockets,gauge,,,,,0,supabase,,, +supabase.storage_api.http_pool.requests,gauge,,,,,0,supabase,,, +supabase.storage_api.http_request.duration_seconds.bucket,count,,,,,0,supabase,,, +supabase.storage_api.http_request.duration_seconds.count,count,,,,,0,supabase,,, +supabase.storage_api.http_request.duration_seconds.sum,count,,,,,0,supabase,,, +supabase.storage_api.http_request.summary_seconds.count,count,,,,,0,supabase,,, +supabase.storage_api.http_request.summary_seconds.quantile,gauge,,,,,0,supabase,,, +supabase.storage_api.http_request.summary_seconds.sum,count,,,,,0,supabase,,, +supabase.storage_api.nodejs.active_handles,gauge,,,,,0,supabase,,, +supabase.storage_api.nodejs.active_handles.total,gauge,,,,,0,supabase,,, +supabase.storage_api.nodejs.active_requests.total,gauge,,,,,0,supabase,,, +supabase.storage_api.nodejs.active_resources,gauge,,,,,0,supabase,,, +supabase.storage_api.nodejs.active_resources.total,gauge,,,,,0,supabase,,, +supabase.storage_api.nodejs.eventloop_lag.max_seconds,gauge,,,,,0,supabase,,, +supabase.storage_api.nodejs.eventloop_lag.mean_seconds,gauge,,,,,0,supabase,,, +supabase.storage_api.nodejs.eventloop_lag.p50_seconds,gauge,,,,,0,supabase,,, +supabase.storage_api.nodejs.eventloop_lag.p90_seconds,gauge,,,,,0,supabase,,, +supabase.storage_api.nodejs.eventloop_lag.p99_seconds,gauge,,,,,0,supabase,,, +supabase.storage_api.nodejs.eventloop_lag.seconds,gauge,,,,,0,supabase,,, +supabase.storage_api.nodejs.eventloop_lag.stddev_seconds,gauge,,,,,0,supabase,,, +supabase.storage_api.nodejs.external_memory.bytes,gauge,,,,,0,supabase,,, 
+supabase.storage_api.nodejs.gc_duration.seconds.bucket,count,,,,,0,supabase,,, +supabase.storage_api.nodejs.gc_duration.seconds.count,count,,,,,0,supabase,,, +supabase.storage_api.nodejs.gc_duration.seconds.sum,count,,,,,0,supabase,,, +supabase.storage_api.nodejs.heap_size.total_bytes,gauge,,,,,0,supabase,,, +supabase.storage_api.nodejs.heap_size.used_bytes,gauge,,,,,0,supabase,,, +supabase.storage_api.nodejs.heap_space_size.available_bytes,gauge,,,,,0,supabase,,, +supabase.storage_api.nodejs.heap_space_size.total_bytes,gauge,,,,,0,supabase,,, +supabase.storage_api.nodejs.heap_space_size.used_bytes,gauge,,,,,0,supabase,,, +supabase.storage_api.nodejs.version_info,gauge,,,,,0,supabase,,, +supabase.storage_api.nodejs_eventloop_lag.min_seconds,gauge,,,,,0,supabase,,, +supabase.storage_api.process.heap_bytes,gauge,,,,,0,supabase,,, +supabase.storage_api.process.max_fds,gauge,,,,,0,supabase,,, +supabase.storage_api.process.open_fds,gauge,,,,,0,supabase,,, +supabase.storage_api.process.resident_memory.bytes,gauge,,,,,0,supabase,,, +supabase.storage_api.process.uptime.seconds,gauge,,,,,0,supabase,,, +supabase.storage_api.process.virtual_memory.bytes,gauge,,,,,0,supabase,,, +supabase.storage_api.process_cpu.seconds.count,count,,,,,0,supabase,,, +supabase.storage_api.process_cpu.system.seconds.count,count,,,,,0,supabase,,, +supabase.storage_api.process_cpu.user.seconds.count,count,,,,,0,supabase,,, +supabase.usage_metrics.user_queries.count,count,,,,,0,supabase,,, diff --git a/supabase/pyproject.toml b/supabase/pyproject.toml new file mode 100644 index 0000000000000..de884876b82f1 --- /dev/null +++ b/supabase/pyproject.toml @@ -0,0 +1,60 @@ +[build-system] +requires = [ + "hatchling>=0.13.0", +] +build-backend = "hatchling.build" + +[project] +name = "datadog-supabase" +description = "The supabase check" +readme = "README.md" +license = "BSD-3-Clause" +requires-python = ">=3.12" +keywords = [ + "datadog", + "datadog agent", + "datadog check", + "supabase", +] +authors = [ 
+ { name = "Datadog", email = "packages@datadoghq.com" }, +] +classifiers = [ + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Developers", + "Intended Audience :: System Administrators", + "License :: OSI Approved :: BSD License", + "Private :: Do Not Upload", + "Programming Language :: Python :: 3.12", + "Topic :: System :: Monitoring", +] +dependencies = [ + "datadog-checks-base>=37.0.0", +] +dynamic = [ + "version", +] + +[project.optional-dependencies] +deps = [] + +[project.urls] +Source = "https://github.com/DataDog/integrations-core" + +[tool.hatch.version] +path = "datadog_checks/supabase/__about__.py" + +[tool.hatch.build.targets.sdist] +include = [ + "/datadog_checks", + "/tests", + "/manifest.json", +] + +[tool.hatch.build.targets.wheel] +include = [ + "/datadog_checks/supabase", +] +dev-mode-dirs = [ + ".", +] diff --git a/supabase/tests/__init__.py b/supabase/tests/__init__.py new file mode 100644 index 0000000000000..9103122bf028d --- /dev/null +++ b/supabase/tests/__init__.py @@ -0,0 +1,3 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) diff --git a/supabase/tests/common.py b/supabase/tests/common.py new file mode 100644 index 0000000000000..e08dddcd90a6d --- /dev/null +++ b/supabase/tests/common.py @@ -0,0 +1,313 @@ +# (C) Datadog, Inc. 
2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +import os + +from datadog_checks.dev import get_docker_hostname, get_here + +HERE = get_here() +HOST = get_docker_hostname() +PRIVELEGED_METRICS_PORT = 8000 +STORAGE_API_PORT = 9000 + + +def get_fixture_path(filename): + return os.path.join(HERE, 'fixtures', filename) + + +MOCKED_INSTANCE = { + "privileged_metrics_endpoint": f"http://{HOST}:{PRIVELEGED_METRICS_PORT}/metrics", + "storage_api_endpoint": f"http://{HOST}:{STORAGE_API_PORT}/metrics", + "tags": ['test:test'], +} + +COMPOSE_FILE = os.path.join(HERE, 'docker', 'docker-compose.yaml') + +PRIVILEGED_METRICS_INSTANCE = { + "privileged_metrics_endpoint": f"http://{HOST}:{PRIVELEGED_METRICS_PORT}/metrics", + "tags": ['test:test'], +} + +STORAGE_API_INSTANCE = { + "storage_api_endpoint": f"http://{HOST}:{STORAGE_API_PORT}/metrics", + "tags": ['test:test'], +} + +COMPOSE_FILE = os.path.join(HERE, 'docker', 'docker-compose.yaml') + +( + PRIVILEGED_METRICS_NAMESPACE, + STORAGE_API_METRICS_NAMESPACE, +) = [ + 'supabase', + 'supabase.storage_api', +] + +PRIVILEGED_METRICS = [ + 'supabase.auth_users.user_count', + 'supabase.db.sql.connection_closed_max_idle.count', + 'supabase.db.sql.connection_closed_max_idle_time.count', + 'supabase.db.sql.connection_closed_max_lifetime.count', + 'supabase.db.sql.connection_max_open', + 'supabase.db.sql.connection_open', + 'supabase.db.sql.connection_wait.count', + 'supabase.db.sql.connection_wait_duration.count', + 'supabase.db.transmit_bytes.count', + 'supabase.go.gc.duration.seconds.count', + 'supabase.go.gc.duration.seconds.quantile', + 'supabase.go.gc.duration.seconds.sum', + 'supabase.go.goroutines', + 'supabase.go.memstats.buck_hash.sys_bytes', + 'supabase.go.memstats.frees.count', + 'supabase.go.memstats.gc.sys_bytes', + 'supabase.go.memstats.heap.alloc_bytes', + 'supabase.go.memstats.heap.idle_bytes', + 'supabase.go.memstats.heap.inuse_bytes', + 
'supabase.go.memstats.heap.objects', + 'supabase.go.memstats.heap.released_bytes', + 'supabase.go.memstats.heap.sys_bytes', + 'supabase.go.memstats.last_gc_time.seconds', + 'supabase.go.memstats.lookups.count', + 'supabase.go.memstats.mallocs.count', + 'supabase.go.memstats.mcache.inuse_bytes', + 'supabase.go.memstats.mcache.sys_bytes', + 'supabase.go.memstats.mspan.inuse_bytes', + 'supabase.go.memstats.mspan.sys_bytes', + 'supabase.go.memstats.next.gc_bytes', + 'supabase.go.memstats.other.sys_bytes', + 'supabase.go.memstats.stack.inuse_bytes', + 'supabase.go.memstats.stack.sys_bytes', + 'supabase.go.memstats.sys_bytes', + 'supabase.go.threads', + 'supabase.http.server.duration.bucket', + 'supabase.http.server.duration.count', + 'supabase.http.server.duration.sum', + 'supabase.http.server.request.size_bytes.count', + 'supabase.http.server.response.size_bytes.count', + 'supabase.http.status_codes.count', + 'supabase.node.cpu.guest_seconds.count', + 'supabase.node.cpu.seconds.count', + 'supabase.node.disk.discard_time_seconds.count', + 'supabase.node.disk.discarded_sectors.count', + 'supabase.node.disk.discards_completed.count', + 'supabase.node.disk.discards_merged.count', + 'supabase.node.disk.flush_requests.count', + 'supabase.node.disk.flush_requests_time_seconds.count', + 'supabase.node.disk.io_now', + 'supabase.node.disk.io_time_seconds.count', + 'supabase.node.disk.io_time_weighted_seconds.count', + 'supabase.node.disk.read_bytes.count', + 'supabase.node.disk.read_time_seconds.count', + 'supabase.node.disk.reads_completed.count', + 'supabase.node.disk.reads_merged.count', + 'supabase.node.disk.write_time_seconds.count', + 'supabase.node.disk.writes_completed.count', + 'supabase.node.disk.writes_merged.count', + 'supabase.node.disk.written_bytes.count', + 'supabase.node.filesystem.available_bytes', + 'supabase.node.filesystem.device_error', + 'supabase.node.filesystem.files', + 'supabase.node.filesystem.files_free', + 'supabase.node.filesystem.free_bytes', + 
'supabase.node.filesystem.readonly', + 'supabase.node.filesystem.size_bytes', + 'supabase.node.load1', + 'supabase.node.load15', + 'supabase.node.load5', + 'supabase.node.memory.active_anon_bytes', + 'supabase.node.memory.active_bytes', + 'supabase.node.memory.active_file_bytes', + 'supabase.node.memory.anon_huge_pages_bytes', + 'supabase.node.memory.anon_pages_bytes', + 'supabase.node.memory.bounce_bytes', + 'supabase.node.memory.buffers_bytes', + 'supabase.node.memory.cached_bytes', + 'supabase.node.memory.commit_limit_bytes', + 'supabase.node.memory.committed_as_bytes', + 'supabase.node.memory.dirty_bytes', + 'supabase.node.memory.file_huge_pages_bytes', + 'supabase.node.memory.file_pmd_mapped_bytes', + 'supabase.node.memory.hardware_corrupted_bytes', + 'supabase.node.memory.huge_page_size_bytes', + 'supabase.node.memory.huge_pages_free', + 'supabase.node.memory.huge_pages_reserved', + 'supabase.node.memory.huge_pages_surp', + 'supabase.node.memory.huge_pages_total', + 'supabase.node.memory.hugetlb_bytes', + 'supabase.node.memory.inactive_anon_bytes', + 'supabase.node.memory.inactive_bytes', + 'supabase.node.memory.inactive_file_bytes', + 'supabase.node.memory.kernel_stack_bytes', + 'supabase.node.memory.kreclaimable_bytes', + 'supabase.node.memory.mapped_bytes', + 'supabase.node.memory.mem_available_bytes', + 'supabase.node.memory.mem_free_bytes', + 'supabase.node.memory.mem_total_bytes', + 'supabase.node.memory.mlocked_bytes', + 'supabase.node.memory.nfs_unstable_bytes', + 'supabase.node.memory.page_tables_bytes', + 'supabase.node.memory.percpu_bytes', + 'supabase.node.memory.shmem_bytes', + 'supabase.node.memory.shmem_huge_pages_bytes', + 'supabase.node.memory.shmem_pmd_mapped_bytes', + 'supabase.node.memory.slab_bytes', + 'supabase.node.memory.sreclaimable_bytes', + 'supabase.node.memory.sunreclaim_bytes', + 'supabase.node.memory.swap_cached_bytes', + 'supabase.node.memory.swap_free_bytes', + 'supabase.node.memory.swap_total_bytes', + 
'supabase.node.memory.unevictable_bytes', + 'supabase.node.memory.vm_alloc_chunk_bytes', + 'supabase.node.memory.vm_alloc_total_bytes', + 'supabase.node.memory.vm_alloc_used_bytes', + 'supabase.node.memory.writeback_bytes', + 'supabase.node.memory.writeback_tmp_bytes', + 'supabase.node.network.receive_bytes.count', + 'supabase.node.network.receive_compressed.count', + 'supabase.node.network.receive_drop.count', + 'supabase.node.network.receive_errors.count', + 'supabase.node.network.receive_fifo.count', + 'supabase.node.network.receive_frame.count', + 'supabase.node.network.receive_multicast.count', + 'supabase.node.network.receive_packets.count', + 'supabase.node.network.transmit_bytes.count', + 'supabase.node.network.transmit_carrier.count', + 'supabase.node.network.transmit_colls.count', + 'supabase.node.network.transmit_compressed.count', + 'supabase.node.network.transmit_drop.count', + 'supabase.node.network.transmit_errors.count', + 'supabase.node.network.transmit_fifo.count', + 'supabase.node.network.transmit_packets.count', + 'supabase.node.scrape.collector_duration_seconds', + 'supabase.node.scrape.collector_success', + 'supabase.node.vmstat.oom_kill.count', + 'supabase.node.vmstat.pgfault.count', + 'supabase.node.vmstat.pgmajfault.count', + 'supabase.node.vmstat.pgpgin.count', + 'supabase.node.vmstat.pgpgout.count', + 'supabase.node.vmstat.pswpin.count', + 'supabase.node.vmstat.pswpout.count', + 'supabase.pg.up', + 'supabase.pg_database_size.bytes', + 'supabase.pg_database_size.mb', + 'supabase.pg_exporter.last_scrape_duration_seconds', + 'supabase.pg_exporter.last_scrape_error', + 'supabase.pg_exporter.scrapes.count', + 'supabase.pg_exporter.user_queries_load_error', + 'supabase.pg_ls.archive_statusdir_wal_pending_count.count', + 'supabase.pg_replication_slots.max_lag_bytes', + 'supabase.pg_scrape_collector.duration_seconds', + 'supabase.pg_scrape_collector.success', + 'supabase.pg_settings.default_transaction_read_only', + 
'supabase.pg_stat_activity.xact_runtime', + 'supabase.pg_stat_bgwriter.buffers_alloc.count', + 'supabase.pg_stat_bgwriter.buffers_backend.count', + 'supabase.pg_stat_bgwriter.buffers_backend_fsync.count', + 'supabase.pg_stat_bgwriter.buffers_checkpoint.count', + 'supabase.pg_stat_bgwriter.buffers_clean.count', + 'supabase.pg_stat_bgwriter.checkpoint_sync_time.count', + 'supabase.pg_stat_bgwriter.checkpoint_write_time.count', + 'supabase.pg_stat_bgwriter.checkpoints_req.count', + 'supabase.pg_stat_bgwriter.checkpoints_timed.count', + 'supabase.pg_stat_bgwriter.maxwritten_clean.count', + 'supabase.pg_stat_bgwriter.stats_reset.count', + 'supabase.pg_stat_database.blks_hit.count', + 'supabase.pg_stat_database.blks_read.count', + 'supabase.pg_stat_database.conflicts.count', + 'supabase.pg_stat_database.deadlocks.count', + 'supabase.pg_stat_database.num_backends', + 'supabase.pg_stat_database.temp_bytes.count', + 'supabase.pg_stat_database.temp_files.count', + 'supabase.pg_stat_database.tup_deleted.count', + 'supabase.pg_stat_database.tup_fetched.count', + 'supabase.pg_stat_database.tup_inserted.count', + 'supabase.pg_stat_database.tup_returned.count', + 'supabase.pg_stat_database.tup_updated.count', + 'supabase.pg_stat_database.xact_commit.count', + 'supabase.pg_stat_database.xact_rollback.count', + 'supabase.pg_stat_database_conflicts.confl_bufferpin.count', + 'supabase.pg_stat_database_conflicts.confl_deadlock.count', + 'supabase.pg_stat_database_conflicts.confl_lock.count', + 'supabase.pg_stat_database_conflicts.confl_snapshot.count', + 'supabase.pg_stat_database_conflicts.confl_tablespace.count', + 'supabase.pg_stat_statements.total_queries.count', + 'supabase.pg_stat_statements.total_time_seconds.count', + 'supabase.pg_status.in_recovery', + 'supabase.pg_wal.size', + 'supabase.pgrst.db_pool.available_connections', + 'supabase.pgrst.db_pool.connection_timeouts.count', + 'supabase.pgrst.db_pool.max_connections', + 'supabase.pgrst.db_pool.requests_waiting', + 
'supabase.pgrst.schema_cache.loads.count', + 'supabase.pgrst.schema_cache.query_time_seconds', + 'supabase.physical_replication_lag.is_connected_to_primary', + 'supabase.physical_replication_lag.is_wal_replay_paused', + 'supabase.physical_replication_lag.seconds', + 'supabase.postgres_exporter.build_info', + 'supabase.postgres_exporter.config_last_reload_success_timestamp_seconds', + 'supabase.postgres_exporter.config_last_reload_successful', + 'supabase.postgresql.restarts.count', + 'supabase.process.cpu.seconds.count', + 'supabase.process.max_fds', + 'supabase.process.open_fds', + 'supabase.process.resident_memory.bytes', + 'supabase.process.runtime.go_mem_live_objects', + 'supabase.process.start_time.seconds', + 'supabase.process.virtual_memory.bytes', + 'supabase.process.virtual_memory.max_bytes', + 'supabase.promhttp_metric_handler.requests.count', + 'supabase.promhttp_metric_handler.requests_in_flight', + 'supabase.realtime_postgres_changes.client_subscriptions', + 'supabase.realtime_postgres_changes.total_subscriptions', + 'supabase.runtime.uptime_milliseconds', + 'supabase.usage_metrics.user_queries.count', +] +STORAGE_API_METRICS = [ + 'supabase.storage_api.database_query_performance.bucket', + 'supabase.storage_api.database_query_performance.count', + 'supabase.storage_api.database_query_performance.sum', + 'supabase.storage_api.db_connections', + 'supabase.storage_api.db_pool', + 'supabase.storage_api.http_pool.busy_sockets', + 'supabase.storage_api.http_pool.errors', + 'supabase.storage_api.http_pool.free_sockets', + 'supabase.storage_api.http_pool.requests', + 'supabase.storage_api.http_request.duration_seconds.bucket', + 'supabase.storage_api.http_request.duration_seconds.count', + 'supabase.storage_api.http_request.duration_seconds.sum', + 'supabase.storage_api.http_request.summary_seconds.count', + 'supabase.storage_api.http_request.summary_seconds.quantile', + 'supabase.storage_api.http_request.summary_seconds.sum', + 
'supabase.storage_api.nodejs.active_handles', + 'supabase.storage_api.nodejs.active_handles.total', + 'supabase.storage_api.nodejs.active_requests.total', + 'supabase.storage_api.nodejs.active_resources', + 'supabase.storage_api.nodejs.active_resources.total', + 'supabase.storage_api.nodejs.eventloop_lag.max_seconds', + 'supabase.storage_api.nodejs.eventloop_lag.mean_seconds', + 'supabase.storage_api.nodejs.eventloop_lag.p50_seconds', + 'supabase.storage_api.nodejs.eventloop_lag.p90_seconds', + 'supabase.storage_api.nodejs.eventloop_lag.p99_seconds', + 'supabase.storage_api.nodejs.eventloop_lag.seconds', + 'supabase.storage_api.nodejs.eventloop_lag.stddev_seconds', + 'supabase.storage_api.nodejs.external_memory.bytes', + 'supabase.storage_api.nodejs.gc_duration.seconds.bucket', + 'supabase.storage_api.nodejs.gc_duration.seconds.count', + 'supabase.storage_api.nodejs.gc_duration.seconds.sum', + 'supabase.storage_api.nodejs.heap_size.total_bytes', + 'supabase.storage_api.nodejs.heap_size.used_bytes', + 'supabase.storage_api.nodejs.heap_space_size.available_bytes', + 'supabase.storage_api.nodejs.heap_space_size.total_bytes', + 'supabase.storage_api.nodejs.heap_space_size.used_bytes', + 'supabase.storage_api.nodejs.version_info', + 'supabase.storage_api.nodejs_eventloop_lag.min_seconds', + 'supabase.storage_api.process.heap_bytes', + 'supabase.storage_api.process.max_fds', + 'supabase.storage_api.process.open_fds', + 'supabase.storage_api.process.resident_memory.bytes', + 'supabase.storage_api.process.uptime.seconds', + 'supabase.storage_api.process.virtual_memory.bytes', + 'supabase.storage_api.process_cpu.system.seconds.count', + 'supabase.storage_api.process_cpu.user.seconds.count', + 'supabase.storage_api.process_cpu.seconds.count', +] diff --git a/supabase/tests/conftest.py b/supabase/tests/conftest.py new file mode 100644 index 0000000000000..ccaed8fc65b94 --- /dev/null +++ b/supabase/tests/conftest.py @@ -0,0 +1,30 @@ +# (C) Datadog, Inc. 
2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +import copy + +import pytest + +from datadog_checks.dev import docker_run +from datadog_checks.dev.conditions import CheckDockerLogs, CheckEndpoints + +from .common import COMPOSE_FILE, MOCKED_INSTANCE + + +@pytest.fixture(scope='session') +def dd_environment(): + compose_file = COMPOSE_FILE + conditions = [ + CheckDockerLogs(identifier='caddy', patterns=['server running']), + CheckEndpoints(MOCKED_INSTANCE["privileged_metrics_endpoint"]), + CheckEndpoints(MOCKED_INSTANCE["storage_api_endpoint"]), + ] + with docker_run(compose_file, conditions=conditions): + yield { + 'instances': [MOCKED_INSTANCE], + } + + +@pytest.fixture +def instance(): + return copy.deepcopy(MOCKED_INSTANCE) diff --git a/supabase/tests/docker/Caddyfile b/supabase/tests/docker/Caddyfile new file mode 100644 index 0000000000000..bb31bb8c0e71e --- /dev/null +++ b/supabase/tests/docker/Caddyfile @@ -0,0 +1,15 @@ +:8000 { + handle_path /metrics { + root * /usr/share/caddy + rewrite * /privileged_metrics + file_server + } +} + +:9000 { + handle_path /metrics { + root * /usr/share/caddy + rewrite * /storage_api_metrics + file_server + } +} diff --git a/supabase/tests/docker/docker-compose.yaml b/supabase/tests/docker/docker-compose.yaml new file mode 100644 index 0000000000000..8b0b2e5a5cb36 --- /dev/null +++ b/supabase/tests/docker/docker-compose.yaml @@ -0,0 +1,12 @@ +version: "3.9" +services: + caddy: + image: caddy:2 + container_name: caddy + ports: + - "8000:8000" + - "9000:9000" + volumes: + - ./Caddyfile:/etc/caddy/Caddyfile + - ../fixtures/privileged_metrics.txt:/usr/share/caddy/privileged_metrics + - ../fixtures/storage_api_metrics.txt:/usr/share/caddy/storage_api_metrics \ No newline at end of file diff --git a/supabase/tests/fixtures/privileged_metrics.txt b/supabase/tests/fixtures/privileged_metrics.txt new file mode 100644 index 0000000000000..3499a28ff89a1 --- /dev/null +++ 
b/supabase/tests/fixtures/privileged_metrics.txt @@ -0,0 +1,924 @@ +# HELP node_memory_Inactive_file_bytes Memory information field Inactive_file_bytes. +# TYPE node_memory_Inactive_file_bytes gauge +node_memory_Inactive_file_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 7.9081472e+07 +# HELP node_memory_SwapFree_bytes Memory information field SwapFree_bytes. +# TYPE node_memory_SwapFree_bytes gauge +node_memory_SwapFree_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 9.3165568e+08 +# HELP node_vmstat_oom_kill /proc/vmstat information field oom_kill. +# TYPE node_vmstat_oom_kill untyped +node_vmstat_oom_kill{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0 +# HELP node_load15 15m load average. +# TYPE node_load15 gauge +node_load15{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0 +# HELP node_memory_Unevictable_bytes Memory information field Unevictable_bytes. +# TYPE node_memory_Unevictable_bytes gauge +node_memory_Unevictable_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 4096 +# HELP node_network_transmit_carrier_total Network device statistic transmit_carrier. +# TYPE node_network_transmit_carrier_total counter +node_network_transmit_carrier_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="ens5"} 0 +# HELP node_network_transmit_packets_total Network device statistic transmit_packets. +# TYPE node_network_transmit_packets_total counter +node_network_transmit_packets_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="ens5"} 11601 +# HELP node_memory_Dirty_bytes Memory information field Dirty_bytes. +# TYPE node_memory_Dirty_bytes gauge +node_memory_Dirty_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 57344 +# HELP node_memory_SwapTotal_bytes Memory information field SwapTotal_bytes. 
+# TYPE node_memory_SwapTotal_bytes gauge +node_memory_SwapTotal_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 1.073737728e+09 +# HELP node_memory_AnonPages_bytes Memory information field AnonPages_bytes. +# TYPE node_memory_AnonPages_bytes gauge +node_memory_AnonPages_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 1.29445888e+08 +# HELP node_memory_FileHugePages_bytes Memory information field FileHugePages_bytes. +# TYPE node_memory_FileHugePages_bytes gauge +node_memory_FileHugePages_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0 +# HELP node_memory_HugePages_Surp Memory information field HugePages_Surp. +# TYPE node_memory_HugePages_Surp gauge +node_memory_HugePages_Surp{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0 +# HELP node_scrape_collector_success node_exporter: Whether a collector succeeded. +# TYPE node_scrape_collector_success gauge +node_scrape_collector_success{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",collector="cpu"} 1 +node_scrape_collector_success{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",collector="diskstats"} 1 +node_scrape_collector_success{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",collector="filesystem"} 1 +node_scrape_collector_success{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",collector="loadavg"} 1 +node_scrape_collector_success{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",collector="meminfo"} 1 +node_scrape_collector_success{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",collector="netdev"} 1 +node_scrape_collector_success{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",collector="vmstat"} 1 +# HELP node_vmstat_pgpgin /proc/vmstat information field pgpgin. 
+# TYPE node_vmstat_pgpgin untyped +node_vmstat_pgpgin{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 2.418884e+06 +# HELP node_filesystem_device_error Whether an error occurred while getting statistics for the given device. +# TYPE node_filesystem_device_error gauge +node_filesystem_device_error{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="/dev/nvme0n1p2",fstype="ext4",mountpoint="/"} 0 +node_filesystem_device_error{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="/dev/nvme1n1",fstype="ext4",mountpoint="/data"} 0 +# HELP node_memory_Cached_bytes Memory information field Cached_bytes. +# TYPE node_memory_Cached_bytes gauge +node_memory_Cached_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 1.67378944e+08 +# HELP node_memory_Percpu_bytes Memory information field Percpu_bytes. +# TYPE node_memory_Percpu_bytes gauge +node_memory_Percpu_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 1.138688e+06 +# HELP node_memory_SwapCached_bytes Memory information field SwapCached_bytes. +# TYPE node_memory_SwapCached_bytes gauge +node_memory_SwapCached_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 1.5106048e+07 +# HELP node_network_receive_frame_total Network device statistic receive_frame. +# TYPE node_network_receive_frame_total counter +node_network_receive_frame_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="ens5"} 0 +# HELP node_scrape_collector_duration_seconds node_exporter: Duration of a collector scrape. 
+# TYPE node_scrape_collector_duration_seconds gauge +node_scrape_collector_duration_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",collector="cpu"} 0.00023986 +node_scrape_collector_duration_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",collector="diskstats"} 0.000140044 +node_scrape_collector_duration_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",collector="filesystem"} 0.000852738 +node_scrape_collector_duration_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",collector="loadavg"} 3.9507e-05 +node_scrape_collector_duration_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",collector="meminfo"} 0.00016187 +node_scrape_collector_duration_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",collector="netdev"} 9.5664e-05 +node_scrape_collector_duration_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",collector="vmstat"} 0.000140988 +# HELP node_memory_ShmemHugePages_bytes Memory information field ShmemHugePages_bytes. +# TYPE node_memory_ShmemHugePages_bytes gauge +node_memory_ShmemHugePages_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0 +# HELP node_network_transmit_fifo_total Network device statistic transmit_fifo. +# TYPE node_network_transmit_fifo_total counter +node_network_transmit_fifo_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="ens5"} 0 +# HELP node_vmstat_pgpgout /proc/vmstat information field pgpgout. +# TYPE node_vmstat_pgpgout untyped +node_vmstat_pgpgout{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 1.055552e+06 +# HELP node_memory_NFS_Unstable_bytes Memory information field NFS_Unstable_bytes. +# TYPE node_memory_NFS_Unstable_bytes gauge +node_memory_NFS_Unstable_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0 +# HELP node_memory_MemFree_bytes Memory information field MemFree_bytes. 
+# TYPE node_memory_MemFree_bytes gauge +node_memory_MemFree_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 2.2237184e+07 +# HELP node_network_transmit_colls_total Network device statistic transmit_colls. +# TYPE node_network_transmit_colls_total counter +node_network_transmit_colls_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="ens5"} 0 +# HELP node_vmstat_pswpout /proc/vmstat information field pswpout. +# TYPE node_vmstat_pswpout untyped +node_vmstat_pswpout{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 88387 +# HELP node_disk_flush_requests_time_seconds_total This is the total number of seconds spent by all flush requests. +# TYPE node_disk_flush_requests_time_seconds_total counter +node_disk_flush_requests_time_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme0n1"} 0 +node_disk_flush_requests_time_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme1n1"} 0 +# HELP node_disk_written_bytes_total The total number of bytes written successfully. +# TYPE node_disk_written_bytes_total counter +node_disk_written_bytes_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme0n1"} 1.005322752e+09 +node_disk_written_bytes_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme1n1"} 7.5563008e+07 +# HELP node_network_receive_errs_total Network device statistic receive_errs. +# TYPE node_network_receive_errs_total counter +node_network_receive_errs_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="ens5"} 0 +# HELP node_disk_io_time_weighted_seconds_total The weighted # of seconds spent doing I/Os. 
+# TYPE node_disk_io_time_weighted_seconds_total counter +node_disk_io_time_weighted_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme0n1"} 172.749 +node_disk_io_time_weighted_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme1n1"} 7.019 +# HELP node_filesystem_avail_bytes Filesystem space available to non-root users in bytes. +# TYPE node_filesystem_avail_bytes gauge +node_filesystem_avail_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="/dev/nvme0n1p2",fstype="ext4",mountpoint="/"} 5.318856704e+09 +node_filesystem_avail_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="/dev/nvme1n1",fstype="ext4",mountpoint="/data"} 8.064999424e+09 +# HELP node_memory_Mapped_bytes Memory information field Mapped_bytes. +# TYPE node_memory_Mapped_bytes gauge +node_memory_Mapped_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 1.0776576e+08 +# HELP postgresql_restarts_total Number of times postgresql has been restarted +# TYPE postgresql_restarts_total counter +postgresql_restarts_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0 +# HELP node_disk_io_now The number of I/Os currently in progress. +# TYPE node_disk_io_now gauge +node_disk_io_now{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme0n1"} 0 +node_disk_io_now{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme1n1"} 0 +# HELP node_memory_SReclaimable_bytes Memory information field SReclaimable_bytes. +# TYPE node_memory_SReclaimable_bytes gauge +node_memory_SReclaimable_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 3.7298176e+07 +# HELP node_memory_KernelStack_bytes Memory information field KernelStack_bytes. 
+# TYPE node_memory_KernelStack_bytes gauge +node_memory_KernelStack_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 3.670016e+06 +# HELP node_memory_FilePmdMapped_bytes Memory information field FilePmdMapped_bytes. +# TYPE node_memory_FilePmdMapped_bytes gauge +node_memory_FilePmdMapped_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0 +# HELP node_network_receive_drop_total Network device statistic receive_drop. +# TYPE node_network_receive_drop_total counter +node_network_receive_drop_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="ens5"} 0 +# HELP node_memory_HugePages_Total Memory information field HugePages_Total. +# TYPE node_memory_HugePages_Total gauge +node_memory_HugePages_Total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0 +# HELP node_filesystem_size_bytes Filesystem size in bytes. +# TYPE node_filesystem_size_bytes gauge +node_filesystem_size_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="/dev/nvme0n1p2",fstype="ext4",mountpoint="/"} 1.0359754752e+10 +node_filesystem_size_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="/dev/nvme1n1",fstype="ext4",mountpoint="/data"} 8.350298112e+09 +# HELP node_load5 5m load average. +# TYPE node_load5 gauge +node_load5{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0 +# HELP node_memory_Inactive_bytes Memory information field Inactive_bytes. +# TYPE node_memory_Inactive_bytes gauge +node_memory_Inactive_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 1.72179456e+08 +# HELP node_memory_PageTables_bytes Memory information field PageTables_bytes. +# TYPE node_memory_PageTables_bytes gauge +node_memory_PageTables_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 6.483968e+06 +# HELP node_network_receive_compressed_total Network device statistic receive_compressed. 
+# TYPE node_network_receive_compressed_total counter +node_network_receive_compressed_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="ens5"} 0 +# HELP node_disk_flush_requests_total The total number of flush requests completed successfully +# TYPE node_disk_flush_requests_total counter +node_disk_flush_requests_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme0n1"} 0 +node_disk_flush_requests_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme1n1"} 0 +# HELP node_disk_discarded_sectors_total The total number of sectors discarded successfully. +# TYPE node_disk_discarded_sectors_total counter +node_disk_discarded_sectors_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme0n1"} 0 +node_disk_discarded_sectors_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme1n1"} 0 +# HELP node_memory_AnonHugePages_bytes Memory information field AnonHugePages_bytes. +# TYPE node_memory_AnonHugePages_bytes gauge +node_memory_AnonHugePages_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0 +# HELP node_memory_Bounce_bytes Memory information field Bounce_bytes. +# TYPE node_memory_Bounce_bytes gauge +node_memory_Bounce_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0 +# HELP node_memory_MemTotal_bytes Memory information field MemTotal_bytes. +# TYPE node_memory_MemTotal_bytes gauge +node_memory_MemTotal_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 4.43674624e+08 +# HELP node_network_receive_fifo_total Network device statistic receive_fifo. +# TYPE node_network_receive_fifo_total counter +node_network_receive_fifo_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="ens5"} 0 +# HELP node_cpu_seconds_total Seconds the CPUs spent in each mode. 
+# TYPE node_cpu_seconds_total counter +node_cpu_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="0",mode="idle"} 2263.65 +node_cpu_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="0",mode="iowait"} 33.6 +node_cpu_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="0",mode="irq"} 0 +node_cpu_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="0",mode="nice"} 0.04 +node_cpu_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="0",mode="softirq"} 0.37 +node_cpu_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="0",mode="steal"} 1.45 +node_cpu_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="0",mode="system"} 13.61 +node_cpu_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="0",mode="user"} 27.02 +node_cpu_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="1",mode="idle"} 2265.34 +node_cpu_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="1",mode="iowait"} 32.49 +node_cpu_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="1",mode="irq"} 0 +node_cpu_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="1",mode="nice"} 0.06 +node_cpu_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="1",mode="softirq"} 0.3 +node_cpu_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="1",mode="steal"} 1.61 +node_cpu_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="1",mode="system"} 12.2 +node_cpu_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="1",mode="user"} 27.08 +# HELP node_memory_VmallocUsed_bytes Memory information field VmallocUsed_bytes. 
+# TYPE node_memory_VmallocUsed_bytes gauge +node_memory_VmallocUsed_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 1.0760192e+07 +# HELP node_memory_Writeback_bytes Memory information field Writeback_bytes. +# TYPE node_memory_Writeback_bytes gauge +node_memory_Writeback_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0 +# HELP node_memory_Inactive_anon_bytes Memory information field Inactive_anon_bytes. +# TYPE node_memory_Inactive_anon_bytes gauge +node_memory_Inactive_anon_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 9.3097984e+07 +# HELP node_memory_Active_bytes Memory information field Active_bytes. +# TYPE node_memory_Active_bytes gauge +node_memory_Active_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 1.42127104e+08 +# HELP node_memory_CommitLimit_bytes Memory information field CommitLimit_bytes. +# TYPE node_memory_CommitLimit_bytes gauge +node_memory_CommitLimit_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 1.295572992e+09 +# HELP node_network_receive_packets_total Network device statistic receive_packets. +# TYPE node_network_receive_packets_total counter +node_network_receive_packets_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="ens5"} 33418 +# HELP node_network_transmit_compressed_total Network device statistic transmit_compressed. +# TYPE node_network_transmit_compressed_total counter +node_network_transmit_compressed_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="ens5"} 0 +# HELP node_disk_writes_merged_total The number of writes merged. 
+# TYPE node_disk_writes_merged_total counter +node_disk_writes_merged_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme0n1"} 88093 +node_disk_writes_merged_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme1n1"} 3408 +# HELP node_memory_Active_anon_bytes Memory information field Active_anon_bytes. +# TYPE node_memory_Active_anon_bytes gauge +node_memory_Active_anon_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 5.8032128e+07 +# HELP node_disk_reads_completed_total The total number of reads completed successfully. +# TYPE node_disk_reads_completed_total counter +node_disk_reads_completed_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme0n1"} 88478 +node_disk_reads_completed_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme1n1"} 2670 +# HELP node_disk_discard_time_seconds_total This is the total number of seconds spent by all discards. +# TYPE node_disk_discard_time_seconds_total counter +node_disk_discard_time_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme0n1"} 0 +node_disk_discard_time_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme1n1"} 0 +# HELP node_disk_reads_merged_total The total number of reads merged. +# TYPE node_disk_reads_merged_total counter +node_disk_reads_merged_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme0n1"} 30022 +node_disk_reads_merged_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme1n1"} 342 +# HELP node_filesystem_files_free Filesystem total free file nodes. 
+# TYPE node_filesystem_files_free gauge +node_filesystem_files_free{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="/dev/nvme0n1p2",fstype="ext4",mountpoint="/"} 466013 +node_filesystem_files_free{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="/dev/nvme1n1",fstype="ext4",mountpoint="/data"} 523105 +# HELP node_memory_HugePages_Free Memory information field HugePages_Free. +# TYPE node_memory_HugePages_Free gauge +node_memory_HugePages_Free{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0 +# HELP db_transmit_bytes postgres and pgbouncer network transmit bytes +# TYPE db_transmit_bytes counter +db_transmit_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 89799 +# HELP node_memory_VmallocTotal_bytes Memory information field VmallocTotal_bytes. +# TYPE node_memory_VmallocTotal_bytes gauge +node_memory_VmallocTotal_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 1.3633903919104e+14 +# HELP node_network_transmit_bytes_total Network device statistic transmit_bytes. +# TYPE node_network_transmit_bytes_total counter +node_network_transmit_bytes_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="ens5"} 2.726347e+06 +# HELP node_load1 1m load average. +# TYPE node_load1 gauge +node_load1{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0 +# HELP node_network_receive_multicast_total Network device statistic receive_multicast. +# TYPE node_network_receive_multicast_total counter +node_network_receive_multicast_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="ens5"} 0 +# HELP node_disk_discards_merged_total The total number of discards merged. 
+# TYPE node_disk_discards_merged_total counter +node_disk_discards_merged_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme0n1"} 0 +node_disk_discards_merged_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme1n1"} 0 +# HELP node_memory_Committed_AS_bytes Memory information field Committed_AS_bytes. +# TYPE node_memory_Committed_AS_bytes gauge +node_memory_Committed_AS_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 1.148878848e+09 +# HELP node_memory_VmallocChunk_bytes Memory information field VmallocChunk_bytes. +# TYPE node_memory_VmallocChunk_bytes gauge +node_memory_VmallocChunk_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0 +# HELP node_filesystem_free_bytes Filesystem free space in bytes. +# TYPE node_filesystem_free_bytes gauge +node_filesystem_free_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="/dev/nvme0n1p2",fstype="ext4",mountpoint="/"} 5.34626304e+09 +node_filesystem_free_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="/dev/nvme1n1",fstype="ext4",mountpoint="/data"} 8.18663424e+09 +# HELP node_disk_writes_completed_total The total number of writes completed successfully. +# TYPE node_disk_writes_completed_total counter +node_disk_writes_completed_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme0n1"} 23492 +node_disk_writes_completed_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme1n1"} 3503 +# HELP node_memory_ShmemPmdMapped_bytes Memory information field ShmemPmdMapped_bytes. +# TYPE node_memory_ShmemPmdMapped_bytes gauge +node_memory_ShmemPmdMapped_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0 +# HELP node_network_transmit_drop_total Network device statistic transmit_drop. 
+# TYPE node_network_transmit_drop_total counter +node_network_transmit_drop_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="ens5"} 0 +# HELP node_network_transmit_errs_total Network device statistic transmit_errs. +# TYPE node_network_transmit_errs_total counter +node_network_transmit_errs_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="ens5"} 0 +# HELP node_disk_io_time_seconds_total Total seconds spent doing I/Os. +# TYPE node_disk_io_time_seconds_total counter +node_disk_io_time_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme0n1"} 81.48 +node_disk_io_time_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme1n1"} 6.908 +# HELP node_memory_SUnreclaim_bytes Memory information field SUnreclaim_bytes. +# TYPE node_memory_SUnreclaim_bytes gauge +node_memory_SUnreclaim_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 3.5139584e+07 +# HELP node_vmstat_pswpin /proc/vmstat information field pswpin. +# TYPE node_vmstat_pswpin untyped +node_vmstat_pswpin{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 43053 +# HELP node_disk_read_time_seconds_total The total number of seconds spent by all reads. +# TYPE node_disk_read_time_seconds_total counter +node_disk_read_time_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme0n1"} 120.031 +node_disk_read_time_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme1n1"} 3.342 +# HELP node_vmstat_pgmajfault /proc/vmstat information field pgmajfault. +# TYPE node_vmstat_pgmajfault untyped +node_vmstat_pgmajfault{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 40869 +# HELP node_disk_write_time_seconds_total This is the total number of seconds spent by all writes. 
+# TYPE node_disk_write_time_seconds_total counter +node_disk_write_time_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme0n1"} 52.717 +node_disk_write_time_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme1n1"} 3.676 +# HELP node_memory_Hugetlb_bytes Memory information field Hugetlb_bytes. +# TYPE node_memory_Hugetlb_bytes gauge +node_memory_Hugetlb_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0 +# HELP node_memory_MemAvailable_bytes Memory information field MemAvailable_bytes. +# TYPE node_memory_MemAvailable_bytes gauge +node_memory_MemAvailable_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 2.1239808e+08 +# HELP node_memory_WritebackTmp_bytes Memory information field WritebackTmp_bytes. +# TYPE node_memory_WritebackTmp_bytes gauge +node_memory_WritebackTmp_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0 +# HELP node_memory_Buffers_bytes Memory information field Buffers_bytes. +# TYPE node_memory_Buffers_bytes gauge +node_memory_Buffers_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 8.97024e+06 +# HELP node_disk_read_bytes_total The total number of bytes read successfully. +# TYPE node_disk_read_bytes_total counter +node_disk_read_bytes_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme0n1"} 2.429791232e+09 +node_disk_read_bytes_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme1n1"} 4.7137792e+07 +# HELP node_memory_HardwareCorrupted_bytes Memory information field HardwareCorrupted_bytes. +# TYPE node_memory_HardwareCorrupted_bytes gauge +node_memory_HardwareCorrupted_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0 +# HELP node_memory_Hugepagesize_bytes Memory information field Hugepagesize_bytes. 
+# TYPE node_memory_Hugepagesize_bytes gauge +node_memory_Hugepagesize_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 2.097152e+06 +# HELP node_memory_KReclaimable_bytes Memory information field KReclaimable_bytes. +# TYPE node_memory_KReclaimable_bytes gauge +node_memory_KReclaimable_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 3.7298176e+07 +# HELP node_memory_Shmem_bytes Memory information field Shmem_bytes. +# TYPE node_memory_Shmem_bytes gauge +node_memory_Shmem_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 1.3119488e+07 +# HELP node_network_receive_bytes_total Network device statistic receive_bytes. +# TYPE node_network_receive_bytes_total counter +node_network_receive_bytes_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="ens5"} 4.0247816e+07 +# HELP node_disk_discards_completed_total The total number of discards completed successfully. +# TYPE node_disk_discards_completed_total counter +node_disk_discards_completed_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme0n1"} 0 +node_disk_discards_completed_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme1n1"} 0 +# HELP node_memory_HugePages_Rsvd Memory information field HugePages_Rsvd. +# TYPE node_memory_HugePages_Rsvd gauge +node_memory_HugePages_Rsvd{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0 +# HELP node_memory_Mlocked_bytes Memory information field Mlocked_bytes. +# TYPE node_memory_Mlocked_bytes gauge +node_memory_Mlocked_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 4096 +# HELP node_filesystem_files Filesystem total file nodes. 
+# TYPE node_filesystem_files gauge +node_filesystem_files{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="/dev/nvme0n1p2",fstype="ext4",mountpoint="/"} 648960 +node_filesystem_files{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="/dev/nvme1n1",fstype="ext4",mountpoint="/data"} 524288 +# HELP node_filesystem_readonly Filesystem read-only status. +# TYPE node_filesystem_readonly gauge +node_filesystem_readonly{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="/dev/nvme0n1p2",fstype="ext4",mountpoint="/"} 0 +node_filesystem_readonly{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="/dev/nvme1n1",fstype="ext4",mountpoint="/data"} 0 +# HELP node_memory_Active_file_bytes Memory information field Active_file_bytes. +# TYPE node_memory_Active_file_bytes gauge +node_memory_Active_file_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 8.4094976e+07 +# HELP node_memory_Slab_bytes Memory information field Slab_bytes. +# TYPE node_memory_Slab_bytes gauge +node_memory_Slab_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 7.243776e+07 +# HELP node_vmstat_pgfault /proc/vmstat information field pgfault. +# TYPE node_vmstat_pgfault untyped +node_vmstat_pgfault{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 2.18115e+06 +# HELP node_cpu_guest_seconds_total Seconds the CPUs spent in guests (VMs) for each mode. 
+# TYPE node_cpu_guest_seconds_total counter +node_cpu_guest_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="0",mode="nice"} 0 +node_cpu_guest_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="0",mode="user"} 0 +node_cpu_guest_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="1",mode="nice"} 0 +node_cpu_guest_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="1",mode="user"} 0 +# HELP pg_stat_database_xact_commit_total Transactions committed +# TYPE pg_stat_database_xact_commit_total counter +pg_stat_database_xact_commit_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 2472 +# HELP physical_replication_lag_is_wal_replay_paused Check if WAL replay has been paused +# TYPE physical_replication_lag_is_wal_replay_paused gauge +physical_replication_lag_is_wal_replay_paused{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP postgres_exporter_config_last_reload_success_timestamp_seconds Timestamp of the last successful configuration reload. +# TYPE postgres_exporter_config_last_reload_success_timestamp_seconds gauge +postgres_exporter_config_last_reload_success_timestamp_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 0 +# HELP process_virtual_memory_bytes Virtual memory size in bytes. 
+# TYPE process_virtual_memory_bytes gauge +process_virtual_memory_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 1.266716672e+09 +# HELP realtime_postgres_changes_total_subscriptions Total subscription records listening for Postgres changes +# TYPE realtime_postgres_changes_total_subscriptions gauge +realtime_postgres_changes_total_subscriptions{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP pg_stat_bgwriter_checkpoint_sync_time_total Time spent synchronizing checkpoint files to disk +# TYPE pg_stat_bgwriter_checkpoint_sync_time_total counter +pg_stat_bgwriter_checkpoint_sync_time_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 67 +# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. +# TYPE go_memstats_mcache_inuse_bytes gauge +go_memstats_mcache_inuse_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 2400 +# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. +# TYPE go_memstats_mcache_sys_bytes gauge +go_memstats_mcache_sys_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 15600 +# HELP process_start_time_seconds Start time of the process since unix epoch in seconds. +# TYPE process_start_time_seconds gauge +process_start_time_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 1.73323902162e+09 +# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. 
+# TYPE go_memstats_last_gc_time_seconds gauge +go_memstats_last_gc_time_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 1.7332411937058074e+09 +# HELP pg_stat_bgwriter_buffers_alloc_total Buffers allocated +# TYPE pg_stat_bgwriter_buffers_alloc_total counter +pg_stat_bgwriter_buffers_alloc_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 12444 +# HELP pg_stat_bgwriter_buffers_backend_fsync_total fsync calls executed by a backend directly +# TYPE pg_stat_bgwriter_buffers_backend_fsync_total counter +pg_stat_bgwriter_buffers_backend_fsync_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP pg_stat_statements_total_time_seconds Total time spent, in seconds +# TYPE pg_stat_statements_total_time_seconds counter +pg_stat_statements_total_time_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 3.2134043250000004 +# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. +# TYPE process_cpu_seconds_total counter +process_cpu_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 0.27 +# HELP auth_users_user_count Number of users in the project db +# TYPE auth_users_user_count gauge +auth_users_user_count{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP pg_stat_database_xact_rollback_total Transactions rolled back +# TYPE pg_stat_database_xact_rollback_total counter +pg_stat_database_xact_rollback_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 5 +# HELP go_info Information about the Go environment. 
+# TYPE go_info gauge +go_info{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",version="go1.21.3"} 1 +# HELP pg_exporter_user_queries_load_error Whether the user queries file was loaded and parsed successfully (1 for error, 0 for success). +# TYPE pg_exporter_user_queries_load_error gauge +pg_exporter_user_queries_load_error{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",filename="/opt/postgres_exporter/queries.yml",hashsum="0af5dd9317f7a5209580dc72ebbb795a1468c3e4cf68eabd2e423388bcf2546f"} 0 +# HELP pg_stat_bgwriter_checkpoints_timed_total Scheduled checkpoints performed +# TYPE pg_stat_bgwriter_checkpoints_timed_total counter +pg_stat_bgwriter_checkpoints_timed_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 7 +# HELP postgres_exporter_build_info A metric with a constant '1' value labeled by version, revision, branch, goversion from which postgres_exporter was built, and the goos and goarch for the build. +# TYPE postgres_exporter_build_info gauge +postgres_exporter_build_info{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",branch="HEAD",goarch="arm64",goos="linux",goversion="go1.21.3",revision="68c176b8833b7580bf847cecf60f8e0ad5923f9a",tags="unknown",version="0.15.0"} 1 +# HELP go_memstats_frees_total Total number of frees. +# TYPE go_memstats_frees_total counter +go_memstats_frees_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 86197 +# HELP pg_exporter_last_scrape_error Whether the last scrape of metrics from PostgreSQL resulted in an error (1 for error, 0 for success). +# TYPE pg_exporter_last_scrape_error gauge +pg_exporter_last_scrape_error{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 0 +# HELP pg_exporter_scrapes_total Total number of times PostgreSQL was scraped for metrics. 
+# TYPE pg_exporter_scrapes_total counter +pg_exporter_scrapes_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 38 +# HELP pg_stat_bgwriter_stats_reset Most recent stat reset time +# TYPE pg_stat_bgwriter_stats_reset counter +pg_stat_bgwriter_stats_reset{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 1.732551629e+09 +# HELP pg_status_in_recovery Database in recovery +# TYPE pg_status_in_recovery gauge +pg_status_in_recovery{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP postgres_exporter_config_last_reload_successful Postgres exporter config loaded successfully. +# TYPE postgres_exporter_config_last_reload_successful gauge +postgres_exporter_config_last_reload_successful{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 0 +# HELP go_memstats_mallocs_total Total number of mallocs. +# TYPE go_memstats_mallocs_total counter +go_memstats_mallocs_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 91002 +# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. +# TYPE go_memstats_other_sys_bytes gauge +go_memstats_other_sys_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 527816 +# HELP go_threads Number of OS threads created. +# TYPE go_threads gauge +go_threads{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 5 +# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code. 
+# TYPE promhttp_metric_handler_requests_total counter +promhttp_metric_handler_requests_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",code="200"} 37 +promhttp_metric_handler_requests_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",code="500"} 0 +promhttp_metric_handler_requests_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",code="503"} 0 +# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. +# TYPE go_memstats_mspan_inuse_bytes gauge +go_memstats_mspan_inuse_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 65184 +# HELP go_memstats_sys_bytes Number of bytes obtained from system. +# TYPE go_memstats_sys_bytes gauge +go_memstats_sys_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 1.2819472e+07 +# HELP pg_stat_database_blks_read_total Number of disk blocks read +# TYPE pg_stat_database_blks_read_total counter +pg_stat_database_blks_read_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 2465 +# HELP pg_stat_database_conflicts_confl_lock_total Queries cancelled due to lock timeouts +# TYPE pg_stat_database_conflicts_confl_lock_total counter +pg_stat_database_conflicts_confl_lock_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. +# TYPE go_memstats_buck_hash_sys_bytes gauge +go_memstats_buck_hash_sys_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 5048 +# HELP pg_scrape_collector_duration_seconds postgres_exporter: Duration of a collector scrape. 
+# TYPE pg_scrape_collector_duration_seconds gauge +pg_scrape_collector_duration_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",collector="database"} 0.003614192 +# HELP pg_stat_bgwriter_checkpoints_req_total Requested checkpoints performed +# TYPE pg_stat_bgwriter_checkpoints_req_total counter +pg_stat_bgwriter_checkpoints_req_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 6 +# HELP pg_stat_database_most_recent_reset The most recent time one of the databases had its statistics reset +# TYPE pg_stat_database_most_recent_reset counter +pg_stat_database_most_recent_reset{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} NaN +# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. +# TYPE go_memstats_mspan_sys_bytes gauge +go_memstats_mspan_sys_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 81480 +# HELP storage_storage_size_mb The total size used for all storage buckets, in mb +# TYPE storage_storage_size_mb gauge +storage_storage_size_mb{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} NaN +# HELP process_open_fds Number of open file descriptors. +# TYPE process_open_fds gauge +process_open_fds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 12 +# HELP physical_replication_lag_is_connected_to_primary Monitor connection to the primary database +# TYPE physical_replication_lag_is_connected_to_primary gauge +physical_replication_lag_is_connected_to_primary{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. 
+# TYPE go_memstats_alloc_bytes gauge +go_memstats_alloc_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 2.412352e+06 +# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. +# TYPE go_memstats_gc_sys_bytes gauge +go_memstats_gc_sys_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 3.80092e+06 +# HELP go_memstats_heap_objects Number of allocated objects. +# TYPE go_memstats_heap_objects gauge +go_memstats_heap_objects{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 4805 +# HELP pg_ls_archive_statusdir_wal_pending_count Number of not yet archived WAL files +# TYPE pg_ls_archive_statusdir_wal_pending_count counter +pg_ls_archive_statusdir_wal_pending_count{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP pg_stat_database_temp_files_total Temp files created by queries +# TYPE pg_stat_database_temp_files_total counter +pg_stat_database_temp_files_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles. 
+# TYPE go_gc_duration_seconds summary +go_gc_duration_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",quantile="0"} 1.9857e-05 +go_gc_duration_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",quantile="0.25"} 5.2274e-05 +go_gc_duration_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",quantile="0.5"} 9.5878e-05 +go_gc_duration_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",quantile="0.75"} 0.000117195 +go_gc_duration_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",quantile="1"} 0.041046092 +go_gc_duration_seconds_sum{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 0.055028914 +go_gc_duration_seconds_count{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 16 +# HELP pg_stat_database_conflicts_confl_tablespace_total Queries cancelled due to dropped tablespaces +# TYPE pg_stat_database_conflicts_confl_tablespace_total counter +pg_stat_database_conflicts_confl_tablespace_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP pg_stat_bgwriter_maxwritten_clean_total Number of times bg writer stopped a cleaning scan because it had written too many buffers +# TYPE pg_stat_bgwriter_maxwritten_clean_total counter +pg_stat_bgwriter_maxwritten_clean_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP pg_stat_bgwriter_checkpoint_write_time_total Time spent writing checkpoint files to disk +# TYPE pg_stat_bgwriter_checkpoint_write_time_total counter +pg_stat_bgwriter_checkpoint_write_time_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 17715 +# HELP pg_stat_database_tup_deleted_total Rows deleted +# TYPE pg_stat_database_tup_deleted_total counter 
+pg_stat_database_tup_deleted_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 1436 +# HELP physical_replication_lag_physical_replication_lag_seconds Physical replication lag in seconds +# TYPE physical_replication_lag_physical_replication_lag_seconds gauge +physical_replication_lag_physical_replication_lag_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP pg_stat_bgwriter_buffers_clean_total Buffers written by bg writter +# TYPE pg_stat_bgwriter_buffers_clean_total counter +pg_stat_bgwriter_buffers_clean_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP pg_database_size_bytes Disk space used by the database +# TYPE pg_database_size_bytes gauge +pg_database_size_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",datname="postgres"} 1.0212143e+07 +pg_database_size_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",datname="template0"} 7.631663e+06 +pg_database_size_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",datname="template1"} 7.631663e+06 +# HELP pg_stat_database_conflicts_confl_deadlock_total Queries cancelled due to deadlocks +# TYPE pg_stat_database_conflicts_confl_deadlock_total counter +pg_stat_database_conflicts_confl_deadlock_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP go_goroutines Number of goroutines that currently exist. +# TYPE go_goroutines gauge +go_goroutines{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 10 +# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. 
+# TYPE go_memstats_heap_inuse_bytes gauge +go_memstats_heap_inuse_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 3.710976e+06 +# HELP pg_scrape_collector_success postgres_exporter: Whether a collector succeeded. +# TYPE pg_scrape_collector_success gauge +pg_scrape_collector_success{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",collector="database"} 1 +# HELP pg_stat_database_conflicts_confl_bufferpin_total Queries cancelled due to pinned buffers +# TYPE pg_stat_database_conflicts_confl_bufferpin_total counter +pg_stat_database_conflicts_confl_bufferpin_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP pg_stat_database_tup_fetched_total Rows fetched by queries +# TYPE pg_stat_database_tup_fetched_total counter +pg_stat_database_tup_fetched_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 253932 +# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes. +# TYPE process_virtual_memory_max_bytes gauge +process_virtual_memory_max_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 1.8446744073709552e+19 +# HELP pg_stat_database_temp_bytes_total Temp data written by queries +# TYPE pg_stat_database_temp_bytes_total counter +pg_stat_database_temp_bytes_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP pg_stat_bgwriter_buffers_checkpoint_total Buffers written during checkpoints +# TYPE pg_stat_bgwriter_buffers_checkpoint_total counter +pg_stat_bgwriter_buffers_checkpoint_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 4888 +# HELP process_max_fds Maximum number of open file descriptors. 
+# TYPE process_max_fds gauge +process_max_fds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 524288 +# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. +# TYPE go_memstats_heap_idle_bytes gauge +go_memstats_heap_idle_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 4.21888e+06 +# HELP pg_stat_database_deadlocks_total Deadlocks detected +# TYPE pg_stat_database_deadlocks_total counter +pg_stat_database_deadlocks_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP pg_stat_replication_replay_lag Max replay lag +# TYPE pg_stat_replication_replay_lag gauge +pg_stat_replication_replay_lag{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} NaN +# HELP pg_stat_statements_total_queries Number of times executed +# TYPE pg_stat_statements_total_queries counter +pg_stat_statements_total_queries{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 482 +# HELP realtime_postgres_changes_client_subscriptions Client subscriptions listening for Postgres changes +# TYPE realtime_postgres_changes_client_subscriptions gauge +realtime_postgres_changes_client_subscriptions{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP supabase_usage_metrics_user_queries_total The total number of user queries executed +# TYPE supabase_usage_metrics_user_queries_total counter +supabase_usage_metrics_user_queries_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 358 +# HELP pg_stat_database_tup_returned_total Rows returned by queries +# TYPE pg_stat_database_tup_returned_total counter +pg_stat_database_tup_returned_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 715303 +# HELP go_memstats_next_gc_bytes Number of heap bytes when 
next garbage collection will take place. +# TYPE go_memstats_next_gc_bytes gauge +go_memstats_next_gc_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 4.194304e+06 +# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. +# TYPE go_memstats_stack_inuse_bytes gauge +go_memstats_stack_inuse_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 458752 +# HELP pg_database_size_mb Disk space used by the database +# TYPE pg_database_size_mb gauge +pg_database_size_mb{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 24.295300483703613 +# HELP pg_stat_database_num_backends The number of active backends +# TYPE pg_stat_database_num_backends gauge +pg_stat_database_num_backends{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 6 +# HELP process_resident_memory_bytes Resident memory size in bytes. +# TYPE process_resident_memory_bytes gauge +process_resident_memory_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 9.146368e+06 +# HELP pg_stat_replication_send_lag Max send lag +# TYPE pg_stat_replication_send_lag gauge +pg_stat_replication_send_lag{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} NaN +# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. +# TYPE go_memstats_stack_sys_bytes gauge +go_memstats_stack_sys_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 458752 +# HELP pg_exporter_last_scrape_duration_seconds Duration of the last scrape of metrics from PostgreSQL. +# TYPE pg_exporter_last_scrape_duration_seconds gauge +pg_exporter_last_scrape_duration_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 0.004154578 +# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. 
+# TYPE go_memstats_heap_sys_bytes gauge +go_memstats_heap_sys_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 7.929856e+06 +# HELP pg_stat_database_conflicts_confl_snapshot_total Queries cancelled due to old snapshots +# TYPE pg_stat_database_conflicts_confl_snapshot_total counter +pg_stat_database_conflicts_confl_snapshot_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP pg_stat_database_tup_inserted_total Rows inserted +# TYPE pg_stat_database_tup_inserted_total counter +pg_stat_database_tup_inserted_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 22231 +# HELP pg_stat_database_tup_updated_total Rows updated +# TYPE pg_stat_database_tup_updated_total counter +pg_stat_database_tup_updated_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 1959 +# HELP pg_up Whether the last scrape of metrics from PostgreSQL was able to connect to the server (1 for yes, 0 for no). +# TYPE pg_up gauge +pg_up{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 1 +# HELP pg_wal_size_mb Disk space used by WAL files +# TYPE pg_wal_size_mb gauge +pg_wal_size_mb{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 80 +# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. +# TYPE go_memstats_heap_alloc_bytes gauge +go_memstats_heap_alloc_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 2.412352e+06 +# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. 
+# TYPE go_memstats_heap_released_bytes gauge +go_memstats_heap_released_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 3.940352e+06 +# HELP pg_settings_default_transaction_read_only Default transaction mode set to read only +# TYPE pg_settings_default_transaction_read_only gauge +pg_settings_default_transaction_read_only{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP pg_stat_database_conflicts_total Queries canceled due to conflicts with recovery +# TYPE pg_stat_database_conflicts_total counter +pg_stat_database_conflicts_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served. +# TYPE promhttp_metric_handler_requests_in_flight gauge +promhttp_metric_handler_requests_in_flight{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 1 +# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. 
+# TYPE go_memstats_alloc_bytes_total counter +go_memstats_alloc_bytes_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 1.6002264e+07 +# HELP pg_stat_bgwriter_buffers_backend_total Buffers written directly by a backend +# TYPE pg_stat_bgwriter_buffers_backend_total counter +pg_stat_bgwriter_buffers_backend_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 9360 +# HELP replication_slots_max_lag_bytes Max Replication Lag +# TYPE replication_slots_max_lag_bytes gauge +replication_slots_max_lag_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 1000 +# HELP pg_stat_activity_xact_runtime Transaction Runtime +# TYPE pg_stat_activity_xact_runtime gauge +pg_stat_activity_xact_runtime{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP pg_stat_database_blks_hit_total Disk blocks found in buffer cache +# TYPE pg_stat_database_blks_hit_total counter +pg_stat_database_blks_hit_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 490933 +# HELP go_memstats_lookups_total Total number of pointer lookups. 
+# TYPE go_memstats_lookups_total counter +go_memstats_lookups_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 0 +# HELP pgrst_schema_cache_query_time_seconds The query time in seconds of the last schema cache load +# TYPE pgrst_schema_cache_query_time_seconds gauge +pgrst_schema_cache_query_time_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgrest"} 0.000981169 +# HELP pgrst_schema_cache_loads_total The total number of times the schema cache was loaded +# TYPE pgrst_schema_cache_loads_total counter +pgrst_schema_cache_loads_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgrest",status="FAIL"} 1 +pgrst_schema_cache_loads_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgrest",status="SUCCESS"} 6 +# HELP pgrst_db_pool_max Max pool connections +# TYPE pgrst_db_pool_max gauge +pgrst_db_pool_max{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgrest"} 10 +# HELP pgrst_db_pool_waiting Requests waiting to acquire a pool connection +# TYPE pgrst_db_pool_waiting gauge +pgrst_db_pool_waiting{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgrest"} 0 +# HELP pgrst_db_pool_available Available connections in the pool +# TYPE pgrst_db_pool_available gauge +pgrst_db_pool_available{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgrest"} -3 +# HELP pgrst_db_pool_timeouts_total The total number of pool connection timeouts +# TYPE pgrst_db_pool_timeouts_total counter +pgrst_db_pool_timeouts_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgrest"} 0 +# HELP go_memstats_lookups_total Total number of pointer lookups. +# TYPE go_memstats_lookups_total counter +go_memstats_lookups_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 0 +# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. 
+# TYPE go_memstats_mspan_sys_bytes gauge +go_memstats_mspan_sys_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 81600 +# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. +# TYPE go_memstats_stack_inuse_bytes gauge +go_memstats_stack_inuse_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 622592 +# HELP process_open_fds Number of open file descriptors. +# TYPE process_open_fds gauge +process_open_fds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 10 +# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served. +# TYPE promhttp_metric_handler_requests_in_flight gauge +promhttp_metric_handler_requests_in_flight{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 1 +# HELP db_sql_connection_max_open Maximum number of open connections to the database +# TYPE db_sql_connection_max_open gauge +db_sql_connection_max_open{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="github.com/XSAM/otelsql",otel_scope_version="0.26.0"} 10 +# HELP go_goroutines Number of goroutines that currently exist. 
+# TYPE go_goroutines gauge +go_goroutines{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 41 +# HELP process_runtime_go_mem_live_objects Number of live objects is the number of cumulative Mallocs - Frees +# TYPE process_runtime_go_mem_live_objects gauge +process_runtime_go_mem_live_objects{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0"} 10167 +# HELP db_sql_connection_closed_max_lifetime_total The total number of connections closed due to SetConnMaxLifetime +# TYPE db_sql_connection_closed_max_lifetime_total counter +db_sql_connection_closed_max_lifetime_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="github.com/XSAM/otelsql",otel_scope_version="0.26.0"} 0 +# HELP process_runtime_go_mem_heap_sys_bytes Bytes of heap memory obtained from the OS +# TYPE process_runtime_go_mem_heap_sys_bytes gauge +process_runtime_go_mem_heap_sys_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0"} 7.798784e+06 +# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes. +# TYPE process_virtual_memory_max_bytes gauge +process_virtual_memory_max_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 1.8446744073709552e+19 +# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. 
+# TYPE process_cpu_seconds_total counter +process_cpu_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 0.13 +# HELP process_runtime_go_goroutines Number of goroutines that currently exist +# TYPE process_runtime_go_goroutines gauge +process_runtime_go_goroutines{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0"} 40 +# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code. +# TYPE promhttp_metric_handler_requests_total counter +promhttp_metric_handler_requests_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",code="200"} 0 +promhttp_metric_handler_requests_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",code="500"} 0 +promhttp_metric_handler_requests_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",code="503"} 0 +# HELP http_status_codes_total Number of returned HTTP status codes +# TYPE http_status_codes_total counter +http_status_codes_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",code="200",http_route="/health",otel_scope_name="gotrue",otel_scope_version=""} 1 +# HELP process_runtime_go_mem_heap_inuse_bytes Bytes in in-use spans +# TYPE process_runtime_go_mem_heap_inuse_bytes gauge +process_runtime_go_mem_heap_inuse_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0"} 3.4816e+06 +# HELP go_memstats_heap_objects Number of allocated objects. +# TYPE go_memstats_heap_objects gauge +go_memstats_heap_objects{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 9763 +# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. 
+# TYPE go_memstats_mspan_inuse_bytes gauge +go_memstats_mspan_inuse_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 70880 +# HELP http_server_response_size_bytes_total Measures the size of HTTP response messages. +# TYPE http_server_response_size_bytes_total counter +http_server_response_size_bytes_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0"} 107 +# HELP process_runtime_go_mem_lookups_total Number of pointer lookups performed by the runtime +# TYPE process_runtime_go_mem_lookups_total counter +process_runtime_go_mem_lookups_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0"} 0 +# HELP db_sql_connection_closed_max_idle_time_total The total number of connections closed due to SetConnMaxIdleTime +# TYPE db_sql_connection_closed_max_idle_time_total counter +db_sql_connection_closed_max_idle_time_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="github.com/XSAM/otelsql",otel_scope_version="0.26.0"} 0 +# HELP db_sql_connection_open The number of established connections both in use and idle +# TYPE db_sql_connection_open gauge +db_sql_connection_open{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="github.com/XSAM/otelsql",otel_scope_version="0.26.0",status="idle"} 0 +db_sql_connection_open{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="github.com/XSAM/otelsql",otel_scope_version="0.26.0",status="inuse"} 0 +# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles. 
+# TYPE go_gc_duration_seconds summary +go_gc_duration_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",quantile="0"} 4.5579e-05 +go_gc_duration_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",quantile="0.25"} 5.4385e-05 +go_gc_duration_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",quantile="0.5"} 0.000106201 +go_gc_duration_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",quantile="0.75"} 0.001139404 +go_gc_duration_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",quantile="1"} 0.008502312 +go_gc_duration_seconds_sum{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 0.019999787 +go_gc_duration_seconds_count{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 20 +# HELP target_info Target metadata +# TYPE target_info gauge +target_info{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",service_name="unknown_service:auth",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="1.26.0"} 1 +# HELP http_server_request_size_bytes_total Measures the size of HTTP request messages. 
+# TYPE http_server_request_size_bytes_total counter +http_server_request_size_bytes_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0"} 0 +# HELP process_runtime_go_gc_pause_total_ns_total Cumulative nanoseconds in GC stop-the-world pauses since the program started +# TYPE process_runtime_go_gc_pause_total_ns_total counter +process_runtime_go_gc_pause_total_ns_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0"} 1.9999787e+07 +# HELP go_info Information about the Go environment. +# TYPE go_info gauge +go_info{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",version="go1.23.3"} 1 +# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. +# TYPE go_memstats_last_gc_time_seconds gauge +go_memstats_last_gc_time_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 1.7332412644466076e+09 +# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. +# TYPE go_memstats_heap_sys_bytes gauge +go_memstats_heap_sys_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 7.766016e+06 +# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. 
+# TYPE go_memstats_stack_sys_bytes gauge +go_memstats_stack_sys_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 622592 +# HELP process_runtime_go_mem_heap_released_bytes Bytes of idle spans whose physical memory has been returned to the OS +# TYPE process_runtime_go_mem_heap_released_bytes gauge +process_runtime_go_mem_heap_released_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0"} 4.13696e+06 +# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. +# TYPE go_memstats_alloc_bytes gauge +go_memstats_alloc_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 1.839616e+06 +# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. +# TYPE go_memstats_heap_released_bytes gauge +go_memstats_heap_released_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 4.13696e+06 +# HELP process_virtual_memory_bytes Virtual memory size in bytes. +# TYPE process_virtual_memory_bytes gauge +process_virtual_memory_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 1.28167936e+09 +# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. +# TYPE go_memstats_next_gc_bytes gauge +go_memstats_next_gc_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 4.194304e+06 +# HELP process_runtime_go_cgo_calls Number of cgo calls made by the current process +# TYPE process_runtime_go_cgo_calls gauge +process_runtime_go_cgo_calls{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0"} 0 +# HELP process_start_time_seconds Start time of the process since unix epoch in seconds. 
+# TYPE process_start_time_seconds gauge +process_start_time_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 1.73323901352e+09 +# HELP runtime_uptime_milliseconds_total Milliseconds since application was initialized +# TYPE runtime_uptime_milliseconds_total counter +runtime_uptime_milliseconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0"} 2.250715e+06 +# HELP go_threads Number of OS threads created. +# TYPE go_threads gauge +go_threads{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 8 +# HELP http_server_duration_milliseconds Measures the duration of inbound HTTP requests. +# TYPE http_server_duration_milliseconds histogram +http_server_duration_milliseconds_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0",le="0"} 0 +http_server_duration_milliseconds_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0",le="5"} 0 +http_server_duration_milliseconds_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0",le="10"} 0 
+http_server_duration_milliseconds_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0",le="25"} 1 +http_server_duration_milliseconds_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0",le="50"} 1 +http_server_duration_milliseconds_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0",le="75"} 1 +http_server_duration_milliseconds_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0",le="100"} 1 +http_server_duration_milliseconds_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0",le="250"} 1 
+http_server_duration_milliseconds_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0",le="500"} 1 +http_server_duration_milliseconds_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0",le="750"} 1 +http_server_duration_milliseconds_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0",le="1000"} 1 +http_server_duration_milliseconds_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0",le="2500"} 1 +http_server_duration_milliseconds_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0",le="5000"} 1 
+http_server_duration_milliseconds_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0",le="7500"} 1 +http_server_duration_milliseconds_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0",le="10000"} 1 +http_server_duration_milliseconds_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0",le="+Inf"} 1 +http_server_duration_milliseconds_sum{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0"} 18.807627 +http_server_duration_milliseconds_count{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0"} 1 +# HELP process_resident_memory_bytes Resident memory size in 
bytes. +# TYPE process_resident_memory_bytes gauge +process_resident_memory_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 1.0022912e+07 +# HELP db_sql_connection_wait_duration_milliseconds_total The total time blocked waiting for a new connection +# TYPE db_sql_connection_wait_duration_milliseconds_total counter +db_sql_connection_wait_duration_milliseconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="github.com/XSAM/otelsql",otel_scope_version="0.26.0"} 0 +# HELP db_sql_connection_wait_total The total number of connections waited for +# TYPE db_sql_connection_wait_total counter +db_sql_connection_wait_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="github.com/XSAM/otelsql",otel_scope_version="0.26.0"} 0 +# HELP gotrue_running Whether GoTrue is running (always 1) +# TYPE gotrue_running gauge +gotrue_running{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="gotrue",otel_scope_version=""} 1 +# HELP process_max_fds Maximum number of open file descriptors. +# TYPE process_max_fds gauge +process_max_fds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 524288 +# HELP process_runtime_go_mem_heap_alloc_bytes Bytes of allocated heap objects +# TYPE process_runtime_go_mem_heap_alloc_bytes gauge +process_runtime_go_mem_heap_alloc_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0"} 1.919776e+06 +# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. +# TYPE go_memstats_alloc_bytes_total counter +go_memstats_alloc_bytes_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 9.748968e+06 +# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. 
+# TYPE go_memstats_heap_inuse_bytes gauge +go_memstats_heap_inuse_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 3.4816e+06 +# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. +# TYPE go_memstats_heap_idle_bytes gauge +go_memstats_heap_idle_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 4.284416e+06 +# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. +# TYPE go_memstats_mcache_inuse_bytes gauge +go_memstats_mcache_inuse_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 2400 +# HELP process_runtime_go_gc_pause_ns Amount of nanoseconds in GC stop-the-world pauses +# TYPE process_runtime_go_gc_pause_ns histogram +process_runtime_go_gc_pause_ns_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0",le="0"} 0 +process_runtime_go_gc_pause_ns_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0",le="5"} 0 +process_runtime_go_gc_pause_ns_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0",le="10"} 0 +process_runtime_go_gc_pause_ns_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0",le="25"} 0 +process_runtime_go_gc_pause_ns_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0",le="50"} 0 
+process_runtime_go_gc_pause_ns_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0",le="75"} 0 +process_runtime_go_gc_pause_ns_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0",le="100"} 0 +process_runtime_go_gc_pause_ns_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0",le="250"} 0 +process_runtime_go_gc_pause_ns_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0",le="500"} 0 +process_runtime_go_gc_pause_ns_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0",le="750"} 0 +process_runtime_go_gc_pause_ns_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0",le="1000"} 0 +process_runtime_go_gc_pause_ns_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0",le="2500"} 0 +process_runtime_go_gc_pause_ns_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0",le="5000"} 0 +process_runtime_go_gc_pause_ns_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0",le="7500"} 0 
+process_runtime_go_gc_pause_ns_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0",le="10000"} 0 +process_runtime_go_gc_pause_ns_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0",le="+Inf"} 20 +process_runtime_go_gc_pause_ns_sum{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0"} 1.9999787e+07 +process_runtime_go_gc_pause_ns_count{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0"} 20 +# HELP process_runtime_go_mem_heap_idle_bytes Bytes in idle (unused) spans +# TYPE process_runtime_go_mem_heap_idle_bytes gauge +process_runtime_go_mem_heap_idle_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0"} 4.317184e+06 +# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. +# TYPE go_memstats_buck_hash_sys_bytes gauge +go_memstats_buck_hash_sys_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 1.452855e+06 +# HELP go_memstats_frees_total Total number of frees. 
+# TYPE go_memstats_frees_total counter +go_memstats_frees_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 34970 +# HELP process_runtime_go_mem_heap_objects Number of allocated heap objects +# TYPE process_runtime_go_mem_heap_objects gauge +process_runtime_go_mem_heap_objects{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0"} 10167 +# HELP otel_scope_info Instrumentation Scope metadata +# TYPE otel_scope_info gauge +otel_scope_info{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="github.com/XSAM/otelsql",otel_scope_version="0.26.0"} 1 +otel_scope_info{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0"} 1 +otel_scope_info{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0"} 1 +otel_scope_info{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="gotrue",otel_scope_version=""} 1 +# HELP process_runtime_go_gc_count_total Number of completed garbage collection cycles +# TYPE process_runtime_go_gc_count_total counter +process_runtime_go_gc_count_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0"} 20 +# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. +# TYPE go_memstats_other_sys_bytes gauge +go_memstats_other_sys_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 670129 +# HELP go_memstats_sys_bytes Number of bytes obtained from system. 
+# TYPE go_memstats_sys_bytes gauge +go_memstats_sys_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 1.3718792e+07 +# HELP db_sql_connection_closed_max_idle_total The total number of connections closed due to SetMaxIdleConns +# TYPE db_sql_connection_closed_max_idle_total counter +db_sql_connection_closed_max_idle_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="github.com/XSAM/otelsql",otel_scope_version="0.26.0"} 0 +# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. +# TYPE go_memstats_heap_alloc_bytes gauge +go_memstats_heap_alloc_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 1.839616e+06 +# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. +# TYPE go_memstats_mcache_sys_bytes gauge +go_memstats_mcache_sys_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 15600 +# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. +# TYPE go_memstats_gc_sys_bytes gauge +go_memstats_gc_sys_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 3.11e+06 +# HELP go_memstats_mallocs_total Total number of mallocs. 
+# TYPE go_memstats_mallocs_total counter +go_memstats_mallocs_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 44733 \ No newline at end of file diff --git a/supabase/tests/fixtures/storage_api_metrics.txt b/supabase/tests/fixtures/storage_api_metrics.txt new file mode 100644 index 0000000000000..171cba34e6309 --- /dev/null +++ b/supabase/tests/fixtures/storage_api_metrics.txt @@ -0,0 +1,274 @@ + +# HELP storage_api_upload_started Upload started +# TYPE storage_api_upload_started gauge + +# HELP storage_api_upload_success Successful uploads +# TYPE storage_api_upload_success gauge + +# HELP storage_api_database_query_performance Database query performance +# TYPE storage_api_database_query_performance histogram +storage_api_database_query_performance_bucket{le="0.005",name="ListBuckets"} 0 +storage_api_database_query_performance_bucket{le="0.01",name="ListBuckets"} 0 +storage_api_database_query_performance_bucket{le="0.025",name="ListBuckets"} 0 +storage_api_database_query_performance_bucket{le="0.05",name="ListBuckets"} 1 +storage_api_database_query_performance_bucket{le="0.1",name="ListBuckets"} 1 +storage_api_database_query_performance_bucket{le="0.25",name="ListBuckets"} 1 +storage_api_database_query_performance_bucket{le="0.5",name="ListBuckets"} 1 +storage_api_database_query_performance_bucket{le="1",name="ListBuckets"} 1 +storage_api_database_query_performance_bucket{le="2.5",name="ListBuckets"} 1 +storage_api_database_query_performance_bucket{le="5",name="ListBuckets"} 1 +storage_api_database_query_performance_bucket{le="10",name="ListBuckets"} 1 +storage_api_database_query_performance_bucket{le="+Inf",name="ListBuckets"} 1 +storage_api_database_query_performance_sum{name="ListBuckets"} 0.031662833 +storage_api_database_query_performance_count{name="ListBuckets"} 1 + +# HELP storage_api_queue_job_scheduled_time Time taken to schedule a job in the queue +# TYPE storage_api_queue_job_scheduled_time histogram + +# HELP 
storage_api_queue_job_scheduled Current number of pending messages in the queue +# TYPE storage_api_queue_job_scheduled gauge + +# HELP storage_api_queue_job_completed Current number of processed messages in the queue +# TYPE storage_api_queue_job_completed gauge + +# HELP storage_api_queue_job_retry_failed Current number of failed attempts messages in the queue +# TYPE storage_api_queue_job_retry_failed gauge + +# HELP storage_api_queue_job_error Current number of errored messages in the queue +# TYPE storage_api_queue_job_error gauge + +# HELP storage_api_s3_upload_part S3 upload part performance +# TYPE storage_api_s3_upload_part histogram + +# HELP storage_api_db_pool Number of database pools created +# TYPE storage_api_db_pool gauge +storage_api_db_pool{is_external="false"} 1 + +# HELP storage_api_db_connections Number of database connections +# TYPE storage_api_db_connections gauge +storage_api_db_connections{is_external="false"} 0 + +# HELP storage_api_http_pool_busy_sockets Number of busy sockets currently in use +# TYPE storage_api_http_pool_busy_sockets gauge +storage_api_http_pool_busy_sockets{name="s3_tus",region="stub",protocol="https"} 0 + +# HELP storage_api_http_pool_free_sockets Number of free sockets available for reuse +# TYPE storage_api_http_pool_free_sockets gauge +storage_api_http_pool_free_sockets{name="s3_tus",region="stub",protocol="https"} 0 + +# HELP storage_api_http_pool_requests Number of pending requests waiting for a socket +# TYPE storage_api_http_pool_requests gauge +storage_api_http_pool_requests{name="s3_tus",region="stub"} 0 + +# HELP storage_api_http_pool_errors Number of pending requests waiting for a socket +# TYPE storage_api_http_pool_errors gauge +storage_api_http_pool_errors{name="s3_tus",region="stub",type="socket_error",protocol="https"} 0 +storage_api_http_pool_errors{name="s3_tus",region="stub",type="timeout_socket_error",protocol="https"} 0 
+storage_api_http_pool_errors{name="s3_tus",region="stub",type="create_socket_error",protocol="https"} 0 + +# HELP storage_api_http_request_duration_seconds request duration in seconds +# TYPE storage_api_http_request_duration_seconds histogram +storage_api_http_request_duration_seconds_bucket{le="0.005",method="GET",route="/bucket",status_code="2xx"} 0 +storage_api_http_request_duration_seconds_bucket{le="0.01",method="GET",route="/bucket",status_code="2xx"} 0 +storage_api_http_request_duration_seconds_bucket{le="0.025",method="GET",route="/bucket",status_code="2xx"} 0 +storage_api_http_request_duration_seconds_bucket{le="0.05",method="GET",route="/bucket",status_code="2xx"} 1 +storage_api_http_request_duration_seconds_bucket{le="0.1",method="GET",route="/bucket",status_code="2xx"} 1 +storage_api_http_request_duration_seconds_bucket{le="0.25",method="GET",route="/bucket",status_code="2xx"} 1 +storage_api_http_request_duration_seconds_bucket{le="0.5",method="GET",route="/bucket",status_code="2xx"} 1 +storage_api_http_request_duration_seconds_bucket{le="1",method="GET",route="/bucket",status_code="2xx"} 1 +storage_api_http_request_duration_seconds_bucket{le="2.5",method="GET",route="/bucket",status_code="2xx"} 1 +storage_api_http_request_duration_seconds_bucket{le="5",method="GET",route="/bucket",status_code="2xx"} 1 +storage_api_http_request_duration_seconds_bucket{le="10",method="GET",route="/bucket",status_code="2xx"} 1 +storage_api_http_request_duration_seconds_bucket{le="+Inf",method="GET",route="/bucket",status_code="2xx"} 1 +storage_api_http_request_duration_seconds_sum{method="GET",route="/bucket",status_code="2xx"} 0.043428125 +storage_api_http_request_duration_seconds_count{method="GET",route="/bucket",status_code="2xx"} 1 + +# HELP storage_api_http_request_summary_seconds request duration in seconds summary +# TYPE storage_api_http_request_summary_seconds summary 
+storage_api_http_request_summary_seconds{quantile="0.01",method="GET",route="/bucket",status_code="2xx"} 0.042737459 +storage_api_http_request_summary_seconds{quantile="0.05",method="GET",route="/bucket",status_code="2xx"} 0.042737459 +storage_api_http_request_summary_seconds{quantile="0.5",method="GET",route="/bucket",status_code="2xx"} 0.042737459 +storage_api_http_request_summary_seconds{quantile="0.9",method="GET",route="/bucket",status_code="2xx"} 0.042737459 +storage_api_http_request_summary_seconds{quantile="0.95",method="GET",route="/bucket",status_code="2xx"} 0.042737459 +storage_api_http_request_summary_seconds{quantile="0.99",method="GET",route="/bucket",status_code="2xx"} 0.042737459 +storage_api_http_request_summary_seconds{quantile="0.999",method="GET",route="/bucket",status_code="2xx"} 0.042737459 +storage_api_http_request_summary_seconds_sum{method="GET",route="/bucket",status_code="2xx"} 0.042737459 +storage_api_http_request_summary_seconds_count{method="GET",route="/bucket",status_code="2xx"} 1 + +# HELP storage_api_process_cpu_user_seconds_total Total user CPU time spent in seconds. +# TYPE storage_api_process_cpu_user_seconds_total counter +storage_api_process_cpu_user_seconds_total{region="stub"} 361.14234300000004 + +# HELP storage_api_process_cpu_system_seconds_total Total system CPU time spent in seconds. +# TYPE storage_api_process_cpu_system_seconds_total counter +storage_api_process_cpu_system_seconds_total{region="stub"} 143.664084 + +# HELP storage_api_process_cpu_seconds_total Total user and system CPU time spent in seconds. +# TYPE storage_api_process_cpu_seconds_total counter +storage_api_process_cpu_seconds_total{region="stub"} 504.80642700000004 + +# HELP storage_api_process_start_time_seconds Start time of the process since unix epoch in seconds. 
+# TYPE storage_api_process_start_time_seconds gauge +storage_api_process_start_time_seconds{region="stub"} 1733450910 + +# HELP storage_api_process_resident_memory_bytes Resident memory size in bytes. +# TYPE storage_api_process_resident_memory_bytes gauge +storage_api_process_resident_memory_bytes{region="stub"} 103641088 + +# HELP storage_api_process_virtual_memory_bytes Virtual memory size in bytes. +# TYPE storage_api_process_virtual_memory_bytes gauge +storage_api_process_virtual_memory_bytes{region="stub"} 4783030272 + +# HELP storage_api_process_heap_bytes Process heap size in bytes. +# TYPE storage_api_process_heap_bytes gauge +storage_api_process_heap_bytes{region="stub"} 132231168 + +# HELP storage_api_process_open_fds Number of open file descriptors. +# TYPE storage_api_process_open_fds gauge +storage_api_process_open_fds{region="stub"} 21 + +# HELP storage_api_process_max_fds Maximum number of open file descriptors. +# TYPE storage_api_process_max_fds gauge +storage_api_process_max_fds{region="stub"} 1048576 + +# HELP storage_api_nodejs_eventloop_lag_seconds Lag of event loop in seconds. +# TYPE storage_api_nodejs_eventloop_lag_seconds gauge +storage_api_nodejs_eventloop_lag_seconds{region="stub"} 0.0089925 + +# HELP storage_api_nodejs_eventloop_lag_min_seconds The minimum recorded event loop delay. +# TYPE storage_api_nodejs_eventloop_lag_min_seconds gauge +storage_api_nodejs_eventloop_lag_min_seconds{region="stub"} 0.000014848 + +# HELP storage_api_nodejs_eventloop_lag_max_seconds The maximum recorded event loop delay. +# TYPE storage_api_nodejs_eventloop_lag_max_seconds gauge +storage_api_nodejs_eventloop_lag_max_seconds{region="stub"} 1.198522367 + +# HELP storage_api_nodejs_eventloop_lag_mean_seconds The mean of the recorded event loop delays. 
+# TYPE storage_api_nodejs_eventloop_lag_mean_seconds gauge +storage_api_nodejs_eventloop_lag_mean_seconds{region="stub"} 0.011911191714967564 + +# HELP storage_api_nodejs_eventloop_lag_stddev_seconds The standard deviation of the recorded event loop delays. +# TYPE storage_api_nodejs_eventloop_lag_stddev_seconds gauge +storage_api_nodejs_eventloop_lag_stddev_seconds{region="stub"} 0.0035951748338251626 + +# HELP storage_api_nodejs_eventloop_lag_p50_seconds The 50th percentile of the recorded event loop delays. +# TYPE storage_api_nodejs_eventloop_lag_p50_seconds gauge +storage_api_nodejs_eventloop_lag_p50_seconds{region="stub"} 0.011395071 + +# HELP storage_api_nodejs_eventloop_lag_p90_seconds The 90th percentile of the recorded event loop delays. +# TYPE storage_api_nodejs_eventloop_lag_p90_seconds gauge +storage_api_nodejs_eventloop_lag_p90_seconds{region="stub"} 0.014335999 + +# HELP storage_api_nodejs_eventloop_lag_p99_seconds The 99th percentile of the recorded event loop delays. +# TYPE storage_api_nodejs_eventloop_lag_p99_seconds gauge +storage_api_nodejs_eventloop_lag_p99_seconds{region="stub"} 0.017448959 + +# HELP storage_api_nodejs_active_resources Number of active resources that are currently keeping the event loop alive, grouped by async resource type. +# TYPE storage_api_nodejs_active_resources gauge +storage_api_nodejs_active_resources{type="PipeWrap",region="stub"} 2 +storage_api_nodejs_active_resources{type="TCPSocketWrap",region="stub"} 2 +storage_api_nodejs_active_resources{type="TCPServerWrap",region="stub"} 1 +storage_api_nodejs_active_resources{type="Timeout",region="stub"} 2 +storage_api_nodejs_active_resources{type="Immediate",region="stub"} 1 + +# HELP storage_api_nodejs_active_resources_total Total number of active resources. +# TYPE storage_api_nodejs_active_resources_total gauge +storage_api_nodejs_active_resources_total{region="stub"} 8 + +# HELP storage_api_nodejs_active_handles Number of active libuv handles grouped by handle type. 
Every handle type is C++ class name. +# TYPE storage_api_nodejs_active_handles gauge +storage_api_nodejs_active_handles{type="Socket",region="stub"} 4 +storage_api_nodejs_active_handles{type="Server",region="stub"} 1 + +# HELP storage_api_nodejs_active_handles_total Total number of active handles. +# TYPE storage_api_nodejs_active_handles_total gauge +storage_api_nodejs_active_handles_total{region="stub"} 5 + +# HELP storage_api_nodejs_active_requests Number of active libuv requests grouped by request type. Every request type is C++ class name. +# TYPE storage_api_nodejs_active_requests gauge + +# HELP storage_api_nodejs_active_requests_total Total number of active requests. +# TYPE storage_api_nodejs_active_requests_total gauge +storage_api_nodejs_active_requests_total{region="stub"} 0 + +# HELP storage_api_nodejs_heap_size_total_bytes Process heap size from Node.js in bytes. +# TYPE storage_api_nodejs_heap_size_total_bytes gauge +storage_api_nodejs_heap_size_total_bytes{region="stub"} 51707904 + +# HELP storage_api_nodejs_heap_size_used_bytes Process heap size used from Node.js in bytes. +# TYPE storage_api_nodejs_heap_size_used_bytes gauge +storage_api_nodejs_heap_size_used_bytes{region="stub"} 43002696 + +# HELP storage_api_nodejs_external_memory_bytes Node.js external memory size in bytes. +# TYPE storage_api_nodejs_external_memory_bytes gauge +storage_api_nodejs_external_memory_bytes{region="stub"} 3568105 + +# HELP storage_api_nodejs_heap_space_size_total_bytes Process heap space size total from Node.js in bytes. 
+# TYPE storage_api_nodejs_heap_space_size_total_bytes gauge +storage_api_nodejs_heap_space_size_total_bytes{space="read_only",region="stub"} 0 +storage_api_nodejs_heap_space_size_total_bytes{space="new",region="stub"} 1048576 +storage_api_nodejs_heap_space_size_total_bytes{space="old",region="stub"} 44597248 +storage_api_nodejs_heap_space_size_total_bytes{space="code",region="stub"} 3670016 +storage_api_nodejs_heap_space_size_total_bytes{space="shared",region="stub"} 0 +storage_api_nodejs_heap_space_size_total_bytes{space="new_large_object",region="stub"} 0 +storage_api_nodejs_heap_space_size_total_bytes{space="large_object",region="stub"} 2220032 +storage_api_nodejs_heap_space_size_total_bytes{space="code_large_object",region="stub"} 172032 +storage_api_nodejs_heap_space_size_total_bytes{space="shared_large_object",region="stub"} 0 + +# HELP storage_api_nodejs_heap_space_size_used_bytes Process heap space size used from Node.js in bytes. +# TYPE storage_api_nodejs_heap_space_size_used_bytes gauge +storage_api_nodejs_heap_space_size_used_bytes{space="read_only",region="stub"} 0 +storage_api_nodejs_heap_space_size_used_bytes{space="new",region="stub"} 310136 +storage_api_nodejs_heap_space_size_used_bytes{space="old",region="stub"} 37172576 +storage_api_nodejs_heap_space_size_used_bytes{space="code",region="stub"} 3194320 +storage_api_nodejs_heap_space_size_used_bytes{space="shared",region="stub"} 0 +storage_api_nodejs_heap_space_size_used_bytes{space="new_large_object",region="stub"} 0 +storage_api_nodejs_heap_space_size_used_bytes{space="large_object",region="stub"} 2186264 +storage_api_nodejs_heap_space_size_used_bytes{space="code_large_object",region="stub"} 155296 +storage_api_nodejs_heap_space_size_used_bytes{space="shared_large_object",region="stub"} 0 + +# HELP storage_api_nodejs_heap_space_size_available_bytes Process heap space size available from Node.js in bytes. 
+# TYPE storage_api_nodejs_heap_space_size_available_bytes gauge +storage_api_nodejs_heap_space_size_available_bytes{space="read_only",region="stub"} 0 +storage_api_nodejs_heap_space_size_available_bytes{space="new",region="stub"} 720744 +storage_api_nodejs_heap_space_size_available_bytes{space="old",region="stub"} 6558624 +storage_api_nodejs_heap_space_size_available_bytes{space="code",region="stub"} 246096 +storage_api_nodejs_heap_space_size_available_bytes{space="shared",region="stub"} 0 +storage_api_nodejs_heap_space_size_available_bytes{space="new_large_object",region="stub"} 1048576 +storage_api_nodejs_heap_space_size_available_bytes{space="large_object",region="stub"} 0 +storage_api_nodejs_heap_space_size_available_bytes{space="code_large_object",region="stub"} 0 +storage_api_nodejs_heap_space_size_available_bytes{space="shared_large_object",region="stub"} 0 + +# HELP storage_api_nodejs_version_info Node.js version info. +# TYPE storage_api_nodejs_version_info gauge +storage_api_nodejs_version_info{version="v20.18.0",major="20",minor="18",patch="0",region="stub"} 1 + +# HELP storage_api_nodejs_gc_duration_seconds Garbage collection duration by kind, one of major, minor, incremental or weakcb. 
+# TYPE storage_api_nodejs_gc_duration_seconds histogram +storage_api_nodejs_gc_duration_seconds_bucket{le="0.001",kind="minor",region="stub"} 544 +storage_api_nodejs_gc_duration_seconds_bucket{le="0.01",kind="minor",region="stub"} 1002 +storage_api_nodejs_gc_duration_seconds_bucket{le="0.1",kind="minor",region="stub"} 1006 +storage_api_nodejs_gc_duration_seconds_bucket{le="1",kind="minor",region="stub"} 1006 +storage_api_nodejs_gc_duration_seconds_bucket{le="2",kind="minor",region="stub"} 1006 +storage_api_nodejs_gc_duration_seconds_bucket{le="5",kind="minor",region="stub"} 1006 +storage_api_nodejs_gc_duration_seconds_bucket{le="+Inf",kind="minor",region="stub"} 1006 +storage_api_nodejs_gc_duration_seconds_sum{kind="minor",region="stub"} 1.4403644915223157 +storage_api_nodejs_gc_duration_seconds_count{kind="minor",region="stub"} 1006 +storage_api_nodejs_gc_duration_seconds_bucket{le="0.001",kind="incremental",region="stub"} 4 +storage_api_nodejs_gc_duration_seconds_bucket{le="0.01",kind="incremental",region="stub"} 8 +storage_api_nodejs_gc_duration_seconds_bucket{le="0.1",kind="incremental",region="stub"} 12 +storage_api_nodejs_gc_duration_seconds_bucket{le="1",kind="incremental",region="stub"} 12 +storage_api_nodejs_gc_duration_seconds_bucket{le="2",kind="incremental",region="stub"} 12 +storage_api_nodejs_gc_duration_seconds_bucket{le="5",kind="incremental",region="stub"} 12 +storage_api_nodejs_gc_duration_seconds_bucket{le="+Inf",kind="incremental",region="stub"} 12 +storage_api_nodejs_gc_duration_seconds_sum{kind="incremental",region="stub"} 0.07946879202127458 +storage_api_nodejs_gc_duration_seconds_count{kind="incremental",region="stub"} 12 +storage_api_nodejs_gc_duration_seconds_bucket{le="0.001",kind="major",region="stub"} 0 +storage_api_nodejs_gc_duration_seconds_bucket{le="0.01",kind="major",region="stub"} 10 +storage_api_nodejs_gc_duration_seconds_bucket{le="0.1",kind="major",region="stub"} 11 
+storage_api_nodejs_gc_duration_seconds_bucket{le="1",kind="major",region="stub"} 11 +storage_api_nodejs_gc_duration_seconds_bucket{le="2",kind="major",region="stub"} 11 +storage_api_nodejs_gc_duration_seconds_bucket{le="5",kind="major",region="stub"} 11 +storage_api_nodejs_gc_duration_seconds_bucket{le="+Inf",kind="major",region="stub"} 11 +storage_api_nodejs_gc_duration_seconds_sum{kind="major",region="stub"} 0.04609945893287658 +storage_api_nodejs_gc_duration_seconds_count{kind="major",region="stub"} 11 \ No newline at end of file diff --git a/supabase/tests/test_e2e.py b/supabase/tests/test_e2e.py new file mode 100644 index 0000000000000..3357d5a6d148f --- /dev/null +++ b/supabase/tests/test_e2e.py @@ -0,0 +1,13 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from datadog_checks.base.constants import ServiceCheck +from datadog_checks.dev.utils import assert_service_checks + + +def test_e2e_openmetrics_v2(dd_agent_check): + aggregator = dd_agent_check() + + aggregator.assert_service_check('supabase.openmetrics.health', ServiceCheck.OK, count=1) + aggregator.assert_service_check('supabase.storage_api.openmetrics.health', ServiceCheck.OK, count=1) + assert_service_checks(aggregator) diff --git a/supabase/tests/test_unit.py b/supabase/tests/test_unit.py new file mode 100644 index 0000000000000..774eec9646831 --- /dev/null +++ b/supabase/tests/test_unit.py @@ -0,0 +1,50 @@ +# (C) Datadog, Inc. 
2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +import pytest + +from datadog_checks.base.constants import ServiceCheck +from datadog_checks.dev.utils import get_metadata_metrics +from datadog_checks.supabase import SupabaseCheck + +from .common import ( + PRIVILEGED_METRICS, + PRIVILEGED_METRICS_INSTANCE, + PRIVILEGED_METRICS_NAMESPACE, + STORAGE_API_INSTANCE, + STORAGE_API_METRICS, + STORAGE_API_METRICS_NAMESPACE, + get_fixture_path, +) + + +@pytest.mark.parametrize( + 'namespace, instance, metrics, fixture_name,', + [ + (PRIVILEGED_METRICS_NAMESPACE, PRIVILEGED_METRICS_INSTANCE, PRIVILEGED_METRICS, 'privileged_metrics.txt'), + (STORAGE_API_METRICS_NAMESPACE, STORAGE_API_INSTANCE, STORAGE_API_METRICS, 'storage_api_metrics.txt'), + ], +) +def test_check_mock_supabase_openmetrics( + dd_run_check, instance, aggregator, fixture_name, metrics, mock_http_response, namespace +): + mock_http_response(file_path=get_fixture_path(fixture_name)) + check = SupabaseCheck('supabase', {}, [instance]) + dd_run_check(check) + + for metric in metrics: + aggregator.assert_metric(metric) + aggregator.assert_metric_has_tag(metric, 'test:test') + + aggregator.assert_all_metrics_covered() + aggregator.assert_metrics_using_metadata(get_metadata_metrics()) + aggregator.assert_service_check(f'{namespace}.openmetrics.health', ServiceCheck.OK) + + +def test_empty_instance(dd_run_check): + with pytest.raises( + Exception, + match='Must specify at least one of the following:`privileged_metrics_endpoint` or `storage_api_endpoint`.', + ): + check = SupabaseCheck('supabase', {}, [{}]) + dd_run_check(check)