diff --git a/.builders/deps/build_dependencies.txt b/.builders/deps/build_dependencies.txt index ce24939925ffb..05650ba92bf13 100644 --- a/.builders/deps/build_dependencies.txt +++ b/.builders/deps/build_dependencies.txt @@ -1,13 +1,9 @@ -hatchling==1.21.1; python_version > '3.0' -hatchling==0.25.1; python_version < '3.0' -setuptools==66.1.1; python_version > '3.0' -setuptools==40.9.0; python_version < '3.0' -wheel==0.38.4; python_version > '3.0' -wheel==0.37.1; python_version < '3.0' -setuptools-scm; python_version > '3.0' -setuptools-scm==5.0.2; python_version < '3.0' -setuptools-rust>=1.7.0; python_version > '3.0' -maturin; python_version > '3.0' +hatchling==1.21.1 +setuptools==75.6.0 +wheel==0.38.4 +setuptools-scm +setuptools-rust>=1.7.0 +maturin cffi>=1.12 cython==3.0.11 -tomli>=2.0.1; python_version > '3.0' +tomli>=2.0.1 diff --git a/.builders/images/windows-x86_64/Dockerfile b/.builders/images/windows-x86_64/Dockerfile index 68dcd5e044e47..589f4f85a1ae3 100644 --- a/.builders/images/windows-x86_64/Dockerfile +++ b/.builders/images/windows-x86_64/Dockerfile @@ -35,6 +35,15 @@ RUN curl -SL --output PowerShell-%POWERSHELL_VERSION%-win-x64.msi https://github COPY helpers.ps1 C:\helpers.ps1 SHELL ["pwsh", "-Command", ". 
C:\\helpers.ps1;"] +# Enable long paths +# https://learn.microsoft.com/en-us/windows/win32/fileio/maximum-file-path-limitation?tabs=powershell#registry-setting-to-enable-long-paths +RUN New-ItemProperty -Path "HKLM:\SYSTEM\CurrentControlSet\Control\FileSystem" ` + -Name "LongPathsEnabled" -Value 1 -PropertyType DWORD -Force +# Reduce the chance of hitting path limits (the MSVC compiler cl.exe doesn't seem to respect that option) +# This variable is honored by pip +ENV TMP="C:\tmp" ` + TEMP="C:\tmp" + # Install 7-Zip ZS ENV 7ZIP_VERSION="22.01" ` 7ZIP_ZS_VERSION="1.5.5-R3" @@ -95,10 +104,36 @@ RUN Get-RemoteFile ` Remove-Item $Env:IBM_MQ_VERSION-IBM-MQC-Redist-Win64.zip; ` setx /M MQ_FILE_PATH 'C:\ibm_mq' +# Perl +ENV PERL_VERSION="5.40.0.1" +RUN Get-RemoteFile ` + -Uri https://github.com/StrawberryPerl/Perl-Dist-Strawberry/releases/download/SP_54001_64bit_UCRT/strawberry-perl-$Env:PERL_VERSION-64bit-portable.zip ` + -Path "strawberry-perl-$Env:PERL_VERSION-64bit.zip" ` + -Hash '754f3e2a8e473dc68d1540c7802fb166a025f35ef18960c4564a31f8b5933907' && ` + 7z x "strawberry-perl-$Env:PERL_VERSION-64bit.zip" -o"C:\perl" && ` + Add-ToPath -Append "C:\perl\perl\bin" && ` + Remove-Item "strawberry-perl-$Env:PERL_VERSION-64bit.zip" + +ENV OPENSSL_VERSION="3.3.2" + # Set up runner COPY runner_dependencies.txt C:\runner_dependencies.txt RUN python -m pip install --no-warn-script-location -r C:\runner_dependencies.txt +COPY build_script.ps1 C:\build_script.ps1 +COPY update_librdkafka_manifest.py C:\update_librdkafka_manifest.py +ENV DD_BUILD_COMMAND="pwsh C:\build_script.ps1" + +# Python packages that we want to build regardless of whether prebuilt versions exist on PyPI +ENV PIP_NO_BINARY="confluent_kafka" +# Where to find native dependencies when building extensions and for wheel repairing +RUN New-Item -Path "C:\include" -ItemType Directory +RUN New-Item -Path "C:\lib" -ItemType Directory +RUN New-Item -Path "C:\bin" -ItemType Directory +ENV INCLUDE="C:\include" +ENV 
LIB="C:\lib" +RUN Add-ToPath -Append "C:\bin" + # Restore the default Windows shell for correct batch processing. SHELL ["cmd", "/S", "/C"] diff --git a/.builders/images/windows-x86_64/build_script.ps1 b/.builders/images/windows-x86_64/build_script.ps1 new file mode 100644 index 0000000000000..638b32a747426 --- /dev/null +++ b/.builders/images/windows-x86_64/build_script.ps1 @@ -0,0 +1,57 @@ +$ErrorActionPreference = 'Stop' +$PSNativeCommandUseErrorActionPreference = $true + +. C:\helpers.ps1 + +# The librdkafka version needs to stay in sync with the confluent-kafka version, +# thus we extract the version from the requirements file +$kafka_version = Get-Content 'C:\mnt\requirements.in' | perl -nE 'say/^\D*(\d+\.\d+\.\d+)\D*$/ if /confluent-kafka==/' +Write-Host "Will build librdkafka $kafka_version" + +# Download and unpack the source +Get-RemoteFile ` + -Uri "https://github.com/confluentinc/librdkafka/archive/refs/tags/v${kafka_version}.tar.gz" ` + -Path "librdkafka-${kafka_version}.tar.gz" ` + -Hash '0ddf205ad8d36af0bc72a2fec20639ea02e1d583e353163bf7f4683d949e901b' +7z x "librdkafka-${kafka_version}.tar.gz" -o"C:\" +7z x "C:\librdkafka-${kafka_version}.tar" -o"C:\librdkafka" +Remove-Item "librdkafka-${kafka_version}.tar.gz" + +# Build librdkafka +# Based on this job from upstream: +# https://github.com/confluentinc/librdkafka/blob/cb8c19c43011b66c4b08b25e5150455a247e1ff3/.semaphore/semaphore.yml#L265 +# Install vcpkg +Set-Location "C:\" +$triplet = "x64-windows" +$librdkafka_dir = "C:\librdkafka\librdkafka-${kafka_version}" + +& "${librdkafka_dir}\win32\setup-vcpkg.ps1" +# Get deps +Set-Location "$librdkafka_dir" +# Patch the vcpkg manifest to override the OpenSSL version +python C:\update_librdkafka_manifest.py vcpkg.json --set-version openssl:${Env:OPENSSL_VERSION} + +C:\vcpkg\vcpkg integrate install +C:\vcpkg\vcpkg --feature-flags=versions install --triplet $triplet +# Build +& .\win32\msbuild.ps1 -platform x64 + +# Copy outputs to where they can be 
found +# This is partially inspired by +# https://github.com/confluentinc/librdkafka/blob/cb8c19c43011b66c4b08b25e5150455a247e1ff3/win32/package-zip.ps1 +$toolset = "v142" +$platform = "x64" +$config = "Release" +$srcdir = "win32\outdir\${toolset}\${platform}\$config" +$bindir = "C:\bin" +$libdir = "C:\lib" +$includedir = "C:\include" + +Copy-Item "${srcdir}\librdkafka.dll","${srcdir}\librdkafkacpp.dll", +"${srcdir}\libcrypto-3-x64.dll","${srcdir}\libssl-3-x64.dll", +"${srcdir}\zlib1.dll","${srcdir}\zstd.dll","${srcdir}\libcurl.dll" -Destination $bindir +Copy-Item "${srcdir}\librdkafka.lib","${srcdir}\librdkafkacpp.lib" -Destination $libdir + +New-Item -Path $includedir\librdkafka -ItemType Directory +Copy-Item -Path ".\src\*" -Filter *.h -Destination $includedir\librdkafka + diff --git a/.builders/images/windows-x86_64/update_librdkafka_manifest.py b/.builders/images/windows-x86_64/update_librdkafka_manifest.py new file mode 100644 index 0000000000000..8d3e098ab073e --- /dev/null +++ b/.builders/images/windows-x86_64/update_librdkafka_manifest.py @@ -0,0 +1,25 @@ +import json +from argparse import ArgumentParser + + +def main(manifest_file, versions): + with open(manifest_file) as f: + manifest = json.load(f) + + for dep, version in versions.items(): + manifest.setdefault("overrides", []).append({ + "name": dep, + "version": version, + }) + + with open(manifest_file, 'w') as f: + json.dump(manifest, f) + + +if __name__ == '__main__': + ap = ArgumentParser() + ap.add_argument("file") + ap.add_argument("--set-version", action="append", required=True) + + args = ap.parse_args() + main(args.file, dict(spec.split(':') for spec in args.set_version)) diff --git a/.builders/scripts/repair_wheels.py b/.builders/scripts/repair_wheels.py index 771c536f0f064..60712e5b0b6cf 100644 --- a/.builders/scripts/repair_wheels.py +++ b/.builders/scripts/repair_wheels.py @@ -6,10 +6,12 @@ import shutil import sys import time +from fnmatch import fnmatch from functools import cache from 
hashlib import sha256 from pathlib import Path from typing import Iterator, NamedTuple +from zipfile import ZipFile import urllib3 from utils import extract_metadata, normalize_project_name @@ -64,6 +66,14 @@ def wheel_was_built(wheel: Path) -> bool: return file_hash != wheel_hashes[wheel.name] +def find_patterns_in_wheel(wheel: Path, patterns: list[str]) -> list[str]: + """Returns all found files inside `wheel` that match the given glob-style pattern""" + with ZipFile(wheel) as zf: + names = zf.namelist() + + return [name for name in names for pat in patterns if fnmatch(name, pat)] + + class WheelName(NamedTuple): """Helper class to manipulate wheel names.""" # Note: this implementation ignores build tags (it drops them on parsing) @@ -98,6 +108,12 @@ def repair_linux(source_dir: str, built_dir: str, external_dir: str) -> None: 'libmqic_r.so', }) + external_invalid_file_patterns = [ + # We don't accept OpenSSL in external wheels + '*.libs/libssl*.so.3', + '*.libs/libcrypto*.so.3', + ] + # Hardcoded policy to the minimum we need to currently support policies = WheelPolicies() policy = policies.get_policy_by_name(os.environ['MANYLINUX_POLICY']) @@ -109,8 +125,19 @@ def repair_linux(source_dir: str, built_dir: str, external_dir: str) -> None: for wheel in iter_wheels(source_dir): print(f'--> {wheel.name}') + if not wheel_was_built(wheel): print('Using existing wheel') + + unacceptable_files = find_patterns_in_wheel(wheel, external_invalid_file_patterns) + if unacceptable_files: + print( + f"Found copies of unacceptable files in external wheel '{wheel.name}'", + f'(matching {external_invalid_file_patterns}): ', + unacceptable_files, + ) + sys.exit(1) + shutil.move(wheel, external_dir) continue @@ -140,11 +167,27 @@ def repair_windows(source_dir: str, built_dir: str, external_dir: str) -> None: exclusions = ['mqic.dll'] + external_invalid_file_patterns = [ + # We don't accept OpenSSL in external wheels + '*.libs/libssl-3*.dll', + '*.libs/libcrypto-3*.dll', + ] + for 
wheel in iter_wheels(source_dir): print(f'--> {wheel.name}') if not wheel_was_built(wheel): print('Using existing wheel') + + unacceptable_files = find_patterns_in_wheel(wheel, external_invalid_file_patterns) + if unacceptable_files: + print( + f"Found copies of unacceptable files in external wheel '{wheel.name}'", + f'(matching {external_invalid_file_patterns}): ', + unacceptable_files, + ) + sys.exit(1) + shutil.move(wheel, external_dir) continue @@ -206,6 +249,7 @@ def copy_filt_func(libname): print(f'--> {wheel.name}') if not wheel_was_built(wheel): print('Using existing wheel') + shutil.move(wheel, external_dir) continue diff --git a/.codecov.yml b/.codecov.yml index 105f39b3902da..248475af4b933 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -498,6 +498,10 @@ coverage: target: 75 flags: - pulsar + Quarkus: + target: 75 + flags: + - quarkus RabbitMQ: target: 75 flags: @@ -1373,6 +1377,11 @@ flags: paths: - pulsar/datadog_checks/pulsar - pulsar/tests + quarkus: + carryforward: true + paths: + - quarkus/datadog_checks/quarkus + - quarkus/tests rabbitmq: carryforward: true paths: diff --git a/.deps/image_digests.json b/.deps/image_digests.json index 95d9d3c980fa4..cc352434031bf 100644 --- a/.deps/image_digests.json +++ b/.deps/image_digests.json @@ -1,5 +1,5 @@ { - "linux-aarch64": "sha256:0c67a49a4d4ec217dd0f841ee139eaf061616f6e61c6bc758617d4c50c7a8aa2", - "linux-x86_64": "sha256:5e421218e377e4c1d0769b148e569f4ff4a8c60fbd2be8411db9158a644a0b0a", - "windows-x86_64": "sha256:feefe940fe3f382bf4833bc29a9d614d6f6bb3592258a905a261167184b20eab" + "linux-aarch64": "sha256:03314aedd5b8a67258d476984629004e52f7299123897a83fd2aee8c13a7995a", + "linux-x86_64": "sha256:01a85cfe9b017760a3d485e5e2d01ef263f807797cb8b0d6ce10a9ed76a2026b", + "windows-x86_64": "sha256:869dd119f9b08b08cd21abeec3ae5b2b4c8967dc6ae699c0503ffe1e50bd939b" } diff --git a/.deps/resolved/linux-aarch64_3.12.txt b/.deps/resolved/linux-aarch64_3.12.txt index 0f6b6df2b96c2..f7e8dca8656a7 100644 --- 
a/.deps/resolved/linux-aarch64_3.12.txt +++ b/.deps/resolved/linux-aarch64_3.12.txt @@ -1,7 +1,7 @@ aerospike @ https://agent-int-packages.datadoghq.com/built/aerospike/aerospike-7.1.1-20241015150445-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl#sha256=72935084f1d5a8052c38c2e790b616a9fb24c518b301391075d7d4dcd9be848b annotated-types @ https://agent-int-packages.datadoghq.com/external/annotated-types/annotated_types-0.7.0-py3-none-any.whl#sha256=1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53 asn1crypto @ https://agent-int-packages.datadoghq.com/external/asn1crypto/asn1crypto-1.5.1-py2.py3-none-any.whl#sha256=db4e40728b728508912cbb3d44f19ce188f218e9eba635821bb4b68564f8fd67 -attrs @ https://agent-int-packages.datadoghq.com/external/attrs/attrs-24.2.0-py3-none-any.whl#sha256=81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2 +attrs @ https://agent-int-packages.datadoghq.com/external/attrs/attrs-24.3.0-py3-none-any.whl#sha256=ac96cd038792094f438ad1f6ff80837353805ac950cd2aa0e0625ef19850c308 aws-requests-auth @ https://agent-int-packages.datadoghq.com/external/aws-requests-auth/aws_requests_auth-0.4.3-py2.py3-none-any.whl#sha256=646bc37d62140ea1c709d20148f5d43197e6bd2d63909eb36fa4bb2345759977 azure-core @ https://agent-int-packages.datadoghq.com/external/azure-core/azure_core-1.32.0-py3-none-any.whl#sha256=eac191a0efb23bfa83fddf321b27b122b4ec847befa3091fa736a5c32c50d7b4 azure-identity @ https://agent-int-packages.datadoghq.com/external/azure-identity/azure_identity-1.17.1-py3-none-any.whl#sha256=db8d59c183b680e763722bfe8ebc45930e6c57df510620985939f7f3191e0382 @@ -13,13 +13,13 @@ botocore @ https://agent-int-packages.datadoghq.com/external/botocore/botocore-1 bytecode @ https://agent-int-packages.datadoghq.com/external/bytecode/bytecode-0.16.0-py3-none-any.whl#sha256=76080b7c0eb9e7e17f961d61fd06e933aa47f3b753770a3249537439d8203a25 cachetools @ 
https://agent-int-packages.datadoghq.com/external/cachetools/cachetools-5.5.0-py3-none-any.whl#sha256=02134e8439cdc2ffb62023ce1debca2944c3f289d66bb17ead3ab3dede74b292 cattrs @ https://agent-int-packages.datadoghq.com/external/cattrs/cattrs-24.1.2-py3-none-any.whl#sha256=67c7495b760168d931a10233f979b28dc04daf853b30752246f4f8471c6d68d0 -certifi @ https://agent-int-packages.datadoghq.com/external/certifi/certifi-2024.8.30-py3-none-any.whl#sha256=922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8 +certifi @ https://agent-int-packages.datadoghq.com/external/certifi/certifi-2024.12.14-py3-none-any.whl#sha256=1275f7a45be9464efc1173084eaa30f866fe2e47d389406136d332ed4967ec56 cffi @ https://agent-int-packages.datadoghq.com/external/cffi/cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl#sha256=da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5 charset-normalizer @ https://agent-int-packages.datadoghq.com/external/charset-normalizer/charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl#sha256=b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1 clickhouse-cityhash @ https://agent-int-packages.datadoghq.com/external/clickhouse-cityhash/clickhouse_cityhash-1.0.2.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl#sha256=bbfd83713e5a7a700c4a8200e921bc580fd7cba5f3b9d732172a5d82b12b3e20 clickhouse-driver @ https://agent-int-packages.datadoghq.com/external/clickhouse-driver/clickhouse_driver-0.2.9-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl#sha256=4a8d8e2888a857d8db3d98765a5ad23ab561241feaef68bbffc5a0bd9c142342 cm-client @ https://agent-int-packages.datadoghq.com/built/cm-client/cm_client-45.0.4-20240402155018-py3-none-manylinux2014_aarch64.whl#sha256=aba3c1683ef1b2099933e030464d29b3ad1c206784ebd15d8a7147ecd6ba24e1 -confluent-kafka @ 
https://agent-int-packages.datadoghq.com/built/confluent-kafka/confluent_kafka-2.6.1-20241121135419-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl#sha256=94e5f68705b3f0887b8780058defd64cce9cffaebd5cd3c7fb3d1b34b6fe63f3 +confluent-kafka @ https://agent-int-packages.datadoghq.com/built/confluent-kafka/confluent_kafka-2.6.1-20241216144602-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl#sha256=32d125133b9c34d34d1773ecd5458bf6fafff070fd626537b0ceacef4ef5e658 cryptography @ https://agent-int-packages.datadoghq.com/external/cryptography/cryptography-43.0.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl#sha256=1bbcce1a551e262dfbafb6e6252f1ae36a248e615ca44ba302df077a846a8806 ddsketch @ https://agent-int-packages.datadoghq.com/external/ddsketch/ddsketch-3.0.1-py3-none-any.whl#sha256=6d047b455fe2837c43d366ff1ae6ba0c3166e15499de8688437a75cea914224e ddtrace @ https://agent-int-packages.datadoghq.com/external/ddtrace/ddtrace-2.10.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl#sha256=081bb12a54c46c9804e0645320d827deaff626b9035ba13ac97567149e07cdb5 @@ -30,7 +30,7 @@ dogpile-cache @ https://agent-int-packages.datadoghq.com/external/dogpile-cache/ envier @ https://agent-int-packages.datadoghq.com/external/envier/envier-0.6.1-py3-none-any.whl#sha256=73609040a76be48bbcb97074d9969666484aa0de706183a6e9ef773156a8a6a9 filelock @ https://agent-int-packages.datadoghq.com/external/filelock/filelock-3.16.1-py3-none-any.whl#sha256=2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0 foundationdb @ https://agent-int-packages.datadoghq.com/built/foundationdb/foundationdb-6.3.24-20240402155019-py3-none-manylinux2014_aarch64.whl#sha256=14259f824080062cc890965747597ff00a9d6c76a1eb926673fed68a45860ccd -google-auth @ https://agent-int-packages.datadoghq.com/external/google-auth/google_auth-2.36.0-py2.py3-none-any.whl#sha256=51a15d47028b66fd36e5c64a82d2d57480075bccc7da37cde257fc94177a61fb +google-auth @ 
https://agent-int-packages.datadoghq.com/external/google-auth/google_auth-2.37.0-py2.py3-none-any.whl#sha256=42664f18290a6be591be5329a96fe30184be1a1badb7292a7f686a9659de9ca0 gssapi @ https://agent-int-packages.datadoghq.com/built/gssapi/gssapi-1.9.0-20241016152407-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl#sha256=333df911dc6d0f7eb3efc95eea635be7bea78cf43467e8785210a70eef2d743d hazelcast-python-client @ https://agent-int-packages.datadoghq.com/external/hazelcast-python-client/hazelcast_python_client-5.4.0-py3-none-any.whl#sha256=16195cd58feb2dd3be1594d08d42527ae00797548a6a9d6a601aae2e8514ff5f idna @ https://agent-int-packages.datadoghq.com/external/idna/idna-3.10-py3-none-any.whl#sha256=946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3 @@ -54,7 +54,7 @@ msal-extensions @ https://agent-int-packages.datadoghq.com/external/msal-extensi netifaces @ https://agent-int-packages.datadoghq.com/built/netifaces/netifaces-0.11.0-20241015150447-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl#sha256=b6b2709da2652fae1f14910662237f61f72f37e3acae3dee1edbf8ffe77050c1 oauthlib @ https://agent-int-packages.datadoghq.com/external/oauthlib/oauthlib-3.2.2-py3-none-any.whl#sha256=8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca openstacksdk @ https://agent-int-packages.datadoghq.com/external/openstacksdk/openstacksdk-3.3.0-py3-none-any.whl#sha256=e6d4121b87354984caf0e3c032e2ebf4d4440374f86c81c27ec52ca5df359157 -opentelemetry-api @ https://agent-int-packages.datadoghq.com/external/opentelemetry-api/opentelemetry_api-1.28.2-py3-none-any.whl#sha256=6fcec89e265beb258fe6b1acaaa3c8c705a934bd977b9f534a2b7c0d2d4275a6 +opentelemetry-api @ https://agent-int-packages.datadoghq.com/external/opentelemetry-api/opentelemetry_api-1.29.0-py3-none-any.whl#sha256=5fcd94c4141cc49c736271f3e1efb777bebe9cc535759c54c936cca4f1b312b8 orjson @ 
https://agent-int-packages.datadoghq.com/external/orjson/orjson-3.10.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl#sha256=76ac14cd57df0572453543f8f2575e2d01ae9e790c21f57627803f5e79b0d3c3 os-service-types @ https://agent-int-packages.datadoghq.com/external/os-service-types/os_service_types-1.7.0-py2.py3-none-any.whl#sha256=0505c72205690910077fb72b88f2a1f07533c8d39f2fe75b29583481764965d6 packaging @ https://agent-int-packages.datadoghq.com/external/packaging/packaging-24.1-py3-none-any.whl#sha256=5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124 @@ -79,7 +79,7 @@ pyjwt @ https://agent-int-packages.datadoghq.com/external/pyjwt/PyJWT-2.9.0-py3- pymongo @ https://agent-int-packages.datadoghq.com/external/pymongo/pymongo-4.8.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl#sha256=c6b804bb4f2d9dc389cc9e827d579fa327272cdb0629a99bfe5b83cb3e269ebf pymysql @ https://agent-int-packages.datadoghq.com/external/pymysql/PyMySQL-1.1.1-py3-none-any.whl#sha256=4de15da4c61dc132f4fb9ab763063e693d521a80fd0e87943b9a453dd4c19d6c pynacl @ https://agent-int-packages.datadoghq.com/external/pynacl/PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl#sha256=a36d4a9dda1f19ce6e03c9a784a2921a4b726b02e1c736600ca9c22029474394 -pyodbc @ https://agent-int-packages.datadoghq.com/built/pyodbc/pyodbc-5.1.0-20241015150447-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl#sha256=17fea91c69efad2f266995feda9456c78419fec7a3cb5e40542e809a57f7f497 +pyodbc @ https://agent-int-packages.datadoghq.com/built/pyodbc/pyodbc-5.1.0-20241216144602-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl#sha256=51b8a5fbd54dddbfac541df903a0bee9f30faa80f5f7676dcd11cad3a3d76cfe pyopenssl @ https://agent-int-packages.datadoghq.com/external/pyopenssl/pyOpenSSL-24.2.1-py3-none-any.whl#sha256=967d5719b12b243588573f39b0c677637145c7a1ffedcd495a487e58177fbb8d pysmi @ 
https://agent-int-packages.datadoghq.com/external/pysmi/pysmi-1.2.1-py3-none-any.whl#sha256=d97c60de9f81d33ab2899124d95a94fa7fefacc86ab6e00cbfec543a073e6d33 pysnmp @ https://agent-int-packages.datadoghq.com/external/pysnmp/pysnmp-5.1.0-py3-none-any.whl#sha256=375a8adfc6820faf24ace6761a6d20544e60580d714ff7266df272850c39b439 diff --git a/.deps/resolved/linux-x86_64_3.12.txt b/.deps/resolved/linux-x86_64_3.12.txt index 823a373300dcc..3f73649fb57c0 100644 --- a/.deps/resolved/linux-x86_64_3.12.txt +++ b/.deps/resolved/linux-x86_64_3.12.txt @@ -1,7 +1,7 @@ aerospike @ https://agent-int-packages.datadoghq.com/built/aerospike/aerospike-7.1.1-20241015150437-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl#sha256=65a607b222936c274d7099bcc93616724551db5a7b7e2e101ca3cf0f41549853 annotated-types @ https://agent-int-packages.datadoghq.com/external/annotated-types/annotated_types-0.7.0-py3-none-any.whl#sha256=1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53 asn1crypto @ https://agent-int-packages.datadoghq.com/external/asn1crypto/asn1crypto-1.5.1-py2.py3-none-any.whl#sha256=db4e40728b728508912cbb3d44f19ce188f218e9eba635821bb4b68564f8fd67 -attrs @ https://agent-int-packages.datadoghq.com/external/attrs/attrs-24.2.0-py3-none-any.whl#sha256=81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2 +attrs @ https://agent-int-packages.datadoghq.com/external/attrs/attrs-24.3.0-py3-none-any.whl#sha256=ac96cd038792094f438ad1f6ff80837353805ac950cd2aa0e0625ef19850c308 aws-requests-auth @ https://agent-int-packages.datadoghq.com/external/aws-requests-auth/aws_requests_auth-0.4.3-py2.py3-none-any.whl#sha256=646bc37d62140ea1c709d20148f5d43197e6bd2d63909eb36fa4bb2345759977 azure-core @ https://agent-int-packages.datadoghq.com/external/azure-core/azure_core-1.32.0-py3-none-any.whl#sha256=eac191a0efb23bfa83fddf321b27b122b4ec847befa3091fa736a5c32c50d7b4 azure-identity @ 
https://agent-int-packages.datadoghq.com/external/azure-identity/azure_identity-1.17.1-py3-none-any.whl#sha256=db8d59c183b680e763722bfe8ebc45930e6c57df510620985939f7f3191e0382 @@ -13,13 +13,13 @@ botocore @ https://agent-int-packages.datadoghq.com/external/botocore/botocore-1 bytecode @ https://agent-int-packages.datadoghq.com/external/bytecode/bytecode-0.16.0-py3-none-any.whl#sha256=76080b7c0eb9e7e17f961d61fd06e933aa47f3b753770a3249537439d8203a25 cachetools @ https://agent-int-packages.datadoghq.com/external/cachetools/cachetools-5.5.0-py3-none-any.whl#sha256=02134e8439cdc2ffb62023ce1debca2944c3f289d66bb17ead3ab3dede74b292 cattrs @ https://agent-int-packages.datadoghq.com/external/cattrs/cattrs-24.1.2-py3-none-any.whl#sha256=67c7495b760168d931a10233f979b28dc04daf853b30752246f4f8471c6d68d0 -certifi @ https://agent-int-packages.datadoghq.com/external/certifi/certifi-2024.8.30-py3-none-any.whl#sha256=922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8 +certifi @ https://agent-int-packages.datadoghq.com/external/certifi/certifi-2024.12.14-py3-none-any.whl#sha256=1275f7a45be9464efc1173084eaa30f866fe2e47d389406136d332ed4967ec56 cffi @ https://agent-int-packages.datadoghq.com/external/cffi/cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl#sha256=b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93 charset-normalizer @ https://agent-int-packages.datadoghq.com/external/charset-normalizer/charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl#sha256=8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15 clickhouse-cityhash @ https://agent-int-packages.datadoghq.com/external/clickhouse-cityhash/clickhouse_cityhash-1.0.2.4-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl#sha256=f1f8fec4027cd648f72009ef59c9b76c5a27a33ca166b4e79e46542009429813 clickhouse-driver @ 
https://agent-int-packages.datadoghq.com/external/clickhouse-driver/clickhouse_driver-0.2.9-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl#sha256=6dbcee870c60d9835e5dce1456ab6b9d807e6669246357f4b321ef747b90fa43 cm-client @ https://agent-int-packages.datadoghq.com/built/cm-client/cm_client-45.0.4-20240402154838-py3-none-manylinux2014_x86_64.whl#sha256=aba3c1683ef1b2099933e030464d29b3ad1c206784ebd15d8a7147ecd6ba24e1 -confluent-kafka @ https://agent-int-packages.datadoghq.com/built/confluent-kafka/confluent_kafka-2.6.1-20241121135410-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl#sha256=bd3954ae8bd22d8d8b5e0c77ad6b51751f7da0a61e30018775c3acbf09edd9d3 +confluent-kafka @ https://agent-int-packages.datadoghq.com/built/confluent-kafka/confluent_kafka-2.6.1-20241216144614-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl#sha256=025012b00a824e6476c23d5bdacd6d7aa749a05e09f1d4b8acf3284b0d65f449 cryptography @ https://agent-int-packages.datadoghq.com/external/cryptography/cryptography-43.0.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl#sha256=58d4e9129985185a06d849aa6df265bdd5a74ca6e1b736a77959b498e0505b85 ddsketch @ https://agent-int-packages.datadoghq.com/external/ddsketch/ddsketch-3.0.1-py3-none-any.whl#sha256=6d047b455fe2837c43d366ff1ae6ba0c3166e15499de8688437a75cea914224e ddtrace @ https://agent-int-packages.datadoghq.com/external/ddtrace/ddtrace-2.10.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl#sha256=5fc70ac472093093d9908965d95d977206372a3ddc8a2562acf9dfd57c6864d8 @@ -30,7 +30,7 @@ dogpile-cache @ https://agent-int-packages.datadoghq.com/external/dogpile-cache/ envier @ https://agent-int-packages.datadoghq.com/external/envier/envier-0.6.1-py3-none-any.whl#sha256=73609040a76be48bbcb97074d9969666484aa0de706183a6e9ef773156a8a6a9 filelock @ https://agent-int-packages.datadoghq.com/external/filelock/filelock-3.16.1-py3-none-any.whl#sha256=2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0 foundationdb 
@ https://agent-int-packages.datadoghq.com/built/foundationdb/foundationdb-6.3.24-20240402154840-py3-none-manylinux2014_x86_64.whl#sha256=14259f824080062cc890965747597ff00a9d6c76a1eb926673fed68a45860ccd -google-auth @ https://agent-int-packages.datadoghq.com/external/google-auth/google_auth-2.36.0-py2.py3-none-any.whl#sha256=51a15d47028b66fd36e5c64a82d2d57480075bccc7da37cde257fc94177a61fb +google-auth @ https://agent-int-packages.datadoghq.com/external/google-auth/google_auth-2.37.0-py2.py3-none-any.whl#sha256=42664f18290a6be591be5329a96fe30184be1a1badb7292a7f686a9659de9ca0 gssapi @ https://agent-int-packages.datadoghq.com/built/gssapi/gssapi-1.9.0-20241016152358-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl#sha256=faa3fd9b4beb4120b47338adf0b1bdaa7dd98bc597318e09bb833057cd7dcdcd hazelcast-python-client @ https://agent-int-packages.datadoghq.com/external/hazelcast-python-client/hazelcast_python_client-5.4.0-py3-none-any.whl#sha256=16195cd58feb2dd3be1594d08d42527ae00797548a6a9d6a601aae2e8514ff5f idna @ https://agent-int-packages.datadoghq.com/external/idna/idna-3.10-py3-none-any.whl#sha256=946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3 @@ -54,7 +54,7 @@ msal-extensions @ https://agent-int-packages.datadoghq.com/external/msal-extensi netifaces @ https://agent-int-packages.datadoghq.com/built/netifaces/netifaces-0.11.0-20241015150438-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl#sha256=5e132ccf627697ed6673150de8d2def752eaa7d2e0684226dd04a7d28108eaa1 oauthlib @ https://agent-int-packages.datadoghq.com/external/oauthlib/oauthlib-3.2.2-py3-none-any.whl#sha256=8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca openstacksdk @ https://agent-int-packages.datadoghq.com/external/openstacksdk/openstacksdk-3.3.0-py3-none-any.whl#sha256=e6d4121b87354984caf0e3c032e2ebf4d4440374f86c81c27ec52ca5df359157 -opentelemetry-api @ 
https://agent-int-packages.datadoghq.com/external/opentelemetry-api/opentelemetry_api-1.28.2-py3-none-any.whl#sha256=6fcec89e265beb258fe6b1acaaa3c8c705a934bd977b9f534a2b7c0d2d4275a6 +opentelemetry-api @ https://agent-int-packages.datadoghq.com/external/opentelemetry-api/opentelemetry_api-1.29.0-py3-none-any.whl#sha256=5fcd94c4141cc49c736271f3e1efb777bebe9cc535759c54c936cca4f1b312b8 orjson @ https://agent-int-packages.datadoghq.com/external/orjson/orjson-3.10.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl#sha256=a0c6a008e91d10a2564edbb6ee5069a9e66df3fbe11c9a005cb411f441fd2c09 os-service-types @ https://agent-int-packages.datadoghq.com/external/os-service-types/os_service_types-1.7.0-py2.py3-none-any.whl#sha256=0505c72205690910077fb72b88f2a1f07533c8d39f2fe75b29583481764965d6 packaging @ https://agent-int-packages.datadoghq.com/external/packaging/packaging-24.1-py3-none-any.whl#sha256=5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124 @@ -80,7 +80,7 @@ pymongo @ https://agent-int-packages.datadoghq.com/external/pymongo/pymongo-4.8. 
pymqi @ https://agent-int-packages.datadoghq.com/built/pymqi/pymqi-1.12.10-20241015150439-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl#sha256=4508866a0ddd5e4c591a2041dd562b2efee1aa747a008cae1aa128e9e977a786 pymysql @ https://agent-int-packages.datadoghq.com/external/pymysql/PyMySQL-1.1.1-py3-none-any.whl#sha256=4de15da4c61dc132f4fb9ab763063e693d521a80fd0e87943b9a453dd4c19d6c pynacl @ https://agent-int-packages.datadoghq.com/external/pynacl/PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl#sha256=06b8f6fa7f5de8d5d2f7573fe8c863c051225a27b61e6860fd047b1775807858 -pyodbc @ https://agent-int-packages.datadoghq.com/built/pyodbc/pyodbc-5.1.0-20241015150439-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl#sha256=1e0c322487e65a6a72f94d79eb72f2be95f4c7d693ed0329517b47790cf7c23d +pyodbc @ https://agent-int-packages.datadoghq.com/built/pyodbc/pyodbc-5.1.0-20241216144615-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl#sha256=7119c47c02a52bf4d9a7d24f53a5dd4cefdcf0f067519ac1ffafe0d252f0de83 pyopenssl @ https://agent-int-packages.datadoghq.com/external/pyopenssl/pyOpenSSL-24.2.1-py3-none-any.whl#sha256=967d5719b12b243588573f39b0c677637145c7a1ffedcd495a487e58177fbb8d pysmi @ https://agent-int-packages.datadoghq.com/external/pysmi/pysmi-1.2.1-py3-none-any.whl#sha256=d97c60de9f81d33ab2899124d95a94fa7fefacc86ab6e00cbfec543a073e6d33 pysnmp @ https://agent-int-packages.datadoghq.com/external/pysnmp/pysnmp-5.1.0-py3-none-any.whl#sha256=375a8adfc6820faf24ace6761a6d20544e60580d714ff7266df272850c39b439 diff --git a/.deps/resolved/macos-x86_64_3.12.txt b/.deps/resolved/macos-x86_64_3.12.txt index 734802d8f7240..d4d095573dd0f 100644 --- a/.deps/resolved/macos-x86_64_3.12.txt +++ b/.deps/resolved/macos-x86_64_3.12.txt @@ -1,6 +1,6 @@ annotated-types @ https://agent-int-packages.datadoghq.com/external/annotated-types/annotated_types-0.7.0-py3-none-any.whl#sha256=1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53 asn1crypto @ 
https://agent-int-packages.datadoghq.com/external/asn1crypto/asn1crypto-1.5.1-py2.py3-none-any.whl#sha256=db4e40728b728508912cbb3d44f19ce188f218e9eba635821bb4b68564f8fd67 -attrs @ https://agent-int-packages.datadoghq.com/external/attrs/attrs-24.2.0-py3-none-any.whl#sha256=81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2 +attrs @ https://agent-int-packages.datadoghq.com/external/attrs/attrs-24.3.0-py3-none-any.whl#sha256=ac96cd038792094f438ad1f6ff80837353805ac950cd2aa0e0625ef19850c308 aws-requests-auth @ https://agent-int-packages.datadoghq.com/external/aws-requests-auth/aws_requests_auth-0.4.3-py2.py3-none-any.whl#sha256=646bc37d62140ea1c709d20148f5d43197e6bd2d63909eb36fa4bb2345759977 azure-core @ https://agent-int-packages.datadoghq.com/external/azure-core/azure_core-1.32.0-py3-none-any.whl#sha256=eac191a0efb23bfa83fddf321b27b122b4ec847befa3091fa736a5c32c50d7b4 azure-identity @ https://agent-int-packages.datadoghq.com/external/azure-identity/azure_identity-1.17.1-py3-none-any.whl#sha256=db8d59c183b680e763722bfe8ebc45930e6c57df510620985939f7f3191e0382 @@ -12,13 +12,13 @@ botocore @ https://agent-int-packages.datadoghq.com/external/botocore/botocore-1 bytecode @ https://agent-int-packages.datadoghq.com/external/bytecode/bytecode-0.16.0-py3-none-any.whl#sha256=76080b7c0eb9e7e17f961d61fd06e933aa47f3b753770a3249537439d8203a25 cachetools @ https://agent-int-packages.datadoghq.com/external/cachetools/cachetools-5.5.0-py3-none-any.whl#sha256=02134e8439cdc2ffb62023ce1debca2944c3f289d66bb17ead3ab3dede74b292 cattrs @ https://agent-int-packages.datadoghq.com/external/cattrs/cattrs-24.1.2-py3-none-any.whl#sha256=67c7495b760168d931a10233f979b28dc04daf853b30752246f4f8471c6d68d0 -certifi @ https://agent-int-packages.datadoghq.com/external/certifi/certifi-2024.8.30-py3-none-any.whl#sha256=922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8 +certifi @ 
https://agent-int-packages.datadoghq.com/external/certifi/certifi-2024.12.14-py3-none-any.whl#sha256=1275f7a45be9464efc1173084eaa30f866fe2e47d389406136d332ed4967ec56 cffi @ https://agent-int-packages.datadoghq.com/external/cffi/cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl#sha256=805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4 charset-normalizer @ https://agent-int-packages.datadoghq.com/external/charset-normalizer/charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl#sha256=de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf clickhouse-cityhash @ https://agent-int-packages.datadoghq.com/external/clickhouse-cityhash/clickhouse_cityhash-1.0.2.4-cp312-cp312-macosx_10_9_x86_64.whl#sha256=261fc1b0bf349de66b2d9e3d367879a561b516ca8e54e85e0c27b7c1a4f639b4 clickhouse-driver @ https://agent-int-packages.datadoghq.com/external/clickhouse-driver/clickhouse_driver-0.2.9-cp312-cp312-macosx_10_9_x86_64.whl#sha256=fcb2fd00e58650ae206a6d5dbc83117240e622471aa5124733fbf2805eb8bda0 -cm-client @ https://agent-int-packages.datadoghq.com/built/cm-client/cm_client-45.0.4-20240402154932-py3-none-macosx_10_12_universal2.whl#sha256=aba3c1683ef1b2099933e030464d29b3ad1c206784ebd15d8a7147ecd6ba24e1 -confluent-kafka @ https://agent-int-packages.datadoghq.com/built/confluent-kafka/confluent_kafka-2.6.1-20241205195042-cp312-cp312-macosx_10_13_universal2.whl#sha256=380f7e6798592b4ebbcb5697cd3b6c0aa15e2d5d48af70396461780a6b7e854e +cm-client @ https://agent-int-packages.datadoghq.com/built/cm-client/cm_client-45.0.4-20241216144620-py3-none-macosx_10_12_universal2.whl#sha256=72f55306e2e3df9291ee55e3a6b2f6698fe3999db9570a14da0ea56bbf51e5a9 +confluent-kafka @ https://agent-int-packages.datadoghq.com/built/confluent-kafka/confluent_kafka-2.6.1-20241217110206-cp312-cp312-macosx_10_13_universal2.whl#sha256=88ae1ef90bd70a5b83c7890890ebc736dc02e9246a01b2e21840ffd69eac31dd cryptography @ 
https://agent-int-packages.datadoghq.com/external/cryptography/cryptography-43.0.1-cp39-abi3-macosx_10_9_universal2.whl#sha256=ac119bb76b9faa00f48128b7f5679e1d8d437365c5d26f1c2c3f0da4ce1b553d ddsketch @ https://agent-int-packages.datadoghq.com/external/ddsketch/ddsketch-3.0.1-py3-none-any.whl#sha256=6d047b455fe2837c43d366ff1ae6ba0c3166e15499de8688437a75cea914224e ddtrace @ https://agent-int-packages.datadoghq.com/external/ddtrace/ddtrace-2.10.6-cp312-cp312-macosx_12_0_x86_64.whl#sha256=401f77b0564c3f990b58b9f21055331ca9efcdfa06dfa6ccff13cf21f8329ba5 @@ -28,8 +28,8 @@ dnspython @ https://agent-int-packages.datadoghq.com/external/dnspython/dnspytho dogpile-cache @ https://agent-int-packages.datadoghq.com/external/dogpile-cache/dogpile.cache-1.3.3-py3-none-any.whl#sha256=5e211c4902ebdf88c678d268e22454b41e68071632daa9402d8ee24e825ed8ca envier @ https://agent-int-packages.datadoghq.com/external/envier/envier-0.6.1-py3-none-any.whl#sha256=73609040a76be48bbcb97074d9969666484aa0de706183a6e9ef773156a8a6a9 filelock @ https://agent-int-packages.datadoghq.com/external/filelock/filelock-3.16.1-py3-none-any.whl#sha256=2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0 -foundationdb @ https://agent-int-packages.datadoghq.com/built/foundationdb/foundationdb-6.3.24-20240402154934-py3-none-macosx_10_12_universal2.whl#sha256=14259f824080062cc890965747597ff00a9d6c76a1eb926673fed68a45860ccd -google-auth @ https://agent-int-packages.datadoghq.com/external/google-auth/google_auth-2.36.0-py2.py3-none-any.whl#sha256=51a15d47028b66fd36e5c64a82d2d57480075bccc7da37cde257fc94177a61fb +foundationdb @ https://agent-int-packages.datadoghq.com/built/foundationdb/foundationdb-6.3.24-20241216144621-py3-none-macosx_10_12_universal2.whl#sha256=224873f8472b825a2fb7aba2e8ecf7893303101f7536ec1b96ff7a42ff63d8e1 +google-auth @ 
https://agent-int-packages.datadoghq.com/external/google-auth/google_auth-2.37.0-py2.py3-none-any.whl#sha256=42664f18290a6be591be5329a96fe30184be1a1badb7292a7f686a9659de9ca0 gssapi @ https://agent-int-packages.datadoghq.com/external/gssapi/gssapi-1.9.0-cp312-cp312-macosx_10_13_x86_64.whl#sha256=b66a98827fbd2864bf8993677a039d7ba4a127ca0d2d9ed73e0ef4f1baa7fd7f hazelcast-python-client @ https://agent-int-packages.datadoghq.com/external/hazelcast-python-client/hazelcast_python_client-5.4.0-py3-none-any.whl#sha256=16195cd58feb2dd3be1594d08d42527ae00797548a6a9d6a601aae2e8514ff5f idna @ https://agent-int-packages.datadoghq.com/external/idna/idna-3.10-py3-none-any.whl#sha256=946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3 @@ -50,10 +50,10 @@ lz4 @ https://agent-int-packages.datadoghq.com/external/lz4/lz4-4.3.3-cp312-cp31 mmh3 @ https://agent-int-packages.datadoghq.com/external/mmh3/mmh3-4.1.0-cp312-cp312-macosx_10_9_x86_64.whl#sha256=d6af3e2287644b2b08b5924ed3a88c97b87b44ad08e79ca9f93d3470a54a41c5 msal @ https://agent-int-packages.datadoghq.com/external/msal/msal-1.31.1-py3-none-any.whl#sha256=29d9882de247e96db01386496d59f29035e5e841bcac892e6d7bf4390bf6bd17 msal-extensions @ https://agent-int-packages.datadoghq.com/external/msal-extensions/msal_extensions-1.2.0-py3-none-any.whl#sha256=cf5ba83a2113fa6dc011a254a72f1c223c88d7dfad74cc30617c4679a417704d -netifaces @ https://agent-int-packages.datadoghq.com/built/netifaces/netifaces-0.11.0-20241205195042-cp312-cp312-macosx_10_13_universal2.whl#sha256=66a155ae114ae885a4a15604cd39e93e12c7dc024132de958e10ced6f375856a +netifaces @ https://agent-int-packages.datadoghq.com/built/netifaces/netifaces-0.11.0-20241217110207-cp312-cp312-macosx_10_13_universal2.whl#sha256=b9bed2e4521f4546495e04d8f2b95085a9c83e659ec5785a084e8659f3b57889 oauthlib @ https://agent-int-packages.datadoghq.com/external/oauthlib/oauthlib-3.2.2-py3-none-any.whl#sha256=8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca openstacksdk @ 
https://agent-int-packages.datadoghq.com/external/openstacksdk/openstacksdk-3.3.0-py3-none-any.whl#sha256=e6d4121b87354984caf0e3c032e2ebf4d4440374f86c81c27ec52ca5df359157 -opentelemetry-api @ https://agent-int-packages.datadoghq.com/external/opentelemetry-api/opentelemetry_api-1.28.2-py3-none-any.whl#sha256=6fcec89e265beb258fe6b1acaaa3c8c705a934bd977b9f534a2b7c0d2d4275a6 +opentelemetry-api @ https://agent-int-packages.datadoghq.com/external/opentelemetry-api/opentelemetry_api-1.29.0-py3-none-any.whl#sha256=5fcd94c4141cc49c736271f3e1efb777bebe9cc535759c54c936cca4f1b312b8 orjson @ https://agent-int-packages.datadoghq.com/external/orjson/orjson-3.10.7-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl#sha256=44a96f2d4c3af51bfac6bc4ef7b182aa33f2f054fd7f34cc0ee9a320d051d41f os-service-types @ https://agent-int-packages.datadoghq.com/external/os-service-types/os_service_types-1.7.0-py2.py3-none-any.whl#sha256=0505c72205690910077fb72b88f2a1f07533c8d39f2fe75b29583481764965d6 packaging @ https://agent-int-packages.datadoghq.com/external/packaging/packaging-24.1-py3-none-any.whl#sha256=5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124 @@ -76,7 +76,7 @@ pydantic @ https://agent-int-packages.datadoghq.com/external/pydantic/pydantic-2 pydantic-core @ https://agent-int-packages.datadoghq.com/external/pydantic-core/pydantic_core-2.20.1-cp312-cp312-macosx_10_12_x86_64.whl#sha256=595ba5be69b35777474fa07f80fc260ea71255656191adb22a8c53aba4479231 pyjwt @ https://agent-int-packages.datadoghq.com/external/pyjwt/PyJWT-2.9.0-py3-none-any.whl#sha256=3b02fb0f44517787776cf48f2ae25d8e14f300e6d7545a4315cee571a415e850 pymongo @ https://agent-int-packages.datadoghq.com/external/pymongo/pymongo-4.8.0-cp312-cp312-macosx_10_9_x86_64.whl#sha256=e6a720a3d22b54183352dc65f08cd1547204d263e0651b213a0a2e577e838526 -pymqi @ 
https://agent-int-packages.datadoghq.com/built/pymqi/pymqi-1.12.10-20241205195043-cp312-cp312-macosx_10_13_universal2.whl#sha256=b436d180ff1d3ffa5094a610721038aa678155038597351179b19ddeb28f507d +pymqi @ https://agent-int-packages.datadoghq.com/built/pymqi/pymqi-1.12.10-20241217110207-cp312-cp312-macosx_10_13_universal2.whl#sha256=8e35a9b5f25877bbc08c2c17491e239780f10009a00c991d7099d9fbb8699e7d pymysql @ https://agent-int-packages.datadoghq.com/external/pymysql/PyMySQL-1.1.1-py3-none-any.whl#sha256=4de15da4c61dc132f4fb9ab763063e693d521a80fd0e87943b9a453dd4c19d6c pynacl @ https://agent-int-packages.datadoghq.com/external/pynacl/PyNaCl-1.5.0-cp36-abi3-macosx_10_10_universal2.whl#sha256=401002a4aaa07c9414132aaed7f6836ff98f59277a234704ff66878c2ee4a0d1 pyodbc @ https://agent-int-packages.datadoghq.com/external/pyodbc/pyodbc-5.1.0-cp312-cp312-macosx_10_9_x86_64.whl#sha256=d3d9cc4af703c4817b6e604315910b0cf5dcb68056d52b25ca072dd59c52dcbc @@ -91,7 +91,7 @@ python-binary-memcached @ https://agent-int-packages.datadoghq.com/external/pyth python-dateutil @ https://agent-int-packages.datadoghq.com/external/python-dateutil/python_dateutil-2.9.0.post0-py2.py3-none-any.whl#sha256=a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427 python3-gearman @ https://agent-int-packages.datadoghq.com/external/python3-gearman/python3_gearman-0.1.0-py3-none-any.whl#sha256=4a5808d3a0bfc6c243548ad57e7aab4bee62c9cba2b1c3a860fdd292d46a112d pytz @ https://agent-int-packages.datadoghq.com/external/pytz/pytz-2024.2-py2.py3-none-any.whl#sha256=31c7c1817eb7fae7ca4b8c7ee50c72f93aa2dd863de768e1ef4245d426aa0725 -pyvmomi @ https://agent-int-packages.datadoghq.com/built/pyvmomi/pyvmomi-8.0.3.0.1-20240702172113-py2.py3-none-macosx_10_12_universal2.whl#sha256=e173daf28895975b57850fef301837f24fba59dd8ff1d931795732e9be281d57 +pyvmomi @ 
https://agent-int-packages.datadoghq.com/built/pyvmomi/pyvmomi-8.0.3.0.1-20241216144622-py2.py3-none-macosx_10_12_universal2.whl#sha256=0d5c361674512c65adbdc32e05eaffb60be72b246efb3ea1ce2edb10dd6698c8 pyyaml @ https://agent-int-packages.datadoghq.com/external/pyyaml/PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl#sha256=c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab redis @ https://agent-int-packages.datadoghq.com/external/redis/redis-5.0.8-py3-none-any.whl#sha256=56134ee08ea909106090934adc36f65c9bcbbaecea5b21ba704ba6fb561f8eb4 requests @ https://agent-int-packages.datadoghq.com/external/requests/requests-2.32.3-py3-none-any.whl#sha256=70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6 @@ -120,7 +120,7 @@ tuf @ https://agent-int-packages.datadoghq.com/external/tuf/tuf-4.0.0-py3-none-a typing-extensions @ https://agent-int-packages.datadoghq.com/external/typing-extensions/typing_extensions-4.12.2-py3-none-any.whl#sha256=04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d tzlocal @ https://agent-int-packages.datadoghq.com/external/tzlocal/tzlocal-5.2-py3-none-any.whl#sha256=49816ef2fe65ea8ac19d19aa7a1ae0551c834303d5014c6d5a62e4cbda8047b8 uhashring @ https://agent-int-packages.datadoghq.com/external/uhashring/uhashring-2.3-py3-none-any.whl#sha256=7ee8a25ca495a97effad10bd563c83b4054a6d7606d9530757049a04edab9297 -uptime @ https://agent-int-packages.datadoghq.com/built/uptime/uptime-3.0.1-20241205195044-cp312-cp312-macosx_10_13_universal2.whl#sha256=63083f38ee611a28940cfa670df6a45df4d2d8b1960640c5480febac2f326134 +uptime @ https://agent-int-packages.datadoghq.com/built/uptime/uptime-3.0.1-20241217110208-cp312-cp312-macosx_10_13_universal2.whl#sha256=f58a728a54278b54a114f7d4afb5864e629cf7c4afd070f5468bb9fd6d118653 urllib3 @ https://agent-int-packages.datadoghq.com/external/urllib3/urllib3-2.2.3-py3-none-any.whl#sha256=ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac vertica-python @ 
https://agent-int-packages.datadoghq.com/external/vertica-python/vertica_python-1.4.0-py3-none-any.whl#sha256=50fecd7687f4b0b9f6dee6e2b35c195af2a4f702ece01bd12e080b51756e000b websocket-client @ https://agent-int-packages.datadoghq.com/external/websocket-client/websocket_client-1.8.0-py3-none-any.whl#sha256=17b44cc997f5c498e809b22cdf2d9c7a9e71c02c8cc2b6c56e7c2d1239bfa526 diff --git a/.deps/resolved/windows-x86_64_3.12.txt b/.deps/resolved/windows-x86_64_3.12.txt index 67094404c3e2f..47418fd5b4ddb 100644 --- a/.deps/resolved/windows-x86_64_3.12.txt +++ b/.deps/resolved/windows-x86_64_3.12.txt @@ -1,6 +1,6 @@ annotated-types @ https://agent-int-packages.datadoghq.com/external/annotated-types/annotated_types-0.7.0-py3-none-any.whl#sha256=1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53 asn1crypto @ https://agent-int-packages.datadoghq.com/external/asn1crypto/asn1crypto-1.5.1-py2.py3-none-any.whl#sha256=db4e40728b728508912cbb3d44f19ce188f218e9eba635821bb4b68564f8fd67 -attrs @ https://agent-int-packages.datadoghq.com/external/attrs/attrs-24.2.0-py3-none-any.whl#sha256=81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2 +attrs @ https://agent-int-packages.datadoghq.com/external/attrs/attrs-24.3.0-py3-none-any.whl#sha256=ac96cd038792094f438ad1f6ff80837353805ac950cd2aa0e0625ef19850c308 aws-requests-auth @ https://agent-int-packages.datadoghq.com/external/aws-requests-auth/aws_requests_auth-0.4.3-py2.py3-none-any.whl#sha256=646bc37d62140ea1c709d20148f5d43197e6bd2d63909eb36fa4bb2345759977 azure-core @ https://agent-int-packages.datadoghq.com/external/azure-core/azure_core-1.32.0-py3-none-any.whl#sha256=eac191a0efb23bfa83fddf321b27b122b4ec847befa3091fa736a5c32c50d7b4 azure-identity @ https://agent-int-packages.datadoghq.com/external/azure-identity/azure_identity-1.17.1-py3-none-any.whl#sha256=db8d59c183b680e763722bfe8ebc45930e6c57df510620985939f7f3191e0382 @@ -12,13 +12,13 @@ botocore @ 
https://agent-int-packages.datadoghq.com/external/botocore/botocore-1 bytecode @ https://agent-int-packages.datadoghq.com/external/bytecode/bytecode-0.16.0-py3-none-any.whl#sha256=76080b7c0eb9e7e17f961d61fd06e933aa47f3b753770a3249537439d8203a25 cachetools @ https://agent-int-packages.datadoghq.com/external/cachetools/cachetools-5.5.0-py3-none-any.whl#sha256=02134e8439cdc2ffb62023ce1debca2944c3f289d66bb17ead3ab3dede74b292 cattrs @ https://agent-int-packages.datadoghq.com/external/cattrs/cattrs-24.1.2-py3-none-any.whl#sha256=67c7495b760168d931a10233f979b28dc04daf853b30752246f4f8471c6d68d0 -certifi @ https://agent-int-packages.datadoghq.com/external/certifi/certifi-2024.8.30-py3-none-any.whl#sha256=922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8 +certifi @ https://agent-int-packages.datadoghq.com/external/certifi/certifi-2024.12.14-py3-none-any.whl#sha256=1275f7a45be9464efc1173084eaa30f866fe2e47d389406136d332ed4967ec56 cffi @ https://agent-int-packages.datadoghq.com/external/cffi/cffi-1.17.1-cp312-cp312-win_amd64.whl#sha256=51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903 charset-normalizer @ https://agent-int-packages.datadoghq.com/external/charset-normalizer/charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl#sha256=b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9 clickhouse-cityhash @ https://agent-int-packages.datadoghq.com/external/clickhouse-cityhash/clickhouse_cityhash-1.0.2.4-cp312-cp312-win_amd64.whl#sha256=0409917be29f5ad80a6772712fce954b5e81450555636e8523290ee9740a2dbb clickhouse-driver @ https://agent-int-packages.datadoghq.com/external/clickhouse-driver/clickhouse_driver-0.2.9-cp312-cp312-win_amd64.whl#sha256=de6624e28eeffd01668803d28ae89e3d4e359b1bff8b60e4933e1cb3c6f86f18 cm-client @ https://agent-int-packages.datadoghq.com/built/cm-client/cm_client-45.0.4-20240402154627-py3-none-win_amd64.whl#sha256=1743b32a221d2a0804b4e425ffd53468e8f1754da217fe1e7bd9ff7800fd90f8 -confluent-kafka @ 
https://agent-int-packages.datadoghq.com/external/confluent-kafka/confluent_kafka-2.6.1-cp312-cp312-win_amd64.whl#sha256=b17da915fc35b1bef49d599f685656f65f379094dbbc7aafc5ede1843cc72699 +confluent-kafka @ https://agent-int-packages.datadoghq.com/built/confluent-kafka/confluent_kafka-2.6.1-20241217110200-cp312-cp312-win_amd64.whl#sha256=d01ab115eeacd12a46bb6a0257d3795659410950359c9b8b41ed5a506d7a00d7 cryptography @ https://agent-int-packages.datadoghq.com/external/cryptography/cryptography-43.0.1-cp39-abi3-win_amd64.whl#sha256=d75601ad10b059ec832e78823b348bfa1a59f6b8d545db3a24fd44362a1564cb ddsketch @ https://agent-int-packages.datadoghq.com/external/ddsketch/ddsketch-3.0.1-py3-none-any.whl#sha256=6d047b455fe2837c43d366ff1ae6ba0c3166e15499de8688437a75cea914224e ddtrace @ https://agent-int-packages.datadoghq.com/external/ddtrace/ddtrace-2.10.6-cp312-cp312-win_amd64.whl#sha256=bb183a535e5b24828a45901babd9fd15a1350c9d5096de5ba463287d0c8c64d1 @@ -29,7 +29,7 @@ dogpile-cache @ https://agent-int-packages.datadoghq.com/external/dogpile-cache/ envier @ https://agent-int-packages.datadoghq.com/external/envier/envier-0.6.1-py3-none-any.whl#sha256=73609040a76be48bbcb97074d9969666484aa0de706183a6e9ef773156a8a6a9 filelock @ https://agent-int-packages.datadoghq.com/external/filelock/filelock-3.16.1-py3-none-any.whl#sha256=2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0 foundationdb @ https://agent-int-packages.datadoghq.com/built/foundationdb/foundationdb-6.3.24-20240402154628-py3-none-win_amd64.whl#sha256=07e8e97e51dc9248d58d60d33076b82380135c31ab3727a33b885cea17e34bc7 -google-auth @ https://agent-int-packages.datadoghq.com/external/google-auth/google_auth-2.36.0-py2.py3-none-any.whl#sha256=51a15d47028b66fd36e5c64a82d2d57480075bccc7da37cde257fc94177a61fb +google-auth @ https://agent-int-packages.datadoghq.com/external/google-auth/google_auth-2.37.0-py2.py3-none-any.whl#sha256=42664f18290a6be591be5329a96fe30184be1a1badb7292a7f686a9659de9ca0 
hazelcast-python-client @ https://agent-int-packages.datadoghq.com/external/hazelcast-python-client/hazelcast_python_client-5.4.0-py3-none-any.whl#sha256=16195cd58feb2dd3be1594d08d42527ae00797548a6a9d6a601aae2e8514ff5f idna @ https://agent-int-packages.datadoghq.com/external/idna/idna-3.10-py3-none-any.whl#sha256=946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3 importlib-metadata @ https://agent-int-packages.datadoghq.com/external/importlib-metadata/importlib_metadata-8.5.0-py3-none-any.whl#sha256=45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b @@ -51,7 +51,7 @@ msal-extensions @ https://agent-int-packages.datadoghq.com/external/msal-extensi netifaces @ https://agent-int-packages.datadoghq.com/built/netifaces/netifaces-0.11.0-20240830145553-cp312-cp312-win_amd64.whl#sha256=a1ba522e63fb6b220e7fe668767f334662afa9c56eca18b361bd1f88863ab59a oauthlib @ https://agent-int-packages.datadoghq.com/external/oauthlib/oauthlib-3.2.2-py3-none-any.whl#sha256=8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca openstacksdk @ https://agent-int-packages.datadoghq.com/external/openstacksdk/openstacksdk-3.3.0-py3-none-any.whl#sha256=e6d4121b87354984caf0e3c032e2ebf4d4440374f86c81c27ec52ca5df359157 -opentelemetry-api @ https://agent-int-packages.datadoghq.com/external/opentelemetry-api/opentelemetry_api-1.28.2-py3-none-any.whl#sha256=6fcec89e265beb258fe6b1acaaa3c8c705a934bd977b9f534a2b7c0d2d4275a6 +opentelemetry-api @ https://agent-int-packages.datadoghq.com/external/opentelemetry-api/opentelemetry_api-1.29.0-py3-none-any.whl#sha256=5fcd94c4141cc49c736271f3e1efb777bebe9cc535759c54c936cca4f1b312b8 orjson @ https://agent-int-packages.datadoghq.com/external/orjson/orjson-3.10.7-cp312-none-win_amd64.whl#sha256=1d9c0e733e02ada3ed6098a10a8ee0052dd55774de3d9110d29868d24b17faa1 os-service-types @ 
https://agent-int-packages.datadoghq.com/external/os-service-types/os_service_types-1.7.0-py2.py3-none-any.whl#sha256=0505c72205690910077fb72b88f2a1f07533c8d39f2fe75b29583481764965d6 packaging @ https://agent-int-packages.datadoghq.com/external/packaging/packaging-24.1-py3-none-any.whl#sha256=5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124 diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index d8c26b7baf5be..e6c09e4e626b2 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -243,6 +243,17 @@ datadog_checks_base/datadog_checks/base/checks/windows/ @DataDog/wi /wmi_check/manifest.json @DataDog/windows-agent @DataDog/agent-integrations @DataDog/documentation # SaaS Integrations +/authorize_net/ @DataDog/saas-integrations +/authorize_net/*.md @DataDog/saas-integrations @DataDog/documentation +/authorize_net/manifest.json @DataDog/saas-integrations @DataDog/documentation +/authorize_net/metadata.csv @DataDog/saas-integrations @DataDog/documentation +/authorize_net/assets/logs/ @DataDog/saas-integrations @DataDog/documentation @DataDog/logs-backend + +/brevo/ @DataDog/saas-integrations +/brevo/*.md @DataDog/saas-integrations @DataDog/documentation +/brevo/manifest.json @DataDog/saas-integrations @DataDog/documentation +/brevo/assets/logs/ @DataDog/saas-integrations @DataDog/documentation @DataDog/logs-backend + /cisco_umbrella_dns/ @DataDog/saas-integrations /cisco_umbrella_dns/*.md @DataDog/saas-integrations @DataDog/documentation /cisco_umbrella_dns/manifest.json @DataDog/saas-integrations @DataDog/documentation @@ -361,6 +372,11 @@ datadog_checks_base/datadog_checks/base/checks/windows/ @DataDog/wi /temporal_cloud/manifest.json @DataDog/saas-integrations @DataDog/documentation /temporal_cloud/metadata.csv @DataDog/saas-integrations @DataDog/documentation +/temporal_cloud/ @DataDog/saas-integrations +/temporal_cloud/*.md @DataDog/saas-integrations @DataDog/documentation +/temporal_cloud/manifest.json @DataDog/saas-integrations 
@DataDog/documentation +/temporal_cloud/metadata.csv @DataDog/saas-integrations @DataDog/documentation + /trend_micro_email_security/ @DataDog/saas-integrations /trend_micro_email_security/*.md @DataDog/saas-integrations @DataDog/documentation /trend_micro_email_security/manifest.json @DataDog/saas-integrations @DataDog/documentation diff --git a/.github/workflows/config/labeler.yml b/.github/workflows/config/labeler.yml index a8a9d564a7b10..7b5f903cc233b 100644 --- a/.github/workflows/config/labeler.yml +++ b/.github/workflows/config/labeler.yml @@ -73,6 +73,8 @@ integration/asana: - asana/**/* integration/aspdotnet: - aspdotnet/**/* +integration/authorize_net: +- authorize_net/**/* integration/avast: - avast/**/* integration/avi_vantage: @@ -85,6 +87,8 @@ integration/azure_iot_edge: - azure_iot_edge/**/* integration/boundary: - boundary/**/* +integration/brevo: +- brevo/**/* integration/btrfs: - btrfs/**/* integration/cacti: @@ -439,6 +443,8 @@ integration/proxysql: - proxysql/**/* integration/pulsar: - pulsar/**/* +integration/quarkus: +- quarkus/**/* integration/rabbitmq: - rabbitmq/**/* integration/ray: diff --git a/.github/workflows/test-all.yml b/.github/workflows/test-all.yml index 606f6125af01e..d1701ec09df8b 100644 --- a/.github/workflows/test-all.yml +++ b/.github/workflows/test-all.yml @@ -2994,6 +2994,26 @@ jobs: minimum-base-package: ${{ inputs.minimum-base-package }} pytest-args: ${{ inputs.pytest-args }} secrets: inherit + jcc156e5: + uses: ./.github/workflows/test-target.yml + with: + job-name: Quarkus + target: quarkus + platform: linux + runner: '["ubuntu-22.04"]' + repo: "${{ inputs.repo }}" + python-version: "${{ inputs.python-version }}" + standard: ${{ inputs.standard }} + latest: ${{ inputs.latest }} + agent-image: "${{ inputs.agent-image }}" + agent-image-py2: "${{ inputs.agent-image-py2 }}" + agent-image-windows: "${{ inputs.agent-image-windows }}" + agent-image-windows-py2: "${{ inputs.agent-image-windows-py2 }}" + test-py2: ${{ 
inputs.test-py2 }} + test-py3: ${{ inputs.test-py3 }} + minimum-base-package: ${{ inputs.minimum-base-package }} + pytest-args: ${{ inputs.pytest-args }} + secrets: inherit j694032b: uses: ./.github/workflows/test-target.yml with: diff --git a/AGENT_CHANGELOG.md b/AGENT_CHANGELOG.md index bff904b2455a0..5fc1284d2090b 100644 --- a/AGENT_CHANGELOG.md +++ b/AGENT_CHANGELOG.md @@ -1,3 +1,24 @@ +## Datadog Agent version [7.60.1](https://github.com/DataDog/datadog-agent/blob/master/CHANGELOG.rst#7601) + +* There were no integration updates for this version of the Agent. + +## Datadog Agent version [7.60.0](https://github.com/DataDog/datadog-agent/blob/master/CHANGELOG.rst#7600) + +* Datadog Checks Base [37.2.0](https://github.com/DataDog/integrations-core/blob/master/datadog_checks_base/CHANGELOG.md) +* Datadog Checks Downloader [6.1.0](https://github.com/DataDog/integrations-core/blob/master/datadog_checks_downloader/CHANGELOG.md) +* Cisco ACI [4.1.0](https://github.com/DataDog/integrations-core/blob/master/cisco_aci/CHANGELOG.md) +* Cloudera [3.2.0](https://github.com/DataDog/integrations-core/blob/master/cloudera/CHANGELOG.md) +* Datadog Cluster Agent [5.1.0](https://github.com/DataDog/integrations-core/blob/master/datadog_cluster_agent/CHANGELOG.md) +* Kafka Consumer [6.1.0](https://github.com/DataDog/integrations-core/blob/master/kafka_consumer/CHANGELOG.md) +* MongoDB [8.2.1](https://github.com/DataDog/integrations-core/blob/master/mongo/CHANGELOG.md) +* MySQL [14.2.0](https://github.com/DataDog/integrations-core/blob/master/mysql/CHANGELOG.md) +* Network [5.1.0](https://github.com/DataDog/integrations-core/blob/master/network/CHANGELOG.md) +* Postgres [22.2.0](https://github.com/DataDog/integrations-core/blob/master/postgres/CHANGELOG.md) +* Slurm [1.0.1](https://github.com/DataDog/integrations-core/blob/master/slurm/CHANGELOG.md) +* Spark [6.1.0](https://github.com/DataDog/integrations-core/blob/master/spark/CHANGELOG.md) +* SQL Server 
[20.1.1](https://github.com/DataDog/integrations-core/blob/master/sqlserver/CHANGELOG.md) +* vSphere [8.0.1](https://github.com/DataDog/integrations-core/blob/master/vsphere/CHANGELOG.md) + ## Datadog Agent version [7.59.0](https://github.com/DataDog/datadog-agent/blob/master/CHANGELOG.rst#7590) * Active Directory [4.0.0](https://github.com/DataDog/integrations-core/blob/master/active_directory/CHANGELOG.md) **BREAKING CHANGE** diff --git a/AGENT_INTEGRATIONS.md b/AGENT_INTEGRATIONS.md index fe45bc20ce74d..39805f6cabdba 100644 --- a/AGENT_INTEGRATIONS.md +++ b/AGENT_INTEGRATIONS.md @@ -1,3 +1,417 @@ +## Datadog Agent version 7.60.1 + +* datadog-active-directory: 4.0.0 +* datadog-activemq-xml: 5.0.0 +* datadog-activemq: 5.0.0 +* datadog-aerospike: 4.0.0 +* datadog-airflow: 6.1.0 +* datadog-amazon-msk: 6.0.0 +* datadog-ambari: 6.0.0 +* datadog-apache: 6.0.0 +* datadog-appgate-sdp: 1.0.0 +* datadog-arangodb: 3.1.0 +* datadog-argo-rollouts: 2.1.0 +* datadog-argo-workflows: 2.1.0 +* datadog-argocd: 3.1.0 +* datadog-aspdotnet: 4.0.0 +* datadog-avi-vantage: 5.1.0 +* datadog-aws-neuron: 2.0.1 +* datadog-azure-iot-edge: 6.0.0 +* datadog-boundary: 3.1.0 +* datadog-btrfs: 4.0.0 +* datadog-cacti: 4.0.0 +* datadog-calico: 4.0.0 +* datadog-cassandra-nodetool: 3.0.0 +* datadog-cassandra: 3.0.0 +* datadog-ceph: 4.0.0 +* datadog-cert-manager: 5.1.0 +* datadog-checkpoint-quantum-firewall: 1.0.0 +* datadog-checks-base: 37.2.0 +* datadog-checks-dependency-provider: 3.0.0 +* datadog-checks-downloader: 6.1.0 +* datadog-cilium: 5.0.0 +* datadog-cisco-aci: 4.1.0 +* datadog-cisco-secure-firewall: 1.0.0 +* datadog-citrix-hypervisor: 5.0.0 +* datadog-clickhouse: 5.0.0 +* datadog-cloud-foundry-api: 5.0.0 +* datadog-cloudera: 3.2.0 +* datadog-cockroachdb: 5.0.0 +* datadog-confluent-platform: 3.0.0 +* datadog-consul: 4.0.0 +* datadog-coredns: 5.0.0 +* datadog-couch: 8.0.0 +* datadog-couchbase: 5.0.0 +* datadog-crio: 4.0.0 +* datadog-datadog-cluster-agent: 5.1.0 +* datadog-dcgm: 3.1.0 +* 
datadog-directory: 4.0.0 +* datadog-disk: 7.0.0 +* datadog-dns-check: 5.0.0 +* datadog-dotnetclr: 4.0.0 +* datadog-druid: 4.0.0 +* datadog-ecs-fargate: 6.0.0 +* datadog-eks-fargate: 6.0.0 +* datadog-elastic: 8.0.0 +* datadog-envoy: 5.0.0 +* datadog-esxi: 3.0.0 +* datadog-etcd: 8.0.0 +* datadog-exchange-server: 4.0.0 +* datadog-external-dns: 5.0.0 +* datadog-flink: 3.0.0 +* datadog-fluentd: 5.0.0 +* datadog-fluxcd: 2.1.0 +* datadog-fly-io: 2.0.1 +* datadog-foundationdb: 3.0.0 +* datadog-gearmand: 5.0.0 +* datadog-gitlab-runner: 6.0.0 +* datadog-gitlab: 9.0.0 +* datadog-glusterfs: 3.0.1 +* datadog-go-expvar: 4.0.0 +* datadog-gunicorn: 4.0.0 +* datadog-haproxy: 7.0.0 +* datadog-harbor: 5.0.0 +* datadog-hazelcast: 6.0.0 +* datadog-hdfs-datanode: 6.0.0 +* datadog-hdfs-namenode: 6.0.0 +* datadog-hive: 2.1.0 +* datadog-hivemq: 2.1.0 +* datadog-http-check: 11.0.0 +* datadog-hudi: 4.0.0 +* datadog-hyperv: 3.0.0 +* datadog-ibm-ace: 4.0.0 +* datadog-ibm-db2: 4.0.0 +* datadog-ibm-i: 4.0.0 +* datadog-ibm-mq: 8.0.0 +* datadog-ibm-was: 5.0.0 +* datadog-ignite: 3.1.0 +* datadog-iis: 5.0.0 +* datadog-impala: 3.1.0 +* datadog-istio: 8.0.0 +* datadog-jboss-wildfly: 3.1.0 +* datadog-journald: 3.0.0 +* datadog-kafka-consumer: 6.1.0 +* datadog-kafka: 4.0.0 +* datadog-karpenter: 2.1.0 +* datadog-kong: 5.0.0 +* datadog-kube-apiserver-metrics: 6.0.0 +* datadog-kube-controller-manager: 7.0.0 +* datadog-kube-dns: 6.0.0 +* datadog-kube-metrics-server: 5.0.0 +* datadog-kube-proxy: 8.0.0 +* datadog-kube-scheduler: 6.0.0 +* datadog-kubeflow: 1.0.0 +* datadog-kubelet: 9.0.0 +* datadog-kubernetes-cluster-autoscaler: 2.1.0 +* datadog-kubernetes-state: 10.0.0 +* datadog-kubevirt-api: 1.0.0 +* datadog-kubevirt-controller: 1.0.0 +* datadog-kubevirt-handler: 1.0.0 +* datadog-kyototycoon: 4.0.0 +* datadog-kyverno: 2.1.0 +* datadog-lighttpd: 5.0.0 +* datadog-linkerd: 6.0.0 +* datadog-linux-proc-extras: 4.0.0 +* datadog-mapr: 3.0.0 +* datadog-mapreduce: 6.0.0 +* datadog-marathon: 4.0.0 +* 
datadog-marklogic: 6.0.0 +* datadog-mcache: 6.0.0 +* datadog-mesos-master: 5.0.0 +* datadog-mesos-slave: 5.0.0 +* datadog-mongo: 8.2.1 +* datadog-mysql: 14.2.0 +* datadog-nagios: 3.0.0 +* datadog-network: 5.1.0 +* datadog-nfsstat: 3.0.0 +* datadog-nginx-ingress-controller: 4.0.0 +* datadog-nginx: 8.0.0 +* datadog-nvidia-triton: 2.1.0 +* datadog-openldap: 3.0.0 +* datadog-openmetrics: 6.0.0 +* datadog-openstack-controller: 8.0.0 +* datadog-openstack: 4.0.0 +* datadog-oracle: 6.0.0 +* datadog-ossec-security: 2.0.0 +* datadog-palo-alto-panorama: 1.0.0 +* datadog-pan-firewall: 3.0.0 +* datadog-pdh-check: 4.0.0 +* datadog-pgbouncer: 8.0.0 +* datadog-php-fpm: 5.0.0 +* datadog-ping-federate: 2.0.0 +* datadog-postfix: 3.0.0 +* datadog-postgres: 22.2.0 +* datadog-powerdns-recursor: 4.0.0 +* datadog-presto: 3.1.0 +* datadog-process: 5.0.0 +* datadog-prometheus: 5.0.0 +* datadog-proxysql: 7.0.0 +* datadog-pulsar: 3.1.0 +* datadog-rabbitmq: 7.0.0 +* datadog-ray: 2.1.0 +* datadog-redisdb: 7.0.0 +* datadog-rethinkdb: 5.0.0 +* datadog-riak: 5.0.0 +* datadog-riakcs: 4.0.0 +* datadog-sap-hana: 5.0.0 +* datadog-scylla: 4.0.0 +* datadog-sidekiq: 3.0.0 +* datadog-silk: 4.0.0 +* datadog-singlestore: 4.0.0 +* datadog-slurm: 1.0.1 +* datadog-snmp: 9.0.0 +* datadog-snowflake: 7.0.0 +* datadog-solr: 2.1.0 +* datadog-sonarqube: 5.0.0 +* datadog-spark: 6.1.0 +* datadog-sqlserver: 20.1.1 +* datadog-squid: 4.0.0 +* datadog-ssh-check: 4.0.0 +* datadog-statsd: 3.0.0 +* datadog-strimzi: 3.1.0 +* datadog-supervisord: 4.0.0 +* datadog-suricata: 2.0.0 +* datadog-system-core: 4.0.0 +* datadog-system-swap: 3.0.0 +* datadog-tcp-check: 6.0.0 +* datadog-teamcity: 6.0.0 +* datadog-tekton: 2.1.0 +* datadog-teleport: 2.1.0 +* datadog-temporal: 3.1.0 +* datadog-tenable: 3.0.0 +* datadog-teradata: 4.0.0 +* datadog-tibco-ems: 2.1.0 +* datadog-tls: 4.0.0 +* datadog-tokumx: 3.2.0 +* datadog-tomcat: 4.0.0 +* datadog-torchserve: 3.1.0 +* datadog-traefik-mesh: 2.1.0 +* datadog-traffic-server: 3.1.0 +* 
datadog-twemproxy: 3.0.0 +* datadog-twistlock: 5.0.0 +* datadog-varnish: 4.0.0 +* datadog-vault: 6.0.0 +* datadog-vertica: 6.0.0 +* datadog-vllm: 2.1.0 +* datadog-voltdb: 5.0.0 +* datadog-vsphere: 8.0.1 +* datadog-weaviate: 3.1.0 +* datadog-weblogic: 3.0.0 +* datadog-win32-event-log: 5.0.0 +* datadog-windows-performance-counters: 3.1.0 +* datadog-windows-service: 6.0.0 +* datadog-wmi-check: 3.0.0 +* datadog-yarn: 7.0.0 +* datadog-zeek: 1.0.0 +* datadog-zk: 6.0.0 + +## Datadog Agent version 7.60.0 + +* datadog-active-directory: 4.0.0 +* datadog-activemq-xml: 5.0.0 +* datadog-activemq: 5.0.0 +* datadog-aerospike: 4.0.0 +* datadog-airflow: 6.1.0 +* datadog-amazon-msk: 6.0.0 +* datadog-ambari: 6.0.0 +* datadog-apache: 6.0.0 +* datadog-appgate-sdp: 1.0.0 +* datadog-arangodb: 3.1.0 +* datadog-argo-rollouts: 2.1.0 +* datadog-argo-workflows: 2.1.0 +* datadog-argocd: 3.1.0 +* datadog-aspdotnet: 4.0.0 +* datadog-avi-vantage: 5.1.0 +* datadog-aws-neuron: 2.0.1 +* datadog-azure-iot-edge: 6.0.0 +* datadog-boundary: 3.1.0 +* datadog-btrfs: 4.0.0 +* datadog-cacti: 4.0.0 +* datadog-calico: 4.0.0 +* datadog-cassandra-nodetool: 3.0.0 +* datadog-cassandra: 3.0.0 +* datadog-ceph: 4.0.0 +* datadog-cert-manager: 5.1.0 +* datadog-checkpoint-quantum-firewall: 1.0.0 +* datadog-checks-base: 37.2.0 +* datadog-checks-dependency-provider: 3.0.0 +* datadog-checks-downloader: 6.1.0 +* datadog-cilium: 5.0.0 +* datadog-cisco-aci: 4.1.0 +* datadog-cisco-secure-firewall: 1.0.0 +* datadog-citrix-hypervisor: 5.0.0 +* datadog-clickhouse: 5.0.0 +* datadog-cloud-foundry-api: 5.0.0 +* datadog-cloudera: 3.2.0 +* datadog-cockroachdb: 5.0.0 +* datadog-confluent-platform: 3.0.0 +* datadog-consul: 4.0.0 +* datadog-coredns: 5.0.0 +* datadog-couch: 8.0.0 +* datadog-couchbase: 5.0.0 +* datadog-crio: 4.0.0 +* datadog-datadog-cluster-agent: 5.1.0 +* datadog-dcgm: 3.1.0 +* datadog-directory: 4.0.0 +* datadog-disk: 7.0.0 +* datadog-dns-check: 5.0.0 +* datadog-dotnetclr: 4.0.0 +* datadog-druid: 4.0.0 +* 
datadog-ecs-fargate: 6.0.0 +* datadog-eks-fargate: 6.0.0 +* datadog-elastic: 8.0.0 +* datadog-envoy: 5.0.0 +* datadog-esxi: 3.0.0 +* datadog-etcd: 8.0.0 +* datadog-exchange-server: 4.0.0 +* datadog-external-dns: 5.0.0 +* datadog-flink: 3.0.0 +* datadog-fluentd: 5.0.0 +* datadog-fluxcd: 2.1.0 +* datadog-fly-io: 2.0.1 +* datadog-foundationdb: 3.0.0 +* datadog-gearmand: 5.0.0 +* datadog-gitlab-runner: 6.0.0 +* datadog-gitlab: 9.0.0 +* datadog-glusterfs: 3.0.1 +* datadog-go-expvar: 4.0.0 +* datadog-gunicorn: 4.0.0 +* datadog-haproxy: 7.0.0 +* datadog-harbor: 5.0.0 +* datadog-hazelcast: 6.0.0 +* datadog-hdfs-datanode: 6.0.0 +* datadog-hdfs-namenode: 6.0.0 +* datadog-hive: 2.1.0 +* datadog-hivemq: 2.1.0 +* datadog-http-check: 11.0.0 +* datadog-hudi: 4.0.0 +* datadog-hyperv: 3.0.0 +* datadog-ibm-ace: 4.0.0 +* datadog-ibm-db2: 4.0.0 +* datadog-ibm-i: 4.0.0 +* datadog-ibm-mq: 8.0.0 +* datadog-ibm-was: 5.0.0 +* datadog-ignite: 3.1.0 +* datadog-iis: 5.0.0 +* datadog-impala: 3.1.0 +* datadog-istio: 8.0.0 +* datadog-jboss-wildfly: 3.1.0 +* datadog-journald: 3.0.0 +* datadog-kafka-consumer: 6.1.0 +* datadog-kafka: 4.0.0 +* datadog-karpenter: 2.1.0 +* datadog-kong: 5.0.0 +* datadog-kube-apiserver-metrics: 6.0.0 +* datadog-kube-controller-manager: 7.0.0 +* datadog-kube-dns: 6.0.0 +* datadog-kube-metrics-server: 5.0.0 +* datadog-kube-proxy: 8.0.0 +* datadog-kube-scheduler: 6.0.0 +* datadog-kubeflow: 1.0.0 +* datadog-kubelet: 9.0.0 +* datadog-kubernetes-cluster-autoscaler: 2.1.0 +* datadog-kubernetes-state: 10.0.0 +* datadog-kubevirt-api: 1.0.0 +* datadog-kubevirt-controller: 1.0.0 +* datadog-kubevirt-handler: 1.0.0 +* datadog-kyototycoon: 4.0.0 +* datadog-kyverno: 2.1.0 +* datadog-lighttpd: 5.0.0 +* datadog-linkerd: 6.0.0 +* datadog-linux-proc-extras: 4.0.0 +* datadog-mapr: 3.0.0 +* datadog-mapreduce: 6.0.0 +* datadog-marathon: 4.0.0 +* datadog-marklogic: 6.0.0 +* datadog-mcache: 6.0.0 +* datadog-mesos-master: 5.0.0 +* datadog-mesos-slave: 5.0.0 +* datadog-mongo: 8.2.1 +* 
datadog-mysql: 14.2.0 +* datadog-nagios: 3.0.0 +* datadog-network: 5.1.0 +* datadog-nfsstat: 3.0.0 +* datadog-nginx-ingress-controller: 4.0.0 +* datadog-nginx: 8.0.0 +* datadog-nvidia-triton: 2.1.0 +* datadog-openldap: 3.0.0 +* datadog-openmetrics: 6.0.0 +* datadog-openstack-controller: 8.0.0 +* datadog-openstack: 4.0.0 +* datadog-oracle: 6.0.0 +* datadog-ossec-security: 2.0.0 +* datadog-palo-alto-panorama: 1.0.0 +* datadog-pan-firewall: 3.0.0 +* datadog-pdh-check: 4.0.0 +* datadog-pgbouncer: 8.0.0 +* datadog-php-fpm: 5.0.0 +* datadog-ping-federate: 2.0.0 +* datadog-postfix: 3.0.0 +* datadog-postgres: 22.2.0 +* datadog-powerdns-recursor: 4.0.0 +* datadog-presto: 3.1.0 +* datadog-process: 5.0.0 +* datadog-prometheus: 5.0.0 +* datadog-proxysql: 7.0.0 +* datadog-pulsar: 3.1.0 +* datadog-rabbitmq: 7.0.0 +* datadog-ray: 2.1.0 +* datadog-redisdb: 7.0.0 +* datadog-rethinkdb: 5.0.0 +* datadog-riak: 5.0.0 +* datadog-riakcs: 4.0.0 +* datadog-sap-hana: 5.0.0 +* datadog-scylla: 4.0.0 +* datadog-sidekiq: 3.0.0 +* datadog-silk: 4.0.0 +* datadog-singlestore: 4.0.0 +* datadog-slurm: 1.0.1 +* datadog-snmp: 9.0.0 +* datadog-snowflake: 7.0.0 +* datadog-solr: 2.1.0 +* datadog-sonarqube: 5.0.0 +* datadog-spark: 6.1.0 +* datadog-sqlserver: 20.1.1 +* datadog-squid: 4.0.0 +* datadog-ssh-check: 4.0.0 +* datadog-statsd: 3.0.0 +* datadog-strimzi: 3.1.0 +* datadog-supervisord: 4.0.0 +* datadog-suricata: 2.0.0 +* datadog-system-core: 4.0.0 +* datadog-system-swap: 3.0.0 +* datadog-tcp-check: 6.0.0 +* datadog-teamcity: 6.0.0 +* datadog-tekton: 2.1.0 +* datadog-teleport: 2.1.0 +* datadog-temporal: 3.1.0 +* datadog-tenable: 3.0.0 +* datadog-teradata: 4.0.0 +* datadog-tibco-ems: 2.1.0 +* datadog-tls: 4.0.0 +* datadog-tokumx: 3.2.0 +* datadog-tomcat: 4.0.0 +* datadog-torchserve: 3.1.0 +* datadog-traefik-mesh: 2.1.0 +* datadog-traffic-server: 3.1.0 +* datadog-twemproxy: 3.0.0 +* datadog-twistlock: 5.0.0 +* datadog-varnish: 4.0.0 +* datadog-vault: 6.0.0 +* datadog-vertica: 6.0.0 +* datadog-vllm: 2.1.0 
+* datadog-voltdb: 5.0.0 +* datadog-vsphere: 8.0.1 +* datadog-weaviate: 3.1.0 +* datadog-weblogic: 3.0.0 +* datadog-win32-event-log: 5.0.0 +* datadog-windows-performance-counters: 3.1.0 +* datadog-windows-service: 6.0.0 +* datadog-wmi-check: 3.0.0 +* datadog-yarn: 7.0.0 +* datadog-zeek: 1.0.0 +* datadog-zk: 6.0.0 + ## Datadog Agent version 7.59.0 * datadog-active-directory: 4.0.0 diff --git a/airflow/README.md b/airflow/README.md index 94faf19dab2cc..40e8ffd4cde8e 100644 --- a/airflow/README.md +++ b/airflow/README.md @@ -126,7 +126,6 @@ Connect Airflow to DogStatsD (included in the Datadog Agent) by using the Airflo tags: dag_id: "$1" task_id: "$2" - - match: "airflow.pool.open_slots.*" - match: "airflow.dagrun.*.first_task_scheduling_delay" name: "airflow.dagrun.first_task_scheduling_delay" tags: diff --git a/airflow/assets/configuration/spec.yaml b/airflow/assets/configuration/spec.yaml index a78029f9ec994..cc4b4141d0ba8 100644 --- a/airflow/assets/configuration/spec.yaml +++ b/airflow/assets/configuration/spec.yaml @@ -13,6 +13,12 @@ files: description: The URL used to connect to the Airflow instance (use the Airflow web server REST API endpoint). value: type: string + - name: collect_ongoing_duration + required: false + description: Collect ongoing duration metric for DAG task instances. 
+ value: + type: boolean + example: true - template: instances/http - template: instances/default - template: logs diff --git a/airflow/changelog.d/19278.added b/airflow/changelog.d/19278.added new file mode 100644 index 0000000000000..45bf7d91d95ab --- /dev/null +++ b/airflow/changelog.d/19278.added @@ -0,0 +1 @@ +Use `start_date` instead of `execution_date` for ongoing duration metrics \ No newline at end of file diff --git a/airflow/datadog_checks/airflow/airflow.py b/airflow/datadog_checks/airflow/airflow.py index f9d3e8157fe02..9e1f082520d87 100644 --- a/airflow/datadog_checks/airflow/airflow.py +++ b/airflow/datadog_checks/airflow/airflow.py @@ -19,7 +19,7 @@ def __init__(self, name, init_config, instances): self._url = self.instance.get('url', '') self._tags = self.instance.get('tags', []) - + self._collect_ongoing_duration = self.instance.get('collect_ongoing_duration', True) # The Agent only makes one attempt to instantiate each AgentCheck so any errors occurring # in `__init__` are logged just once, making it difficult to spot. Therefore, we emit # potential configuration errors as part of the check run phase. 
@@ -51,7 +51,7 @@ def check(self, _): else: submit_metrics(resp, tags) # Only calculate task duration for stable API - if target_url is url_stable: + if target_url is url_stable and self._collect_ongoing_duration: task_instances = self._get_all_task_instances(url_stable_task_instances, tags) if task_instances: self._calculate_task_ongoing_duration(task_instances, tags) @@ -118,14 +118,14 @@ def _calculate_task_ongoing_duration(self, tasks, tags): dag_task_tags = copy(tags) task_id = task.get('task_id') dag_id = task.get('dag_id') - execution_date = task.get('execution_date') + start_date = task.get('start_date') # Add tags for each task dag_task_tags.append('dag_id:{}'.format(dag_id)) dag_task_tags.append('task_id:{}'.format(task_id)) # Calculate ongoing duration - ongoing_duration = get_timestamp() - datetime.fromisoformat((execution_date)).timestamp() + ongoing_duration = get_timestamp() - datetime.fromisoformat((start_date)).timestamp() self.gauge('airflow.dag.task.ongoing_duration', ongoing_duration, tags=dag_task_tags) def _parse_config(self): diff --git a/airflow/datadog_checks/airflow/config_models/defaults.py b/airflow/datadog_checks/airflow/config_models/defaults.py index 81b466607723e..c88e64022a89a 100644 --- a/airflow/datadog_checks/airflow/config_models/defaults.py +++ b/airflow/datadog_checks/airflow/config_models/defaults.py @@ -24,6 +24,10 @@ def instance_auth_type(): return 'basic' +def instance_collect_ongoing_duration(): + return True + + def instance_disable_generic_tags(): return False diff --git a/airflow/datadog_checks/airflow/config_models/instance.py b/airflow/datadog_checks/airflow/config_models/instance.py index e31c99d217e86..265191f70aa1e 100644 --- a/airflow/datadog_checks/airflow/config_models/instance.py +++ b/airflow/datadog_checks/airflow/config_models/instance.py @@ -60,6 +60,7 @@ class InstanceConfig(BaseModel): aws_host: Optional[str] = None aws_region: Optional[str] = None aws_service: Optional[str] = None + 
collect_ongoing_duration: Optional[bool] = None connect_timeout: Optional[float] = None disable_generic_tags: Optional[bool] = None empty_default_hostname: Optional[bool] = None diff --git a/airflow/datadog_checks/airflow/data/conf.yaml.example b/airflow/datadog_checks/airflow/data/conf.yaml.example index 260c385662bcd..31105f3bf816f 100644 --- a/airflow/datadog_checks/airflow/data/conf.yaml.example +++ b/airflow/datadog_checks/airflow/data/conf.yaml.example @@ -50,6 +50,11 @@ instances: # - url: + ## @param collect_ongoing_duration - boolean - optional - default: true + ## Collect ongoing duration metric for DAG task instances. + # + # collect_ongoing_duration: true + ## @param proxy - mapping - optional ## This overrides the `proxy` setting in `init_config`. ## diff --git a/airflow/tests/test_unit.py b/airflow/tests/test_unit.py index 0cab38355d9ac..033334be280d7 100644 --- a/airflow/tests/test_unit.py +++ b/airflow/tests/test_unit.py @@ -118,3 +118,44 @@ def test_dag_task_ongoing_duration(aggregator, task_instance): tags=['key:my-tag', 'url:http://localhost:8080', 'dag_id:tutorial', 'task_id:sleep'], count=1, ) + + +@pytest.mark.parametrize( + "collect_ongoing_duration, should_call_method", + [ + pytest.param( + True, + [ + mock.call( + 'http://localhost:8080/api/v1/dags/~/dagRuns/~/taskInstances?state=running', + ['url:http://localhost:8080', 'key:my-tag'], + ) + ], + id="collect", + ), + pytest.param( + False, + [], + id="don't collect", + ), + ], +) +def test_config_collect_ongoing_duration(collect_ongoing_duration, should_call_method): + instance = {**common.FULL_CONFIG['instances'][0], 'collect_ongoing_duration': collect_ongoing_duration} + check = AirflowCheck('airflow', common.FULL_CONFIG, [instance]) + + with mock.patch('datadog_checks.airflow.airflow.AirflowCheck._get_version', return_value='2.6.2'): + with mock.patch('datadog_checks.base.utils.http.requests') as req: + mock_resp = mock.MagicMock(status_code=200) + mock_resp.json.side_effect = [ + 
{'metadatabase': {'status': 'healthy'}, 'scheduler': {'status': 'healthy'}}, + ] + req.get.return_value = mock_resp + + with mock.patch( + 'datadog_checks.airflow.airflow.AirflowCheck._get_all_task_instances' + ) as mock_get_all_task_instances: + check.check(None) + + # Assert method calls + mock_get_all_task_instances.assert_has_calls(should_call_method, any_order=False) diff --git a/authorize_net/CHANGELOG.md b/authorize_net/CHANGELOG.md new file mode 100644 index 0000000000000..d343bf7c205d4 --- /dev/null +++ b/authorize_net/CHANGELOG.md @@ -0,0 +1,7 @@ +# CHANGELOG - authorize.net + +## 1.0.0 / 2024-10-23 + +***Added***: + +* Initial Release \ No newline at end of file diff --git a/authorize_net/README.md b/authorize_net/README.md new file mode 100644 index 0000000000000..8ea656afd8068 --- /dev/null +++ b/authorize_net/README.md @@ -0,0 +1,35 @@ +# Authorize.Net + +## Overview + +Authorize.Net is a widely used payment gateway that allows businesses to accept secure payments through various channels, including online, mobile, and in-person transactions. Supporting a range of payment methods such as credit cards, e-checks, and digital wallets like Apple Pay and PayPal, it offers a versatile solution for merchants. The platform emphasizes security with features like encryption, tokenization, and PCI DSS compliance, while also providing advanced fraud detection tools + +## Setup + +### Configuration + +!!! Add list of steps to set up this integration !!! + +### Validation + +!!! Add steps to validate integration is functioning as expected !!! + +## Data Collected + +### Logs + +The Authorize.Net integration collects and forward settled transaction logs to Datadog. + +### Metrics + +The Authorize.Net integration collects and forward metrics to Datadog. + +### Events + +The Authorize.Net integration does not include any events. + +## Troubleshooting + +Need help? Contact [Datadog support][1]. 
+ +[1]: https://docs.datadoghq.com/help/ \ No newline at end of file diff --git a/authorize_net/assets/service_checks.json b/authorize_net/assets/service_checks.json new file mode 100644 index 0000000000000..fe51488c7066f --- /dev/null +++ b/authorize_net/assets/service_checks.json @@ -0,0 +1 @@ +[] diff --git a/authorize_net/manifest.json b/authorize_net/manifest.json new file mode 100644 index 0000000000000..df97536b9149d --- /dev/null +++ b/authorize_net/manifest.json @@ -0,0 +1,49 @@ +{ + "manifest_version": "2.0.0", + "app_uuid": "a86b9a64-3785-471a-9563-a40e6f3fb86e", + "app_id": "authorize-net", + "display_on_public_website": false, + "tile": { + "overview": "README.md#Overview", + "configuration": "README.md#Setup", + "support": "README.md#Support", + "changelog": "CHANGELOG.md", + "description": "Gain insights into Settled and Unsettled Transactions of Authorize.Net", + "title": "Authorize.Net", + "media": [], + "classifier_tags": [ + "Category::Log Collection", + "Category::Metrics", + "Submitted Data Type::Logs", + "Submitted Data Type::Metrics", + "Offering::Integration" + ] + }, + "assets": { + "integration": { + "auto_install": false, + "source_type_id": 28965663, + "source_type_name": "Authorize.Net", + "events": { + "creates_events": false + }, + "metrics": { + "prefix": "authorize_net.", + "check": [], + "metadata_path": "metadata.csv" + }, + "service_checks": { + "metadata_path": "assets/service_checks.json" + } + }, + "logs": { + "source": "authorize_net" + } + }, + "author": { + "support_email": "help@datadoghq.com", + "name": "Datadog", + "homepage": "https://www.datadoghq.com", + "sales_email": "info@datadoghq.com" + } +} diff --git a/authorize_net/metadata.csv b/authorize_net/metadata.csv new file mode 100644 index 0000000000000..02cde5e98381e --- /dev/null +++ b/authorize_net/metadata.csv @@ -0,0 +1 @@ +metric_name,metric_type,interval,unit_name,per_unit_name,description,orientation,integration,short_name,curated_metric,sample_tags diff 
--git a/aws_neuron/assets/logs/aws_neuron.yaml b/aws_neuron/assets/logs/aws_neuron.yaml index a6699c424ce35..165a55e1e2a3c 100644 --- a/aws_neuron/assets/logs/aws_neuron.yaml +++ b/aws_neuron/assets/logs/aws_neuron.yaml @@ -7,29 +7,45 @@ pipeline: name: AWS Neuron enabled: true filter: - query: 'source:aws_neuron' + query: "source:aws_neuron" processors: - type: grok-parser name: Operator compilation enabled: true source: message samples: - - 'INFO:Neuron: => aten::Int: 96' - - 'INFO:Neuron: => aten::Int: 1 [supported]' - - 'INFO:Neuron: => aten::embedding: 3 [not supported]' + - "INFO:Neuron: => aten::Int: 96" + - "INFO:Neuron: => aten::Int: 1 [supported]" + - "INFO:Neuron: => aten::embedding: 3 [not supported]" grok: supportRules: operator %{word:operator.library}::%{word:operator.type} - matchRules: 'operator_rule INFO:Neuron: => %{operator}: %{integer:count}( \[%{data:not_compiled_msg}\])?' + matchRules: "operator_rule INFO:Neuron: => %{operator}: %{integer:count}( + \\[%{data:not_compiled_msg}\\])?" - type: grok-parser name: Message separation enabled: true source: message samples: - - INFO:Neuron:Number of arithmetic operators (pre-compilation) before = 565, fused = 548, percent fused = 96.99% - - 'INFO:Neuron: => aten::layer_norm: 25' + - INFO:Neuron:Number of arithmetic operators (pre-compilation) before = + 565, fused = 548, percent fused = 96.99% + - "INFO:Neuron: => aten::layer_norm: 25" + - > + 2024-11-15 10:38:24.000103: 4938 ERROR ||NEURON_CC_WRAPPER||: + Compilation failed for + /tmp/ubuntu/neuroncc_compile_workdir/cf6cf570-d889-4a0c-a821-719e225d9bc8/model.MODULE_16150394314145281873+d7517139.hlo_module.pb + after 0 retries. 
+ - > + 2024-Nov-15 + 13:35:03.0879 6475:6475 ERROR NRT:nrt_allocate_neuron_cores NeuronCore(s) + not available - Requested:16 Available:4 grok: supportRules: "" - matchRules: message_rule %{word:level}:Neuron:( => )?%{data:msg} + matchRules: >- + info_rule %{word:level}:Neuron:( => )?%{data:msg} + + error_rule_1 %{date("yyyy-MM-dd' 'HH:mm:ss.SSSSSS"):date}: %{integer:pid} %{word:level} %{data:msg} + + error_rule_2 %{date("yyyy-MMM-dd' 'HH:mm:ss.SSSS"):date} %{integer:pid}:%{integer:tid} %{word:level} %{data:msg} - type: message-remapper name: Define `msg` as the official message of the log enabled: true @@ -40,3 +56,8 @@ pipeline: enabled: true sources: - level + - type: status-remapper + name: Define `level` as the official status of the log + enabled: true + sources: + - level diff --git a/aws_neuron/assets/logs/aws_neuron_tests.yaml b/aws_neuron/assets/logs/aws_neuron_tests.yaml index 9c88b85826efc..14f9b411f27db 100644 --- a/aws_neuron/assets/logs/aws_neuron_tests.yaml +++ b/aws_neuron/assets/logs/aws_neuron_tests.yaml @@ -36,3 +36,27 @@ tests: status: "info" tags: - "source:LOGS_SOURCE" + - + sample: "2024-11-15 10:38:24.000103: 4938 ERROR ||NEURON_CC_WRAPPER||: Compilation failed for /tmp/ubuntu/neuroncc_compile_workdir/cf6cf570-d889-4a0c-a821-719e225d9bc8/model.MODULE_16150394314145281873+d7517139.hlo_module.pb after 0 retries." + result: + custom: + date: 1731667104000 + level: "ERROR" + pid: 4938 + message: "||NEURON_CC_WRAPPER||: Compilation failed for /tmp/ubuntu/neuroncc_compile_workdir/cf6cf570-d889-4a0c-a821-719e225d9bc8/model.MODULE_16150394314145281873+d7517139.hlo_module.pb after 0 retries." 
+ status: "error" + tags: + - "source:LOGS_SOURCE" + - + sample: "2024-Nov-15 13:35:03.0879 6475:6475 ERROR NRT:nrt_allocate_neuron_cores NeuronCore(s) not available - Requested:16 Available:4" + result: + custom: + date: 1731677703087 + level: "ERROR" + pid: 6475 + tid: 6475 + message: " NRT:nrt_allocate_neuron_cores NeuronCore(s) not available - Requested:16 Available:4" + status: "error" + tags: + - "source:LOGS_SOURCE" + diff --git a/brevo/CHANGELOG.md b/brevo/CHANGELOG.md new file mode 100644 index 0000000000000..764eff384da15 --- /dev/null +++ b/brevo/CHANGELOG.md @@ -0,0 +1,7 @@ +# CHANGELOG - Brevo + +## 1.0.0 / 2024-08-27 + +***Added***: + +* Initial Release diff --git a/brevo/README.md b/brevo/README.md new file mode 100644 index 0000000000000..098f88f3efe9e --- /dev/null +++ b/brevo/README.md @@ -0,0 +1,64 @@ +# Brevo + +## Overview + +[Brevo][1] is a cloud-based digital marketing platform designed for creating, sending, and tracking email campaigns, transactional emails, and more. It offers tools for automation and analytics, helping businesses optimize their email marketing strategies and monitor performance. + +Integrate Brevo with Datadog to gain insights into Brevo marketing campaign emails and track Brevo performance based on events and other transactional events using [webhooks][2]. + +## Setup + +Follow the instructions below to configure this integration for Brevo Marketing and Transactional events through a Webhook. + +### Configuration + +#### Webhook configuration for marketing events +Configure the Datadog endpoint to forward Brevo marketing events as logs to Datadog. For more details, see the Brevo [Marketing webhooks][3] documentation. + +1. Select an existing API key or create a new one by clicking one of the buttons below: +2. Log in to your [Brevo account][4]. +3. In the left-side panel, navigate to **Campaigns**. +4. Navigate to the **Settings** Page. +5. Under the **Webhooks** section, click **Configure**. +6. 
Click **Add a New Webhook**. +7. Enter the webhook URL that you identified previously. +8. Choose the types of messages and contact logs you want to forward to Datadog. +9. Click **Add**. + +#### Webhook configuration for transactional events +Configure the Datadog endpoint to forward Brevo transactional events as logs to Datadog. For more details, see the Brevo [Transactional webhooks][5] documentation. + +1. Select an existing API key or create a new one by clicking one of the buttons below: +2. Log in to your [Brevo account][4]. If you are already logged in, Brevo automatically redirects to the [Brevo homepage][6]. +3. In the left-side panel, navigate to **Transactional**. +4. In **Settings**, click "**Webhook**". +5. Click **Add a new webhook**. +6. Enter the webhook URL that you identified previously. +7. Select the types of message logs to forward to Datadog. +8. Click **Save**. + +## Data Collected + +### Logs +The Brevo integration forwards the marketing and transactional event logs to Datadog. + +### Metrics +Brevo does not include any metrics. + +### Service Checks +Brevo does not include any service checks. + +### Events +Brevo does not include any events. + +## Troubleshooting + +Need help? Contact [Datadog support][7]. 
+ +[1]: https://www.brevo.com/products/marketing-platform/ +[2]: https://developers.brevo.com/docs/how-to-use-webhooks +[3]: https://developers.brevo.com/docs/marketing-webhooks +[4]: https://login.brevo.com/ +[5]: https://developers.brevo.com/docs/transactional-webhooks +[6]: https://app.brevo.com/ +[7]: https://docs.datadoghq.com/help/ diff --git a/brevo/assets/brevo.svg b/brevo/assets/brevo.svg new file mode 100644 index 0000000000000..a86cb89a46ded --- /dev/null +++ b/brevo/assets/brevo.svg @@ -0,0 +1,3 @@ + + + diff --git a/brevo/assets/dashboards/brevo_marketing_events.json b/brevo/assets/dashboards/brevo_marketing_events.json new file mode 100644 index 0000000000000..0789f2a571044 --- /dev/null +++ b/brevo/assets/dashboards/brevo_marketing_events.json @@ -0,0 +1,1078 @@ +{ + "title": "Brevo - Marketing Events", + "description": "This Dashboard provides a comprehensive analysis of the Brevo marketing events", + "widgets": [ + { + "id": 2074004104711888, + "definition": { + "type": "image", + "url": "https://www.brevo.com/_next/image/?url=https%3A%2F%2Fcorp-backend.brevo.com%2Fwp-content%2Fuploads%2F2023%2F12%2Fcover.webp&w=640&q=75", + "url_dark_theme": "https://www.brevo.com/_next/image/?url=https%3A%2F%2Fcorp-backend.brevo.com%2Fwp-content%2Fuploads%2F2023%2F04%2FLight-logo-cover-1.jpg&w=640&q=75", + "sizing": "cover", + "has_background": true, + "has_border": true, + "vertical_align": "center", + "horizontal_align": "center" + }, + "layout": { + "x": 0, + "y": 0, + "width": 6, + "height": 3 + } + }, + { + "id": 1383597922205404, + "definition": { + "title": "Emails Delivered", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": [ + { + "response_format": "scalar", + "queries": [ + { + "data_source": "logs", + "name": "query1", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [], + "search": { + "query": "source:brevo service:marketing-events @evt.name:delivered $campaign_id $event_name 
$recipient_email" + }, + "storage": "hot" + } + ], + "formulas": [ + { + "formula": "query1" + } + ], + "conditional_formats": [ + { + "comparator": ">", + "value": 0, + "palette": "custom_bg", + "custom_bg_color": "#dbdef5" + } + ] + } + ], + "autoscale": true, + "precision": 2, + "timeseries_background": { + "yaxis": { + "include_zero": true + }, + "type": "area" + } + }, + "layout": { + "x": 6, + "y": 0, + "width": 6, + "height": 3 + } + }, + { + "id": 8917290706883810, + "definition": { + "type": "note", + "content": "Gain better visibility into your email marketing performance by monitoring Brevo Marketing events with this dashboard.\n\nFor more information, see the [Brevo Integration Documentation](https://docs.datadoghq.com/integrations/brevo).\n\n**Tips**\n- Use the timeframe selector in the top right of the dashboard to change the default timeframe.\n- Clone this dashboard to rearrange, modify and add widgets and visualizations.\n", + "background_color": "green", + "font_size": "14", + "text_align": "left", + "vertical_align": "center", + "show_tick": true, + "tick_pos": "50%", + "tick_edge": "top", + "has_padding": true + }, + "layout": { + "x": 0, + "y": 3, + "width": 6, + "height": 3 + } + }, + { + "id": 7878012237089596, + "definition": { + "title": "Emails Opened", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": [ + { + "response_format": "scalar", + "queries": [ + { + "data_source": "logs", + "name": "query1", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [], + "search": { + "query": "source:brevo service:marketing-events @evt.name:opened $campaign_id $event_name $recipient_email" + }, + "storage": "hot" + } + ], + "formulas": [ + { + "formula": "query1" + } + ], + "conditional_formats": [ + { + "comparator": ">", + "value": 0, + "palette": "black_on_light_green" + } + ] + } + ], + "autoscale": true, + "precision": 2, + "timeseries_background": { + "yaxis": { + 
"include_zero": true + }, + "type": "area" + } + }, + "layout": { + "x": 6, + "y": 3, + "width": 6, + "height": 3 + } + }, + { + "id": 1554629846571348, + "definition": { + "title": "Email Event Trends: Spam, Bounces, and Unsubscribes", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "horizontal", + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "type": "timeseries", + "requests": [ + { + "formulas": [ + { + "style": { + "palette": "warm", + "palette_index": 3 + }, + "alias": "Hard Bounced", + "formula": "query1" + }, + { + "style": { + "palette": "orange", + "palette_index": 4 + }, + "alias": "Soft Bounced", + "formula": "query2" + }, + { + "style": { + "palette": "warm", + "palette_index": 0 + }, + "alias": "Marked as Spam", + "formula": "query3" + }, + { + "style": { + "palette": "warm", + "palette_index": 4 + }, + "alias": "Unsubscribed", + "formula": "query4" + } + ], + "queries": [ + { + "data_source": "logs", + "name": "query1", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [], + "search": { + "query": "source:brevo service:marketing-events @evt.name:hard_bounce $campaign_id $event_name $recipient_email" + }, + "storage": "hot" + }, + { + "data_source": "logs", + "name": "query2", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [], + "search": { + "query": "source:brevo service:marketing-events @evt.name:soft_bounce $campaign_id $event_name $recipient_email" + }, + "storage": "hot" + }, + { + "data_source": "logs", + "name": "query3", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [], + "search": { + "query": "source:brevo service:marketing-events @evt.name:spam $campaign_id $event_name $recipient_email" + }, + "storage": "hot" + }, + { + "data_source": "logs", + "name": "query4", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [], + "search": 
{ + "query": "source:brevo service:marketing-events @evt.name:unsubscribe $campaign_id $event_name $recipient_email" + }, + "storage": "hot" + } + ], + "response_format": "timeseries", + "style": { + "palette": "dog_classic", + "order_by": "values", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "line" + } + ] + }, + "layout": { + "x": 0, + "y": 6, + "width": 12, + "height": 5 + } + }, + { + "id": 897131023425208, + "definition": { + "title": "Email Open Activity ", + "title_size": "16", + "title_align": "left", + "show_legend": false, + "time": { + "type": "live", + "unit": "day", + "value": 1 + }, + "type": "heatmap", + "yaxis": { + "include_zero": true, + "scale": "linear", + "min": "auto", + "max": "auto" + }, + "requests": [ + { + "formulas": [ + { + "formula": "query1" + } + ], + "queries": [ + { + "data_source": "logs", + "name": "query1", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [], + "search": { + "query": "source:brevo service:marketing-events @evt.name:opened $campaign_id $event_name $recipient_email" + }, + "storage": "hot" + } + ], + "response_format": "timeseries", + "style": { + "palette": "dog_classic" + } + } + ] + }, + "layout": { + "x": 0, + "y": 11, + "width": 7, + "height": 5 + } + }, + { + "id": 6836248737505238, + "definition": { + "title": "Most Clicked URLs", + "title_size": "16", + "title_align": "left", + "type": "query_table", + "requests": [ + { + "queries": [ + { + "data_source": "logs", + "name": "query1", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [ + { + "facet": "@campaign name", + "limit": 10, + "sort": { + "order": "desc", + "aggregation": "count" + } + }, + { + "facet": "@http.url", + "limit": 10, + "sort": { + "order": "desc", + "aggregation": "count" + } + } + ], + "search": { + "query": "source:brevo service:marketing-events @evt.name:click $campaign_id $event_name $recipient_email" + }, + "storage": "hot" + } + ], + 
"response_format": "scalar", + "sort": { + "count": 100, + "order_by": [ + { + "type": "formula", + "index": 0, + "order": "desc" + } + ] + }, + "formulas": [ + { + "cell_display_mode": "number", + "alias": "count", + "formula": "query1" + } + ] + } + ], + "has_search_bar": "auto" + }, + "layout": { + "x": 7, + "y": 11, + "width": 5, + "height": 5 + } + }, + { + "id": 8329971943698914, + "definition": { + "title": "Campaign Performance Summary", + "title_size": "16", + "title_align": "left", + "type": "query_table", + "requests": [ + { + "queries": [ + { + "data_source": "logs", + "name": "query1", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [ + { + "facet": "@campaign name", + "limit": 10, + "sort": { + "order": "desc", + "aggregation": "count" + } + } + ], + "search": { + "query": "source:brevo service:marketing-events @evt.name:delivered $campaign_id $event_name $recipient_email" + }, + "storage": "hot" + }, + { + "data_source": "logs", + "name": "query2", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [ + { + "facet": "@campaign name", + "limit": 10, + "sort": { + "order": "desc", + "aggregation": "count" + } + } + ], + "search": { + "query": "source:brevo service:marketing-events @evt.name:opened $campaign_id $event_name $recipient_email" + }, + "storage": "hot" + }, + { + "data_source": "logs", + "name": "query3", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [ + { + "facet": "@campaign name", + "limit": 10, + "sort": { + "order": "desc", + "aggregation": "count" + } + } + ], + "search": { + "query": "source:brevo service:marketing-events @evt.name:click $campaign_id $event_name $recipient_email" + }, + "storage": "hot" + }, + { + "data_source": "logs", + "name": "query4", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [ + { + "facet": "@campaign name", + "limit": 10, + "sort": { + "order": "desc", + 
"aggregation": "count" + } + } + ], + "search": { + "query": "source:brevo service:marketing-events @evt.name:hard_bounce $campaign_id $event_name $recipient_email" + }, + "storage": "hot" + }, + { + "data_source": "logs", + "name": "query5", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [ + { + "facet": "@campaign name", + "limit": 10, + "sort": { + "order": "desc", + "aggregation": "count" + } + } + ], + "search": { + "query": "source:brevo service:marketing-events @evt.name:soft_bounce $campaign_id $event_name $recipient_email" + }, + "storage": "hot" + }, + { + "data_source": "logs", + "name": "query6", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [ + { + "facet": "@campaign name", + "limit": 10, + "sort": { + "order": "desc", + "aggregation": "count" + } + } + ], + "search": { + "query": "source:brevo service:marketing-events @evt.name:spam $campaign_id $event_name $recipient_email" + }, + "storage": "hot" + }, + { + "data_source": "logs", + "name": "query7", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [ + { + "facet": "@campaign name", + "limit": 10, + "sort": { + "order": "desc", + "aggregation": "count" + } + } + ], + "search": { + "query": "source:brevo service:marketing-events @evt.name:unsubscribe $campaign_id $event_name $recipient_email" + }, + "storage": "hot" + } + ], + "response_format": "scalar", + "sort": { + "count": 70, + "order_by": [ + { + "type": "formula", + "index": 0, + "order": "desc" + } + ] + }, + "formulas": [ + { + "cell_display_mode": "number", + "alias": "Delivered", + "formula": "query1" + }, + { + "cell_display_mode": "number", + "alias": "Opened", + "formula": "query2" + }, + { + "cell_display_mode": "number", + "alias": "Clicked", + "formula": "query3" + }, + { + "cell_display_mode": "number", + "alias": "Hard Bounced", + "formula": "query4" + }, + { + "cell_display_mode": "number", + "alias": "Soft Bounced", + 
"formula": "query5" + }, + { + "cell_display_mode": "number", + "alias": "Marked as Spam", + "formula": "query6" + }, + { + "cell_display_mode": "number", + "alias": "Unsubscribed", + "formula": "query7" + } + ] + } + ], + "has_search_bar": "auto" + }, + "layout": { + "x": 0, + "y": 16, + "width": 12, + "height": 4 + } + }, + { + "id": 991970123942130, + "definition": { + "title": "Most Engaged recipients", + "title_size": "16", + "title_align": "left", + "type": "toplist", + "requests": [ + { + "queries": [ + { + "data_source": "logs", + "name": "query1", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [ + { + "facet": "@usr.email", + "limit": 10, + "sort": { + "order": "desc", + "aggregation": "count" + } + } + ], + "search": { + "query": "source:brevo service:marketing-events $campaign_id $event_name $recipient_email" + }, + "storage": "hot" + } + ], + "response_format": "scalar", + "formulas": [ + { + "formula": "query1" + } + ], + "sort": { + "count": 10, + "order_by": [ + { + "type": "formula", + "index": 0, + "order": "desc" + } + ] + } + } + ], + "style": { + "display": { + "type": "stacked", + "legend": "automatic" + } + } + }, + "layout": { + "x": 0, + "y": 20, + "width": 6, + "height": 4 + } + }, + { + "id": 5154411203269646, + "definition": { + "title": "Most Opened Campaigns", + "title_size": "16", + "title_align": "left", + "type": "toplist", + "requests": [ + { + "queries": [ + { + "data_source": "logs", + "name": "query1", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [ + { + "facet": "@campaign name", + "limit": 10, + "sort": { + "order": "desc", + "aggregation": "count" + } + } + ], + "search": { + "query": "source:brevo service:marketing-events @evt.name:opened $campaign_id $event_name $recipient_email" + }, + "storage": "hot" + } + ], + "response_format": "scalar", + "formulas": [ + { + "formula": "query1" + } + ], + "sort": { + "count": 10, + "order_by": [ + { + "type": 
"formula", + "index": 0, + "order": "desc" + } + ] + } + } + ], + "style": { + "display": { + "type": "stacked", + "legend": "automatic" + } + } + }, + "layout": { + "x": 6, + "y": 20, + "width": 6, + "height": 4 + } + }, + { + "id": 6281401150190870, + "definition": { + "title": "Top Reasons for Soft Bounce Events", + "title_size": "16", + "title_align": "left", + "type": "toplist", + "requests": [ + { + "queries": [ + { + "data_source": "logs", + "name": "query1", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [ + { + "facet": "@reason", + "limit": 10, + "sort": { + "order": "desc", + "aggregation": "count" + } + } + ], + "search": { + "query": "source:brevo service:marketing-events @evt.name:soft_bounce $campaign_id $event_name $recipient_email" + }, + "storage": "hot" + } + ], + "response_format": "scalar", + "formulas": [ + { + "formula": "query1" + } + ], + "sort": { + "count": 10, + "order_by": [ + { + "type": "formula", + "index": 0, + "order": "desc" + } + ] + } + } + ], + "style": { + "display": { + "type": "stacked", + "legend": "automatic" + } + } + }, + "layout": { + "x": 0, + "y": 24, + "width": 4, + "height": 4 + } + }, + { + "id": 1122235000103890, + "definition": { + "title": "Top Reasons for Hard Bounce Events", + "title_size": "16", + "title_align": "left", + "type": "toplist", + "requests": [ + { + "queries": [ + { + "data_source": "logs", + "name": "query1", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [ + { + "facet": "@reason", + "limit": 10, + "sort": { + "order": "desc", + "aggregation": "count" + } + } + ], + "search": { + "query": "source:brevo service:marketing-events @evt.name:hard_bounce $campaign_id $event_name $recipient_email" + }, + "storage": "hot" + } + ], + "response_format": "scalar", + "formulas": [ + { + "formula": "query1" + } + ], + "sort": { + "count": 10, + "order_by": [ + { + "type": "formula", + "index": 0, + "order": "desc" + } + ] + } + } + 
], + "style": { + "display": { + "type": "stacked", + "legend": "automatic" + } + } + }, + "layout": { + "x": 4, + "y": 24, + "width": 4, + "height": 4 + } + }, + { + "id": 1499029347730738, + "definition": { + "title": "Top Reasons for Spam Events", + "title_size": "16", + "title_align": "left", + "type": "toplist", + "requests": [ + { + "queries": [ + { + "data_source": "logs", + "name": "query1", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [ + { + "facet": "@reason", + "limit": 10, + "sort": { + "order": "desc", + "aggregation": "count" + } + } + ], + "search": { + "query": "source:brevo service:marketing-events @evt.name:spam $campaign_id $event_name $recipient_email" + }, + "storage": "hot" + } + ], + "response_format": "scalar", + "formulas": [ + { + "formula": "query1" + } + ], + "sort": { + "count": 10, + "order_by": [ + { + "type": "formula", + "index": 0, + "order": "desc" + } + ] + } + } + ], + "style": { + "display": { + "type": "stacked", + "legend": "automatic" + } + } + }, + "layout": { + "x": 8, + "y": 24, + "width": 4, + "height": 4 + } + }, + { + "id": 3725343105146853, + "definition": { + "title": "Marketing Event List", + "title_size": "16", + "title_align": "left", + "requests": [ + { + "response_format": "event_list", + "query": { + "data_source": "logs_stream", + "query_string": "source:brevo service:marketing-events $campaign_id $recipient_email $event_name", + "indexes": [], + "storage": "hot" + }, + "columns": [ + { + "field": "status_line", + "width": "auto" + }, + { + "field": "timestamp", + "width": "auto" + }, + { + "field": "@evt.name", + "width": "auto" + }, + { + "field": "@brevo.camp_id", + "width": "auto" + }, + { + "field": "@campaign name", + "width": "auto" + }, + { + "field": "@network.client.ip", + "width": "auto" + }, + { + "field": "@usr.email", + "width": "auto" + }, + { + "field": "@reason", + "width": "auto" + } + ] + } + ], + "type": "list_stream" + }, + "layout": { + "x": 0, + 
"y": 28, + "width": 12, + "height": 4 + } + } + ], + "template_variables": [ + { + "name": "event_name", + "prefix": "@evt.name", + "available_values": [], + "default": "*" + }, + { + "name": "recipient_email", + "prefix": "@usr.email", + "available_values": [], + "default": "*" + }, + { + "name": "campaign_id", + "prefix": "@brevo.camp_id", + "available_values": [], + "default": "*" + } + ], + "layout_type": "ordered", + "notify_list": [], + "reflow_type": "fixed" +} \ No newline at end of file diff --git a/brevo/assets/dashboards/brevo_transactional_events.json b/brevo/assets/dashboards/brevo_transactional_events.json new file mode 100644 index 0000000000000..fb748d8041820 --- /dev/null +++ b/brevo/assets/dashboards/brevo_transactional_events.json @@ -0,0 +1,1440 @@ +{ + "title": "Brevo - Transactional Events", + "description": "This Dashboard provides a comprehensive analysis of the Brevo transactional events", + "widgets": [ + { + "id": 7622028875118342, + "definition": { + "type": "image", + "url": "https://www.brevo.com/_next/image/?url=https%3A%2F%2Fcorp-backend.brevo.com%2Fwp-content%2Fuploads%2F2023%2F12%2Fcover.webp&w=640&q=75", + "url_dark_theme": "https://www.brevo.com/_next/image/?url=https%3A%2F%2Fcorp-backend.brevo.com%2Fwp-content%2Fuploads%2F2023%2F04%2FLight-logo-cover-1.jpg&w=640&q=75", + "sizing": "cover", + "has_background": true, + "has_border": true, + "vertical_align": "center", + "horizontal_align": "center" + }, + "layout": { + "x": 0, + "y": 0, + "width": 6, + "height": 3 + } + }, + { + "id": 7638012328352196, + "definition": { + "title": "Campaign Monitors Summary", + "type": "manage_status", + "display_format": "countsAndList", + "color_preference": "background", + "hide_zero_counts": true, + "show_status": true, + "last_triggered_format": "relative", + "query": "tag:brevo $subject $recipient_email $sender_email $event_name", + "sort": "status,asc", + "count": 50, + "start": 0, + "summary_type": "monitors", + "show_priority": false, + 
"show_last_triggered": false + }, + "layout": { + "x": 6, + "y": 0, + "width": 6, + "height": 6 + } + }, + { + "id": 2085088109137856, + "definition": { + "type": "note", + "content": "Gain better visibility into your transactional email performance by monitoring Brevo transactional event logs with this dashboard.\n\nFor more information, see the [Brevo Integration Documentation](https://docs.datadoghq.com/integrations/brevo).\n\n**Tips**\n- Use the timeframe selector in the top right of the dashboard to change the default timeframe.\n- Clone this dashboard to rearrange, modify and add widgets and visualizations.\n", + "background_color": "green", + "font_size": "14", + "text_align": "left", + "vertical_align": "center", + "show_tick": true, + "tick_pos": "50%", + "tick_edge": "top", + "has_padding": true + }, + "layout": { + "x": 0, + "y": 3, + "width": 6, + "height": 3 + } + }, + { + "id": 1921565840906662, + "definition": { + "title": "Emails Sent", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": [ + { + "response_format": "scalar", + "queries": [ + { + "data_source": "logs", + "name": "query1", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [], + "search": { + "query": "source:brevo service:transactional-events @evt.name:request $event_name $recipient_email $subject $sender_email" + }, + "storage": "hot" + } + ], + "formulas": [ + { + "formula": "query1" + } + ], + "conditional_formats": [ + { + "comparator": ">", + "value": 0, + "palette": "custom_bg", + "custom_bg_color": "#f5eee5" + } + ] + } + ], + "autoscale": true, + "precision": 2, + "timeseries_background": { + "yaxis": { + "include_zero": true + }, + "type": "area" + } + }, + "layout": { + "x": 0, + "y": 6, + "width": 4, + "height": 3 + } + }, + { + "id": 1383597922205404, + "definition": { + "title": "Emails Delivered", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": [ + { + 
"response_format": "scalar", + "queries": [ + { + "data_source": "logs", + "name": "query1", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [], + "search": { + "query": "source:brevo service:transactional-events @evt.name:delivered $event_name $recipient_email $subject $sender_email" + }, + "storage": "hot" + } + ], + "formulas": [ + { + "formula": "query1" + } + ], + "conditional_formats": [ + { + "comparator": ">", + "value": 0, + "palette": "custom_bg", + "custom_bg_color": "#dbdef5" + } + ] + } + ], + "autoscale": true, + "precision": 2, + "timeseries_background": { + "yaxis": { + "include_zero": true + }, + "type": "area" + } + }, + "layout": { + "x": 4, + "y": 6, + "width": 4, + "height": 3 + } + }, + { + "id": 4858909609907946, + "definition": { + "title": "Uniquely Opened Emails", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": [ + { + "response_format": "scalar", + "queries": [ + { + "data_source": "logs", + "name": "query1", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [], + "search": { + "query": "source:brevo service:transactional-events @evt.name:unique_opened $event_name $recipient_email $subject $sender_email" + }, + "storage": "hot" + } + ], + "formulas": [ + { + "formula": "query1" + } + ], + "conditional_formats": [ + { + "comparator": ">", + "value": 0, + "palette": "black_on_light_green" + } + ] + } + ], + "autoscale": true, + "precision": 2, + "timeseries_background": { + "yaxis": { + "include_zero": true + }, + "type": "area" + } + }, + "layout": { + "x": 8, + "y": 6, + "width": 4, + "height": 3 + } + }, + { + "id": 1554629846571348, + "definition": { + "title": "Email Event Trends: Spam, Bounces, Unsubscribes, Errors and Blocks", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "horizontal", + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "type": "timeseries", + 
"requests": [ + { + "formulas": [ + { + "style": { + "palette": "classic", + "palette_index": 3 + }, + "alias": "Hard Bounced", + "formula": "query1" + }, + { + "style": { + "palette": "purple", + "palette_index": 1 + }, + "alias": "Soft Bounced", + "formula": "query2" + }, + { + "style": { + "palette": "classic", + "palette_index": 0 + }, + "alias": "Marked as Spam", + "formula": "query3" + }, + { + "style": { + "palette": "warm", + "palette_index": 1 + }, + "alias": "Unsubscribed", + "formula": "query4" + }, + { + "style": { + "palette": "warm", + "palette_index": 3 + }, + "alias": "Error", + "formula": "query5" + }, + { + "style": { + "palette": "warm", + "palette_index": 4 + }, + "alias": "Blocked", + "formula": "query6" + }, + { + "style": { + "palette": "gray", + "palette_index": 3 + }, + "alias": "Deferred", + "formula": "query7" + } + ], + "queries": [ + { + "data_source": "logs", + "name": "query1", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [], + "search": { + "query": "source:brevo service:transactional-events @evt.name:hard_bounce $event_name $recipient_email $subject $sender_email" + }, + "storage": "hot" + }, + { + "data_source": "logs", + "name": "query2", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [], + "search": { + "query": "source:brevo service:transactional-events @evt.name:soft_bounce $event_name $recipient_email $subject $sender_email" + }, + "storage": "hot" + }, + { + "data_source": "logs", + "name": "query3", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [], + "search": { + "query": "source:brevo service:transactional-events @evt.name:spam $event_name $recipient_email $subject $sender_email" + }, + "storage": "hot" + }, + { + "data_source": "logs", + "name": "query4", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [], + "search": { + "query": "source:brevo 
service:transactional-events @evt.name:unsubscribed $event_name $recipient_email $subject $sender_email" + }, + "storage": "hot" + }, + { + "data_source": "logs", + "name": "query5", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [], + "search": { + "query": "source:brevo service:transactional-events @evt.name:invalid_email $event_name $recipient_email $subject $sender_email" + }, + "storage": "hot" + }, + { + "data_source": "logs", + "name": "query6", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [], + "search": { + "query": "source:brevo service:transactional-events @evt.name:blocked $event_name $recipient_email $subject $sender_email" + }, + "storage": "hot" + }, + { + "data_source": "logs", + "name": "query7", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [], + "search": { + "query": "source:brevo service:transactional-events @evt.name:deferred $event_name $recipient_email $subject $sender_email" + }, + "storage": "hot" + } + ], + "response_format": "timeseries", + "style": { + "palette": "dog_classic", + "order_by": "values", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "line" + } + ] + }, + "layout": { + "x": 0, + "y": 9, + "width": 12, + "height": 4 + } + }, + { + "id": 8329971943698914, + "definition": { + "title": "Email Performance Metrics by Subject", + "title_size": "16", + "title_align": "left", + "type": "query_table", + "requests": [ + { + "queries": [ + { + "data_source": "logs", + "name": "query1", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [ + { + "facet": "@subject", + "limit": 10, + "sort": { + "order": "desc", + "aggregation": "count" + } + } + ], + "search": { + "query": "source:brevo service:transactional-events @evt.name:request $event_name $recipient_email $subject $sender_email" + }, + "storage": "hot" + }, + { + "data_source": "logs", + "name": "query12", + 
"indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [ + { + "facet": "@subject", + "limit": 10, + "sort": { + "order": "desc", + "aggregation": "count" + } + } + ], + "search": { + "query": "source:brevo service:transactional-events @evt.name:unique_opened $event_name $recipient_email $subject $sender_email" + }, + "storage": "hot" + }, + { + "data_source": "logs", + "name": "query2", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [ + { + "facet": "@subject", + "limit": 10, + "sort": { + "order": "desc", + "aggregation": "count" + } + } + ], + "search": { + "query": "source:brevo service:transactional-events @evt.name:delivered $event_name $recipient_email $subject $sender_email" + }, + "storage": "hot" + }, + { + "data_source": "logs", + "name": "query3", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [ + { + "facet": "@subject", + "limit": 10, + "sort": { + "order": "desc", + "aggregation": "count" + } + } + ], + "search": { + "query": "source:brevo service:transactional-events @evt.name:opened $event_name $recipient_email $subject $sender_email" + }, + "storage": "hot" + }, + { + "data_source": "logs", + "name": "query4", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [ + { + "facet": "@subject", + "limit": 10, + "sort": { + "order": "desc", + "aggregation": "count" + } + } + ], + "search": { + "query": "source:brevo service:transactional-events @evt.name:click $event_name $recipient_email $subject $sender_email" + }, + "storage": "hot" + }, + { + "data_source": "logs", + "name": "query5", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [ + { + "facet": "@subject", + "limit": 10, + "sort": { + "order": "desc", + "aggregation": "count" + } + } + ], + "search": { + "query": "source:brevo service:transactional-events @evt.name:hard_bounce $event_name $recipient_email $subject 
$sender_email" + }, + "storage": "hot" + }, + { + "data_source": "logs", + "name": "query6", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [ + { + "facet": "@subject", + "limit": 10, + "sort": { + "order": "desc", + "aggregation": "count" + } + } + ], + "search": { + "query": "source:brevo service:transactional-events @evt.name:soft_bounce $event_name $recipient_email $subject $sender_email" + }, + "storage": "hot" + }, + { + "data_source": "logs", + "name": "query7", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [ + { + "facet": "@subject", + "limit": 10, + "sort": { + "order": "desc", + "aggregation": "count" + } + } + ], + "search": { + "query": "source:brevo service:transactional-events @evt.name:spam $event_name $recipient_email $subject $sender_email" + }, + "storage": "hot" + }, + { + "data_source": "logs", + "name": "query8", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [ + { + "facet": "@subject", + "limit": 10, + "sort": { + "order": "desc", + "aggregation": "count" + } + } + ], + "search": { + "query": "source:brevo service:transactional-events @evt.name:unsubscribed $event_name $recipient_email $subject $sender_email" + }, + "storage": "hot" + }, + { + "data_source": "logs", + "name": "query9", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [ + { + "facet": "@subject", + "limit": 10, + "sort": { + "order": "desc", + "aggregation": "count" + } + } + ], + "search": { + "query": "source:brevo service:transactional-events @evt.name:invalid_email $event_name $recipient_email $subject $sender_email" + }, + "storage": "hot" + }, + { + "data_source": "logs", + "name": "query10", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [ + { + "facet": "@subject", + "limit": 10, + "sort": { + "order": "desc", + "aggregation": "count" + } + } + ], + "search": { + "query": 
"source:brevo service:transactional-events @evt.name:blocked $event_name $recipient_email $subject $sender_email" + }, + "storage": "hot" + }, + { + "data_source": "logs", + "name": "query11", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [ + { + "facet": "@subject", + "limit": 10, + "sort": { + "order": "desc", + "aggregation": "count" + } + } + ], + "search": { + "query": "source:brevo service:transactional-events @evt.name:deferred $event_name $recipient_email $subject $sender_email" + }, + "storage": "hot" + } + ], + "response_format": "scalar", + "sort": { + "count": 120, + "order_by": [ + { + "type": "formula", + "index": 0, + "order": "desc" + } + ] + }, + "formulas": [ + { + "cell_display_mode": "number", + "alias": "Sent", + "formula": "query1" + }, + { + "cell_display_mode": "number", + "alias": "Delivered", + "formula": "query2" + }, + { + "cell_display_mode": "number", + "alias": "Uniquely Opened", + "formula": "query12" + }, + { + "cell_display_mode": "number", + "alias": "Opened", + "formula": "query3" + }, + { + "cell_display_mode": "number", + "alias": "Clicked", + "formula": "query4" + }, + { + "cell_display_mode": "number", + "alias": "Hard Bounced", + "formula": "query5" + }, + { + "cell_display_mode": "number", + "alias": "Soft Bounced", + "formula": "query6" + }, + { + "cell_display_mode": "number", + "alias": "Spam", + "formula": "query7" + }, + { + "cell_display_mode": "number", + "alias": "Unsubscribed", + "formula": "query8" + }, + { + "cell_display_mode": "number", + "alias": "Error", + "formula": "query9" + }, + { + "cell_display_mode": "number", + "alias": "Blocked", + "formula": "query10" + }, + { + "cell_display_mode": "number", + "alias": "Deferred", + "formula": "query11" + } + ] + } + ], + "has_search_bar": "auto" + }, + "layout": { + "x": 0, + "y": 13, + "width": 12, + "height": 4 + } + }, + { + "id": 897131023425208, + "definition": { + "title": "Email Open Activity ", + "title_size": "16", + 
"title_align": "left", + "show_legend": false, + "time": { + "type": "live", + "unit": "day", + "value": 1 + }, + "type": "heatmap", + "yaxis": { + "include_zero": true, + "scale": "linear", + "min": "auto", + "max": "auto" + }, + "requests": [ + { + "formulas": [ + { + "formula": "query1" + } + ], + "queries": [ + { + "data_source": "logs", + "name": "query1", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [], + "search": { + "query": "source:brevo service:transactional-events @evt.name:opened $event_name $recipient_email $subject $sender_email" + }, + "storage": "hot" + } + ], + "response_format": "timeseries", + "style": { + "palette": "dog_classic" + } + } + ] + }, + "layout": { + "x": 0, + "y": 17, + "width": 6, + "height": 4 + } + }, + { + "id": 6836248737505238, + "definition": { + "title": "Most Clicked URLs", + "title_size": "16", + "title_align": "left", + "type": "query_table", + "requests": [ + { + "queries": [ + { + "data_source": "logs", + "name": "query1", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [ + { + "facet": "@http.url", + "limit": 10, + "sort": { + "order": "desc", + "aggregation": "count" + } + } + ], + "search": { + "query": "source:brevo service:transactional-events @evt.name:click $event_name $recipient_email $subject $sender_email" + }, + "storage": "hot" + } + ], + "response_format": "scalar", + "sort": { + "count": 10, + "order_by": [ + { + "type": "formula", + "index": 0, + "order": "desc" + } + ] + }, + "formulas": [ + { + "cell_display_mode": "number", + "alias": "count", + "formula": "query1" + } + ] + } + ], + "has_search_bar": "auto" + }, + "layout": { + "x": 6, + "y": 17, + "width": 6, + "height": 4 + } + }, + { + "id": 991970123942130, + "definition": { + "title": "Most Engaged Recipients", + "title_size": "16", + "title_align": "left", + "type": "toplist", + "requests": [ + { + "queries": [ + { + "data_source": "logs", + "name": "query1", + 
"indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [ + { + "facet": "@usr.email", + "limit": 10, + "sort": { + "order": "desc", + "aggregation": "count" + } + } + ], + "search": { + "query": "source:brevo service:transactional-events $event_name $recipient_email $subject $sender_email" + }, + "storage": "hot" + } + ], + "response_format": "scalar", + "formulas": [ + { + "formula": "query1" + } + ], + "sort": { + "count": 10, + "order_by": [ + { + "type": "formula", + "index": 0, + "order": "desc" + } + ] + } + } + ], + "style": { + "display": { + "type": "stacked", + "legend": "automatic" + } + } + }, + "layout": { + "x": 0, + "y": 21, + "width": 6, + "height": 4 + } + }, + { + "id": 3569244924085276, + "definition": { + "title": "Most Engaged Senders", + "title_size": "16", + "title_align": "left", + "type": "toplist", + "requests": [ + { + "queries": [ + { + "data_source": "logs", + "name": "query1", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [ + { + "facet": "@sender_email", + "limit": 10, + "sort": { + "order": "desc", + "aggregation": "count" + } + } + ], + "search": { + "query": "source:brevo service:transactional-events $event_name $recipient_email $subject $sender_email" + }, + "storage": "hot" + } + ], + "response_format": "scalar", + "formulas": [ + { + "formula": "query1" + } + ], + "sort": { + "count": 10, + "order_by": [ + { + "type": "formula", + "index": 0, + "order": "desc" + } + ] + } + } + ], + "style": { + "display": { + "type": "stacked", + "legend": "automatic" + } + } + }, + "layout": { + "x": 6, + "y": 21, + "width": 6, + "height": 4 + } + }, + { + "id": 6281401150190870, + "definition": { + "title": "Top Reasons for Soft Bounce Events", + "title_size": "16", + "title_align": "left", + "type": "toplist", + "requests": [ + { + "queries": [ + { + "data_source": "logs", + "name": "query1", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": 
[ + { + "facet": "@reason", + "limit": 10, + "sort": { + "order": "desc", + "aggregation": "count" + } + } + ], + "search": { + "query": "source:brevo service:transactional-events @evt.name:soft_bounce $event_name $recipient_email $subject $sender_email" + }, + "storage": "hot" + } + ], + "response_format": "scalar", + "formulas": [ + { + "formula": "query1" + } + ], + "sort": { + "count": 10, + "order_by": [ + { + "type": "formula", + "index": 0, + "order": "desc" + } + ] + } + } + ], + "style": { + "display": { + "type": "stacked", + "legend": "automatic" + } + } + }, + "layout": { + "x": 0, + "y": 25, + "width": 6, + "height": 4 + } + }, + { + "id": 1122235000103890, + "definition": { + "title": "Top Reasons for Hard Bounce Events", + "title_size": "16", + "title_align": "left", + "type": "toplist", + "requests": [ + { + "queries": [ + { + "data_source": "logs", + "name": "query1", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [ + { + "facet": "@reason", + "limit": 10, + "sort": { + "order": "desc", + "aggregation": "count" + } + } + ], + "search": { + "query": "source:brevo service:transactional-events @evt.name:hard_bounce $event_name $recipient_email $subject $sender_email" + }, + "storage": "hot" + } + ], + "response_format": "scalar", + "formulas": [ + { + "formula": "query1" + } + ], + "sort": { + "count": 10, + "order_by": [ + { + "type": "formula", + "index": 0, + "order": "desc" + } + ] + } + } + ], + "style": { + "display": { + "type": "stacked", + "legend": "automatic" + } + } + }, + "layout": { + "x": 6, + "y": 25, + "width": 6, + "height": 4 + } + }, + { + "id": 5154411203269646, + "definition": { + "title": "Most Used Subjects", + "title_size": "16", + "title_align": "left", + "type": "toplist", + "requests": [ + { + "queries": [ + { + "data_source": "logs", + "name": "query1", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [ + { + "facet": "@subject", + "limit": 10, + 
"sort": { + "order": "desc", + "aggregation": "count" + } + } + ], + "search": { + "query": "source:brevo service:transactional-events $event_name $recipient_email $subject $sender_email" + }, + "storage": "hot" + } + ], + "response_format": "scalar", + "formulas": [ + { + "formula": "query1" + } + ], + "sort": { + "count": 10, + "order_by": [ + { + "type": "formula", + "index": 0, + "order": "desc" + } + ] + } + } + ], + "style": { + "display": { + "type": "stacked", + "legend": "automatic" + } + } + }, + "layout": { + "x": 0, + "y": 29, + "width": 6, + "height": 4 + } + }, + { + "id": 5702461649653348, + "definition": { + "title": "Top Reasons for Deferred Events", + "title_size": "16", + "title_align": "left", + "type": "toplist", + "requests": [ + { + "queries": [ + { + "data_source": "logs", + "name": "query1", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [ + { + "facet": "@reason", + "limit": 10, + "sort": { + "order": "desc", + "aggregation": "count" + } + } + ], + "search": { + "query": "source:brevo service:transactional-events @evt.name:deferred $event_name $recipient_email $subject $sender_email" + }, + "storage": "hot" + } + ], + "response_format": "scalar", + "formulas": [ + { + "formula": "query1" + } + ], + "sort": { + "count": 10, + "order_by": [ + { + "type": "formula", + "index": 0, + "order": "desc" + } + ] + } + } + ], + "style": { + "display": { + "type": "stacked", + "legend": "automatic" + } + } + }, + "layout": { + "x": 6, + "y": 29, + "width": 6, + "height": 4 + } + }, + { + "id": 1443743821978885, + "definition": { + "title": "Transactional Event List", + "title_size": "16", + "title_align": "left", + "requests": [ + { + "response_format": "event_list", + "query": { + "data_source": "logs_stream", + "query_string": "source:brevo service:transactional-events $event_name $recipient_email $subject $sender_email", + "indexes": [], + "storage": "hot" + }, + "columns": [ + { + "field": "status_line", + 
"width": "auto" + }, + { + "field": "timestamp", + "width": "auto" + }, + { + "field": "@evt.name", + "width": "auto" + }, + { + "field": "@sender_email", + "width": "auto" + }, + { + "field": "@network.client.ip", + "width": "auto" + }, + { + "field": "@usr.email", + "width": "auto" + }, + { + "field": "@subject", + "width": "auto" + }, + { + "field": "@reason", + "width": "auto" + } + ] + } + ], + "type": "list_stream" + }, + "layout": { + "x": 0, + "y": 33, + "width": 12, + "height": 4 + } + } + ], + "template_variables": [ + { + "name": "event_name", + "prefix": "@evt.name", + "available_values": [], + "default": "*" + }, + { + "name": "sender_email", + "prefix": "@sender_email", + "available_values": [], + "default": "*" + }, + { + "name": "recipient_email", + "prefix": "@usr.email", + "available_values": [], + "default": "*" + }, + { + "name": "subject", + "prefix": "@subject", + "available_values": [], + "default": "*" + } + ], + "layout_type": "ordered", + "notify_list": [], + "reflow_type": "fixed" +} \ No newline at end of file diff --git a/brevo/assets/logs/brevo.yaml b/brevo/assets/logs/brevo.yaml new file mode 100644 index 0000000000000..d89f9cfdb293a --- /dev/null +++ b/brevo/assets/logs/brevo.yaml @@ -0,0 +1,277 @@ +id: brevo +metric_id: brevo +backend_only: false +facets: + - groups: + - Event + name: Event Name + path: evt.name + source: log + - groups: + - Web Access + name: URL Path + path: http.url + source: log + - groups: + - Web Access + name: URL Host + path: http.url_details.host + source: log + - groups: + - Web Access + name: URL Path + path: http.url_details.path + source: log + - groups: + - Web Access + name: URL Port + path: http.url_details.port + source: log + - groups: + - Web Access + name: URL scheme + path: http.url_details.scheme + source: log + - groups: + - Web Access + name: User-Agent + path: http.useragent + source: log + - groups: + - Web Access + name: Browser + path: http.useragent_details.browser.family + source: log + 
- groups: + - Web Access + name: Device + path: http.useragent_details.device.family + source: log + - groups: + - Web Access + name: OS + path: http.useragent_details.os.family + source: log + - groups: + - Geoip + name: City Name + path: network.client.geoip.city.name + source: log + - groups: + - Geoip + name: Continent Code + path: network.client.geoip.continent.code + source: log + - groups: + - Geoip + name: Continent Name + path: network.client.geoip.continent.name + source: log + - groups: + - Geoip + name: Country ISO Code + path: network.client.geoip.country.iso_code + source: log + - groups: + - Geoip + name: Country Name + path: network.client.geoip.country.name + source: log + - groups: + - Geoip + name: Subdivision ISO Code + path: network.client.geoip.subdivision.iso_code + source: log + - groups: + - Geoip + name: Subdivision Name + path: network.client.geoip.subdivision.name + source: log + - groups: + - Web Access + name: Client IP + path: network.client.ip + source: log + - groups: + - User + name: User Email + path: usr.email + source: log + - facetType: list + groups: + - Brevo + name: Campaign Id + path: brevo.camp_id + source: log + type: string +pipeline: + type: pipeline + name: Brevo + enabled: true + filter: + query: "source:brevo" + processors: + - type: attribute-remapper + name: Map `email` to `usr.email` + enabled: true + sources: + - email + sourceType: attribute + target: usr.email + targetType: attribute + preserveSource: false + overrideOnConflict: false + - type: attribute-remapper + name: Map `event` to `evt.name` + enabled: true + sources: + - event + sourceType: attribute + target: evt.name + targetType: attribute + preserveSource: false + overrideOnConflict: false + - type: attribute-remapper + name: Map `sending_ip` to `network.client.ip` + enabled: true + sources: + - sending_ip + sourceType: attribute + target: network.client.ip + targetType: attribute + preserveSource: false + overrideOnConflict: false + - type: 
geo-ip-parser + name: Extracting geolocation information from the client IP + enabled: true + sources: + - network.client.ip + target: network.client.geoip + ip_processing_behavior: do-nothing + - type: pipeline + name: Processing for transactional events + enabled: true + filter: + query: "service:transactional-events" + processors: + - type: attribute-remapper + name: Map `ts_epoch` to `timestamp` + enabled: true + sources: + - ts_epoch + sourceType: attribute + target: timestamp + targetType: attribute + preserveSource: false + overrideOnConflict: false + - type: attribute-remapper + name: Map `user_agent` to `http.useragent` + enabled: true + sources: + - user_agent + sourceType: attribute + target: http.useragent + targetType: attribute + preserveSource: false + overrideOnConflict: false + - type: attribute-remapper + name: Map `link` to `http.url` + enabled: true + sources: + - link + sourceType: attribute + target: http.url + targetType: attribute + preserveSource: false + overrideOnConflict: false + - name: Lookup for `evt.name` to `status` field + enabled: true + source: evt.name + target: status + lookupTable: |- + request,info + click,info + deferred,warning + delivered,Success + soft_bounce,warning + spam,warning + unique_opened,info + hard_bounce,error + opened,info + invalid_email,error + blocked,error + unsubscribed,warning + proxy_open,info + unique_proxy_open,info + type: lookup-processor + - type: user-agent-parser + name: Extracting user-agent information from the user-agent + enabled: true + sources: + - http.useragent + target: http.useragent_details + encoded: false + combineVersionDetails: false + - type: pipeline + name: Processing for marketing events + enabled: true + filter: + query: "service:marketing-events" + processors: + - type: arithmetic-processor + name: Convert `ts_event` from second to millisecond EPOCH UNIX format and map to + `timestamp` + enabled: true + expression: ts_event * 1000 + target: timestamp + replaceMissing: false 
+ - type: attribute-remapper + name: Map `camp_id` to `brevo.camp_id` + enabled: true + sources: + - camp_id + sourceType: attribute + target: brevo.camp_id + targetType: attribute + preserveSource: false + overrideOnConflict: false + - type: attribute-remapper + name: Map `URL` to `http.url` + enabled: true + sources: + - URL + sourceType: attribute + target: http.url + targetType: attribute + preserveSource: false + overrideOnConflict: false + - name: Lookup for `evt.name` to `status` field + enabled: true + source: evt.name + target: status + lookupTable: |- + spam,warning + opened,info + click,info + hard_bounce,error + soft_bounce,warning + delivered,Success + unsubscribe,warning + type: lookup-processor + - type: date-remapper + name: Define `timestamp` as the official date of the log + enabled: true + sources: + - timestamp + - type: status-remapper + name: Define `status` as the official status of the log + enabled: true + sources: + - status + - type: url-parser + name: Extracting url-details from the url + enabled: true + sources: + - http.url + target: http.url_details + normalizeEndingSlashes: false diff --git a/brevo/assets/logs/brevo_tests.yaml b/brevo/assets/logs/brevo_tests.yaml new file mode 100644 index 0000000000000..a84951b0d4267 --- /dev/null +++ b/brevo/assets/logs/brevo_tests.yaml @@ -0,0 +1,1082 @@ +id: brevo +tests: + - sample: |- + { + "date_sent" : "2020-10-09 00:00:00", + "segment_ids" : [ 1, 10 ], + "date_event" : "2020-10-09 00:00:00", + "campaign name" : "My First Campaign", + "ts_sent" : 1604933619, + "camp_id" : 1, + "id" : 11111, + "tag" : "", + "event" : "opened", + "ts_event" : 1604933737, + "email" : "example@domain.com", + "ts" : 1604937337 + } + service: "marketing-events" + result: + custom: + brevo: + camp_id: 1 + campaign name: "My First Campaign" + date_event: "2020-10-09 00:00:00" + date_sent: "2020-10-09 00:00:00" + evt: + name: "opened" + id: 11111 + segment_ids: + - 1 + - 10 + status: "info" + tag: "" + timestamp: 
1.604933737E12 + ts: 1604937337 + ts_event: 1604933737 + ts_sent: 1604933619 + usr: + email: "example@domain.com" + message: |- + { + "date_sent" : "2020-10-09 00:00:00", + "segment_ids" : [ 1, 10 ], + "date_event" : "2020-10-09 00:00:00", + "campaign name" : "My First Campaign", + "ts_sent" : 1604933619, + "camp_id" : 1, + "id" : 11111, + "tag" : "", + "event" : "opened", + "ts_event" : 1604933737, + "email" : "example@domain.com", + "ts" : 1604937337 + } + service: "marketing-events" + status: "info" + tags: + - "source:LOGS_SOURCE" + timestamp: 1604933737000 + - sample: |- + { + "date_sent" : "2020-10-09 00:00:00", + "campaign name" : "My First Campaign", + "ts_event" : 1604933737, + "URL" : "https://myCampaignUrl.net", + "segment_ids" : [ 1, 10 ], + "date_event" : "2020-10-09 00:00:00", + "ts_sent" : 1604933619, + "camp_id" : 1, + "id" : 11111, + "tag" : "", + "event" : "click", + "email" : "example@domain.com", + "ts" : 1604937337 + } + service: "marketing-events" + result: + custom: + brevo: + camp_id: 1 + campaign name: "My First Campaign" + date_event: "2020-10-09 00:00:00" + date_sent: "2020-10-09 00:00:00" + evt: + name: "click" + http: + url: "https://myCampaignUrl.net" + url_details: + host: "mycampaignurl.net" + path: "" + scheme: "https" + id: 11111 + segment_ids: + - 1 + - 10 + status: "info" + tag: "" + timestamp: 1.604933737E12 + ts: 1604937337 + ts_event: 1604933737 + ts_sent: 1604933619 + usr: + email: "example@domain.com" + message: |- + { + "date_sent" : "2020-10-09 00:00:00", + "campaign name" : "My First Campaign", + "ts_event" : 1604933737, + "URL" : "https://myCampaignUrl.net", + "segment_ids" : [ 1, 10 ], + "date_event" : "2020-10-09 00:00:00", + "ts_sent" : 1604933619, + "camp_id" : 1, + "id" : 11111, + "tag" : "", + "event" : "click", + "email" : "example@domain.com", + "ts" : 1604937337 + } + service: "marketing-events" + status: "info" + tags: + - "source:LOGS_SOURCE" + timestamp: 1604933737000 + - sample: |- + { + "reason" : 
"deferred", + "date_sent" : "2020-10-09 00:00:00", + "campaign name" : "My First Campaign", + "ts_event" : 1604933737, + "sending_ip" : "10.10.10.10", + "date_event" : "2020-10-09 00:00:00", + "ts_sent" : 1604933619, + "camp_id" : 1, + "id" : 11111, + "tag" : "", + "event" : "hard_bounce", + "email" : "example@domain.com", + "ts" : 1604937337 + } + service: "marketing-events" + result: + custom: + brevo: + camp_id: 1 + campaign name: "My First Campaign" + date_event: "2020-10-09 00:00:00" + date_sent: "2020-10-09 00:00:00" + evt: + name: "hard_bounce" + id: 11111 + network: + client: + geoip: {} + ip: "10.10.10.10" + reason: "deferred" + status: "error" + tag: "" + timestamp: 1.604933737E12 + ts: 1604937337 + ts_event: 1604933737 + ts_sent: 1604933619 + usr: + email: "example@domain.com" + message: |- + { + "reason" : "deferred", + "date_sent" : "2020-10-09 00:00:00", + "campaign name" : "My First Campaign", + "ts_event" : 1604933737, + "sending_ip" : "10.10.10.10", + "date_event" : "2020-10-09 00:00:00", + "ts_sent" : 1604933619, + "camp_id" : 1, + "id" : 11111, + "tag" : "", + "event" : "hard_bounce", + "email" : "example@domain.com", + "ts" : 1604937337 + } + service: "marketing-events" + status: "error" + tags: + - "source:LOGS_SOURCE" + timestamp: 1604933737000 + - sample: |- + { + "reason" : "deferred", + "date_sent" : "2020-10-09 00:00:00", + "campaign name" : "My First Campaign", + "ts_event" : 1604933737, + "sending_ip" : "10.10.10.10", + "date_event" : "2020-10-09 00:00:00", + "ts_sent" : 1604933619, + "camp_id" : 1, + "id" : 11111, + "tag" : "", + "event" : "soft_bounce", + "email" : "example@domain.com", + "ts" : 1604937337 + } + service: "marketing-events" + result: + custom: + brevo: + camp_id: 1 + campaign name: "My First Campaign" + date_event: "2020-10-09 00:00:00" + date_sent: "2020-10-09 00:00:00" + evt: + name: "soft_bounce" + id: 11111 + network: + client: + geoip: {} + ip: "10.10.10.10" + reason: "deferred" + status: "warning" + tag: "" + 
timestamp: 1.604933737E12 + ts: 1604937337 + ts_event: 1604933737 + ts_sent: 1604933619 + usr: + email: "example@domain.com" + message: |- + { + "reason" : "deferred", + "date_sent" : "2020-10-09 00:00:00", + "campaign name" : "My First Campaign", + "ts_event" : 1604933737, + "sending_ip" : "10.10.10.10", + "date_event" : "2020-10-09 00:00:00", + "ts_sent" : 1604933619, + "camp_id" : 1, + "id" : 11111, + "tag" : "", + "event" : "soft_bounce", + "email" : "example@domain.com", + "ts" : 1604937337 + } + service: "marketing-events" + status: "warn" + tags: + - "source:LOGS_SOURCE" + timestamp: 1604933737000 + - sample: |- + { + "date_sent" : "2020-10-09 00:00:00", + "sending_ip" : "10.10.10.10", + "date_event" : "2020-10-09 00:00:00", + "campaign name" : "My First Campaign", + "ts_sent" : 1604933619, + "camp_id" : 1, + "id" : 11111, + "tag" : "", + "event" : "delivered", + "ts_event" : 1604933737, + "email" : "example@domain.com", + "ts" : 1604937337 + } + service: "marketing-events" + result: + custom: + brevo: + camp_id: 1 + campaign name: "My First Campaign" + date_event: "2020-10-09 00:00:00" + date_sent: "2020-10-09 00:00:00" + evt: + name: "delivered" + id: 11111 + network: + client: + geoip: {} + ip: "10.10.10.10" + status: "Success" + tag: "" + timestamp: 1.604933737E12 + ts: 1604937337 + ts_event: 1604933737 + ts_sent: 1604933619 + usr: + email: "example@domain.com" + message: |- + { + "date_sent" : "2020-10-09 00:00:00", + "sending_ip" : "10.10.10.10", + "date_event" : "2020-10-09 00:00:00", + "campaign name" : "My First Campaign", + "ts_sent" : 1604933619, + "camp_id" : 1, + "id" : 11111, + "tag" : "", + "event" : "delivered", + "ts_event" : 1604933737, + "email" : "example@domain.com", + "ts" : 1604937337 + } + service: "marketing-events" + status: "ok" + tags: + - "source:LOGS_SOURCE" + timestamp: 1604933737000 + - sample: |- + { + "date_sent" : "2020-10-09 00:00:00", + "list_id" : [ 3, 42 ], + "campaign name" : "My First Campaign", + "ts_event" : 
1604933737, + "segment_ids" : [ 1, 10 ], + "sending_ip" : "10.10.10.10", + "date_event" : "2020-10-09 00:00:00", + "ts_sent" : 1604933619, + "camp_id" : 1, + "id" : 11111, + "tag" : "", + "event" : "unsubscribe", + "email" : "example@domain.com", + "ts" : 1604937337 + } + service: "marketing-events" + result: + custom: + brevo: + camp_id: 1 + campaign name: "My First Campaign" + date_event: "2020-10-09 00:00:00" + date_sent: "2020-10-09 00:00:00" + evt: + name: "unsubscribe" + id: 11111 + list_id: + - 3 + - 42 + network: + client: + geoip: {} + ip: "10.10.10.10" + segment_ids: + - 1 + - 10 + status: "warning" + tag: "" + timestamp: 1.604933737E12 + ts: 1604937337 + ts_event: 1604933737 + ts_sent: 1604933619 + usr: + email: "example@domain.com" + message: |- + { + "date_sent" : "2020-10-09 00:00:00", + "list_id" : [ 3, 42 ], + "campaign name" : "My First Campaign", + "ts_event" : 1604933737, + "segment_ids" : [ 1, 10 ], + "sending_ip" : "10.10.10.10", + "date_event" : "2020-10-09 00:00:00", + "ts_sent" : 1604933619, + "camp_id" : 1, + "id" : 11111, + "tag" : "", + "event" : "unsubscribe", + "email" : "example@domain.com", + "ts" : 1604937337 + } + service: "marketing-events" + status: "warn" + tags: + - "source:LOGS_SOURCE" + timestamp: 1604933737000 + - sample: |- + { + "date" : "2020-10-09 00:00:00", + "X-Mailin-custom" : "some_custom_header", + "subject" : "My first Transactional", + "contact_id" : 8, + "ts_event" : 1604933654, + "tags" : [ "transac_messages" ], + "mirror_link" : "https://app-smtp.brevo.com/log/preview/22-ab68-900d1x9152c", + "sending_ip" : "10.10.10.10", + "message-id" : "2022283@abx.abc.com", + "template_id" : 22, + "id" : 11111, + "event" : "request", + "ts_epoch" : 1604933654, + "email" : "example@domain.com", + "ts" : 1604933619 + } + service: "transactional-events" + result: + custom: + X-Mailin-custom: "some_custom_header" + contact_id: 8 + date: "2020-10-09 00:00:00" + evt: + name: "request" + id: 11111 + message-id: "2022283@abx.abc.com" 
+ mirror_link: "https://app-smtp.brevo.com/log/preview/22-ab68-900d1x9152c" + network: + client: + geoip: {} + ip: "10.10.10.10" + status: "info" + subject: "My first Transactional" + tags: + - "transac_messages" + template_id: 22 + timestamp: 1604933654 + ts: 1604933619 + ts_event: 1604933654 + usr: + email: "example@domain.com" + message: |- + { + "date" : "2020-10-09 00:00:00", + "X-Mailin-custom" : "some_custom_header", + "subject" : "My first Transactional", + "contact_id" : 8, + "ts_event" : 1604933654, + "tags" : [ "transac_messages" ], + "mirror_link" : "https://app-smtp.brevo.com/log/preview/22-ab68-900d1x9152c", + "sending_ip" : "10.10.10.10", + "message-id" : "2022283@abx.abc.com", + "template_id" : 22, + "id" : 11111, + "event" : "request", + "ts_epoch" : 1604933654, + "email" : "example@domain.com", + "ts" : 1604933619 + } + service: "transactional-events" + status: "info" + tags: + - "source:LOGS_SOURCE" + timestamp: 1604933654 + - sample: |- + { + "date" : "2020-10-09 00:00:00", + "X-Mailin-custom" : "some_custom_header", + "subject" : "My first Transactional", + "link" : "https://abdomain.com/product", + "device_used" : "DESKTOP", + "contact_id" : 8, + "ts_event" : 1604933654, + "tags" : [ "transac_messages" ], + "mirror_link" : "https://app-smtp.brevo.com/log/preview/jj", + "sending_ip" : "10.10.10.10", + "message-id" : "2022283@abx.abc.com", + "template_id" : 22, + "id" : 11111, + "event" : "click", + "ts_epoch" : 1604933654, + "email" : "example@domain.com", + "user_agent" : "Mozilla/5.0 (Windows NT 5.1; rv:11.0) Gecko Firefox/11.0 (via ggpht.com GoogleImageProxy)", + "ts" : 1604933619 + } + service: "transactional-events" + result: + custom: + X-Mailin-custom: "some_custom_header" + contact_id: 8 + date: "2020-10-09 00:00:00" + device_used: "DESKTOP" + evt: + name: "click" + http: + url: "https://abdomain.com/product" + url_details: + host: "abdomain.com" + path: "/product" + scheme: "https" + useragent: "Mozilla/5.0 (Windows NT 5.1; rv:11.0) 
Gecko Firefox/11.0 (via ggpht.com GoogleImageProxy)" + useragent_details: + browser: + family: "GmailImageProxy" + device: + category: "Desktop" + family: "Other" + os: + family: "Windows" + major: "XP" + id: 11111 + message-id: "2022283@abx.abc.com" + mirror_link: "https://app-smtp.brevo.com/log/preview/jj" + network: + client: + geoip: {} + ip: "10.10.10.10" + status: "info" + subject: "My first Transactional" + tags: + - "transac_messages" + template_id: 22 + timestamp: 1604933654 + ts: 1604933619 + ts_event: 1604933654 + usr: + email: "example@domain.com" + message: |- + { + "date" : "2020-10-09 00:00:00", + "X-Mailin-custom" : "some_custom_header", + "subject" : "My first Transactional", + "link" : "https://abdomain.com/product", + "device_used" : "DESKTOP", + "contact_id" : 8, + "ts_event" : 1604933654, + "tags" : [ "transac_messages" ], + "mirror_link" : "https://app-smtp.brevo.com/log/preview/jj", + "sending_ip" : "10.10.10.10", + "message-id" : "2022283@abx.abc.com", + "template_id" : 22, + "id" : 11111, + "event" : "click", + "ts_epoch" : 1604933654, + "email" : "example@domain.com", + "user_agent" : "Mozilla/5.0 (Windows NT 5.1; rv:11.0) Gecko Firefox/11.0 (via ggpht.com GoogleImageProxy)", + "ts" : 1604933619 + } + service: "transactional-events" + status: "info" + tags: + - "source:LOGS_SOURCE" + timestamp: 1604933654 + - sample: |- + { + "date" : "2020-10-09 00:00:00", + "X-Mailin-custom" : "some_custom_header", + "reason" : "spam", + "subject" : "My first Transactional", + "ts_event" : 1604933654, + "tags" : [ "transac_messages" ], + "sending_ip" : "10.10.10.10", + "message-id" : "2022283@abx.abc.com", + "template_id" : 22, + "id" : 11111, + "event" : "deferred", + "ts_epoch" : 1604933654, + "email" : "example@domain.com", + "ts" : 1604933619 + } + service: "transactional-events" + result: + custom: + X-Mailin-custom: "some_custom_header" + date: "2020-10-09 00:00:00" + evt: + name: "deferred" + id: 11111 + message-id: "2022283@abx.abc.com" + 
network: + client: + geoip: {} + ip: "10.10.10.10" + reason: "spam" + status: "warning" + subject: "My first Transactional" + tags: + - "transac_messages" + template_id: 22 + timestamp: 1604933654 + ts: 1604933619 + ts_event: 1604933654 + usr: + email: "example@domain.com" + message: |- + { + "date" : "2020-10-09 00:00:00", + "X-Mailin-custom" : "some_custom_header", + "reason" : "spam", + "subject" : "My first Transactional", + "ts_event" : 1604933654, + "tags" : [ "transac_messages" ], + "sending_ip" : "10.10.10.10", + "message-id" : "2022283@abx.abc.com", + "template_id" : 22, + "id" : 11111, + "event" : "deferred", + "ts_epoch" : 1604933654, + "email" : "example@domain.com", + "ts" : 1604933619 + } + service: "transactional-events" + status: "warn" + tags: + - "source:LOGS_SOURCE" + timestamp: 1604933654 + - sample: |- + { + "date" : "2024-10-03 11:29:38", + "reason" : "sent", + "subject" : "test", + "ts_event" : 1727935178, + "sending_ip" : "10.10.10.10", + "service" : "transactional-events", + "message-id" : "<20.21@s-ry.mn.fr>", + "template_id" : 27, + "id" : 1141551, + "tag" : "", + "event" : "delivered", + "ts_epoch" : 1727935178000, + "email" : "abc@test.com", + "ts" : 1727935178, + "sender_email" : "abc@test.py" + } + service: "transactional-events" + result: + custom: + date: "2024-10-03 11:29:38" + evt: + name: "delivered" + id: 1141551 + message-id: "<20.21@s-ry.mn.fr>" + network: + client: + geoip: {} + ip: "10.10.10.10" + reason: "sent" + sender_email: "abc@test.py" + service: "transactional-events" + status: "Success" + subject: "test" + tag: "" + template_id: 27 + timestamp: 1727935178000 + ts: 1727935178 + ts_event: 1727935178 + usr: + email: "abc@test.com" + message: |- + { + "date" : "2024-10-03 11:29:38", + "reason" : "sent", + "subject" : "test", + "ts_event" : 1727935178, + "sending_ip" : "10.10.10.10", + "service" : "transactional-events", + "message-id" : "<20.21@s-ry.mn.fr>", + "template_id" : 27, + "id" : 1141551, + "tag" : "", + "event" 
: "delivered", + "ts_epoch" : 1727935178000, + "email" : "abc@test.com", + "ts" : 1727935178, + "sender_email" : "abc@test.py" + } + service: "transactional-events" + status: "ok" + tags: + - "source:LOGS_SOURCE" + timestamp: 1727935178000 + - sample: |- + { + "date" : "2024-10-03 11:30:46", + "reason" : "Unable to find MX of domain jbsjjsd.com", + "subject" : "test", + "ts_event" : 1727935246, + "sending_ip" : "10.10.10.10", + "service" : "transactional-events", + "message-id" : "<20.211@s-rey.mn.r>", + "template_id" : 27, + "id" : 1141551, + "tag" : "", + "event" : "soft_bounce", + "ts_epoch" : 1727935246000, + "email" : "adss@jbsjjsd.com", + "ts" : 1727935246, + "sender_email" : "test@abc.com" + } + service: "transactional-events" + result: + custom: + date: "2024-10-03 11:30:46" + evt: + name: "soft_bounce" + id: 1141551 + message-id: "<20.211@s-rey.mn.r>" + network: + client: + geoip: {} + ip: "10.10.10.10" + reason: "Unable to find MX of domain jbsjjsd.com" + sender_email: "test@abc.com" + service: "transactional-events" + status: "warning" + subject: "test" + tag: "" + template_id: 27 + timestamp: 1727935246000 + ts: 1727935246 + ts_event: 1727935246 + usr: + email: "adss@jbsjjsd.com" + message: |- + { + "date" : "2024-10-03 11:30:46", + "reason" : "Unable to find MX of domain jbsjjsd.com", + "subject" : "test", + "ts_event" : 1727935246, + "sending_ip" : "10.10.10.10", + "service" : "transactional-events", + "message-id" : "<20.211@s-rey.mn.r>", + "template_id" : 27, + "id" : 1141551, + "tag" : "", + "event" : "soft_bounce", + "ts_epoch" : 1727935246000, + "email" : "adss@jbsjjsd.com", + "ts" : 1727935246, + "sender_email" : "test@abc.com" + } + service: "transactional-events" + status: "warn" + tags: + - "source:LOGS_SOURCE" + timestamp: 1727935246000 + - sample: |- + { + "date" : "2020-10-09 00:00:00", + "X-Mailin-custom" : "some_custom_header", + "subject" : "My first Transactional", + "device_used" : "DESKTOP", + "contact_id" : 8, + "ts_event" : 
1604933654, + "tags" : [ "transac_messages" ], + "mirror_link" : "https://app-smtp.brevo.com/log/preview/1a2000f4-4e33-23aa-ab68-900d1x9152c", + "sending_ip" : "10.10.10.10", + "message-id" : "2022283@abx.abc.com", + "template_id" : 22, + "id" : 11111, + "event" : "unique_opened", + "ts_epoch" : 1604933623, + "email" : "example@domain.com", + "user_agent" : "Mozilla/5.0 (Windows NT 5.1; rv:11.0) Gecko Firefox/11.0 (via ggpht.com GoogleImageProxy)", + "ts" : 1604933619 + } + service: "transactional-events" + result: + custom: + X-Mailin-custom: "some_custom_header" + contact_id: 8 + date: "2020-10-09 00:00:00" + device_used: "DESKTOP" + evt: + name: "unique_opened" + http: + useragent: "Mozilla/5.0 (Windows NT 5.1; rv:11.0) Gecko Firefox/11.0 (via ggpht.com GoogleImageProxy)" + useragent_details: + browser: + family: "GmailImageProxy" + device: + category: "Desktop" + family: "Other" + os: + family: "Windows" + major: "XP" + id: 11111 + message-id: "2022283@abx.abc.com" + mirror_link: "https://app-smtp.brevo.com/log/preview/1a2000f4-4e33-23aa-ab68-900d1x9152c" + network: + client: + geoip: {} + ip: "10.10.10.10" + status: "info" + subject: "My first Transactional" + tags: + - "transac_messages" + template_id: 22 + timestamp: 1604933623 + ts: 1604933619 + ts_event: 1604933654 + usr: + email: "example@domain.com" + message: |- + { + "date" : "2020-10-09 00:00:00", + "X-Mailin-custom" : "some_custom_header", + "subject" : "My first Transactional", + "device_used" : "DESKTOP", + "contact_id" : 8, + "ts_event" : 1604933654, + "tags" : [ "transac_messages" ], + "mirror_link" : "https://app-smtp.brevo.com/log/preview/1a2000f4-4e33-23aa-ab68-900d1x9152c", + "sending_ip" : "10.10.10.10", + "message-id" : "2022283@abx.abc.com", + "template_id" : 22, + "id" : 11111, + "event" : "unique_opened", + "ts_epoch" : 1604933623, + "email" : "example@domain.com", + "user_agent" : "Mozilla/5.0 (Windows NT 5.1; rv:11.0) Gecko Firefox/11.0 (via ggpht.com GoogleImageProxy)", + "ts" : 
1604933619 + } + service: "transactional-events" + status: "info" + tags: + - "source:LOGS_SOURCE" + timestamp: 1604933623 + - sample: |- + { + "date" : "2020-10-09 00:00:00", + "X-Mailin-custom" : "some_custom_header", + "subject" : "My first Transactional", + "message-id" : "2022283@abx.abc.com", + "template_id" : 22, + "id" : 11111, + "event" : "blocked", + "ts_epoch" : 1604933623, + "ts_event" : 1604933654, + "email" : "example@domain.com", + "ts" : 1604933619, + "tags" : [ "transac_messages" ] + } + service: "transactional-events" + result: + custom: + X-Mailin-custom: "some_custom_header" + date: "2020-10-09 00:00:00" + evt: + name: "blocked" + id: 11111 + message-id: "2022283@abx.abc.com" + status: "error" + subject: "My first Transactional" + tags: + - "transac_messages" + template_id: 22 + timestamp: 1604933623 + ts: 1604933619 + ts_event: 1604933654 + usr: + email: "example@domain.com" + message: |- + { + "date" : "2020-10-09 00:00:00", + "X-Mailin-custom" : "some_custom_header", + "subject" : "My first Transactional", + "message-id" : "2022283@abx.abc.com", + "template_id" : 22, + "id" : 11111, + "event" : "blocked", + "ts_epoch" : 1604933623, + "ts_event" : 1604933654, + "email" : "example@domain.com", + "ts" : 1604933619, + "tags" : [ "transac_messages" ] + } + service: "transactional-events" + status: "error" + tags: + - "source:LOGS_SOURCE" + timestamp: 1604933623 + - sample: |- + { + "date" : "2020-10-09 00:00:00", + "X-Mailin-custom" : "some_custom_header", + "subject" : "My first Transactional", + "device_used" : "DESKTOP", + "contact_id" : 8, + "ts_event" : 1604933654, + "tags" : [ "transac_messages" ], + "mirror_link" : "https://app-smtp.brevo.com/log", + "sending_ip" : "10.10.10.10", + "message-id" : "2022283@abx.abc.com", + "template_id" : 22, + "id" : 11111, + "event" : "opened", + "ts_epoch" : 1604933623, + "email" : "example@domain.com", + "user_agent" : "Mozilla/5.0 (Windows NT 5.1; rv:11.0) Gecko Firefox/11.0 (via ggpht.com 
GoogleImageProxy)", + "ts" : 1604933619 + } + service: "transactional-events" + result: + custom: + X-Mailin-custom: "some_custom_header" + contact_id: 8 + date: "2020-10-09 00:00:00" + device_used: "DESKTOP" + evt: + name: "opened" + http: + useragent: "Mozilla/5.0 (Windows NT 5.1; rv:11.0) Gecko Firefox/11.0 (via ggpht.com GoogleImageProxy)" + useragent_details: + browser: + family: "GmailImageProxy" + device: + category: "Desktop" + family: "Other" + os: + family: "Windows" + major: "XP" + id: 11111 + message-id: "2022283@abx.abc.com" + mirror_link: "https://app-smtp.brevo.com/log" + network: + client: + geoip: {} + ip: "10.10.10.10" + status: "info" + subject: "My first Transactional" + tags: + - "transac_messages" + template_id: 22 + timestamp: 1604933623 + ts: 1604933619 + ts_event: 1604933654 + usr: + email: "example@domain.com" + message: |- + { + "date" : "2020-10-09 00:00:00", + "X-Mailin-custom" : "some_custom_header", + "subject" : "My first Transactional", + "device_used" : "DESKTOP", + "contact_id" : 8, + "ts_event" : 1604933654, + "tags" : [ "transac_messages" ], + "mirror_link" : "https://app-smtp.brevo.com/log", + "sending_ip" : "10.10.10.10", + "message-id" : "2022283@abx.abc.com", + "template_id" : 22, + "id" : 11111, + "event" : "opened", + "ts_epoch" : 1604933623, + "email" : "example@domain.com", + "user_agent" : "Mozilla/5.0 (Windows NT 5.1; rv:11.0) Gecko Firefox/11.0 (via ggpht.com GoogleImageProxy)", + "ts" : 1604933619 + } + service: "transactional-events" + status: "info" + tags: + - "source:LOGS_SOURCE" + timestamp: 1604933623 + - sample: |- + { + "date" : "2020-10-09 00:00:00", + "X-Mailin-custom" : "some_custom_header", + "reason" : "server is down", + "subject" : "My first Transactional", + "ts_event" : 1604933654, + "tags" : [ "transac_messages" ], + "sending_ip" : "10.10.10.10", + "message-id" : "12.5787683@abc.domain.com", + "template_id" : 22, + "id" : 11111, + "event" : "hard_bounce", + "ts_epoch" : 1604933653, + "email" : 
"example@domain.com", + "ts" : 1604933619 + } + service: "transactional-events" + result: + custom: + X-Mailin-custom: "some_custom_header" + date: "2020-10-09 00:00:00" + evt: + name: "hard_bounce" + id: 11111 + message-id: "12.5787683@abc.domain.com" + network: + client: + geoip: {} + ip: "10.10.10.10" + reason: "server is down" + status: "error" + subject: "My first Transactional" + tags: + - "transac_messages" + template_id: 22 + timestamp: 1604933653 + ts: 1604933619 + ts_event: 1604933654 + usr: + email: "example@domain.com" + message: |- + { + "date" : "2020-10-09 00:00:00", + "X-Mailin-custom" : "some_custom_header", + "reason" : "server is down", + "subject" : "My first Transactional", + "ts_event" : 1604933654, + "tags" : [ "transac_messages" ], + "sending_ip" : "10.10.10.10", + "message-id" : "12.5787683@abc.domain.com", + "template_id" : 22, + "id" : 11111, + "event" : "hard_bounce", + "ts_epoch" : 1604933653, + "email" : "example@domain.com", + "ts" : 1604933619 + } + service: "transactional-events" + status: "error" + tags: + - "source:LOGS_SOURCE" + timestamp: 1604933653 + - sample: |- + { + "date" : "2020-10-09 00:00:00", + "X-Mailin-custom" : "some_custom_header", + "subject" : "My first transactional", + "device_used" : "MOBILE", + "contact_id" : 8, + "ts_event" : 1604933654, + "mirror_link" : "https://app-smtp.brevo.com/log/preview/1a200", + "sending_ip" : "10.10.10.10", + "message-id" : "201798300811.5787683@abs.domain.com", + "template_id" : 22, + "id" : 11111, + "tag" : "[\"transactionalTag\"]", + "event" : "unsubscribed", + "ts_epoch" : 1604933623, + "email" : "example@domain.com", + "user_agent" : "Mozilla/5.0 (Windows NT 5.1; rv:11.0) Gecko Firefox/11.0 (via ggpht.com GoogleImageProxy)", + "ts" : 1604933619 + } + service: "transactional-events" + result: + custom: + X-Mailin-custom: "some_custom_header" + contact_id: 8 + date: "2020-10-09 00:00:00" + device_used: "MOBILE" + evt: + name: "unsubscribed" + http: + useragent: "Mozilla/5.0 
(Windows NT 5.1; rv:11.0) Gecko Firefox/11.0 (via ggpht.com GoogleImageProxy)" + useragent_details: + browser: + family: "GmailImageProxy" + device: + category: "Desktop" + family: "Other" + os: + family: "Windows" + major: "XP" + id: 11111 + message-id: "201798300811.5787683@abs.domain.com" + mirror_link: "https://app-smtp.brevo.com/log/preview/1a200" + network: + client: + geoip: {} + ip: "10.10.10.10" + status: "warning" + subject: "My first transactional" + tag: "[\"transactionalTag\"]" + template_id: 22 + timestamp: 1604933623 + ts: 1604933619 + ts_event: 1604933654 + usr: + email: "example@domain.com" + message: |- + { + "date" : "2020-10-09 00:00:00", + "X-Mailin-custom" : "some_custom_header", + "subject" : "My first transactional", + "device_used" : "MOBILE", + "contact_id" : 8, + "ts_event" : 1604933654, + "mirror_link" : "https://app-smtp.brevo.com/log/preview/1a200", + "sending_ip" : "10.10.10.10", + "message-id" : "201798300811.5787683@abs.domain.com", + "template_id" : 22, + "id" : 11111, + "tag" : "[\"transactionalTag\"]", + "event" : "unsubscribed", + "ts_epoch" : 1604933623, + "email" : "example@domain.com", + "user_agent" : "Mozilla/5.0 (Windows NT 5.1; rv:11.0) Gecko Firefox/11.0 (via ggpht.com GoogleImageProxy)", + "ts" : 1604933619 + } + service: "transactional-events" + status: "warn" + tags: + - "source:LOGS_SOURCE" + timestamp: 1604933623 \ No newline at end of file diff --git a/brevo/assets/monitors/brevo_high_block_rate_by_subject.json b/brevo/assets/monitors/brevo_high_block_rate_by_subject.json new file mode 100644 index 0000000000000..f117e49ae2199 --- /dev/null +++ b/brevo/assets/monitors/brevo_high_block_rate_by_subject.json @@ -0,0 +1,92 @@ +{ + "version": 2, + "created_at": "2024-09-11", + "last_updated_at": "2024-09-11", + "title": "High Block Rate by Subject", + "description": "This monitor alerts when block rates for transactional emails by subject line are higher than usual. 
A high block rate may indicate that unsubscribed recipients are still receiving emails. This monitor helps to identify and resolve problems to improve the deliverability of transactional messages.", + "definition": { + "id": 152860279, + "name": "High Block Rate by Subject", + "type": "log alert", + "query": "formula(\"(query / query1) * 100\").last(\"1d\") > 10", + "message": "{{#is_warning}} \nThe block rate for subject '{{log.attributes.subject}}' has exceeded the threshold. \nCurrent Block Rate: {{value}}% \nThreshold: {{warn_threshold}}% \n{{/is_warning}}\n\n{{#is_alert}} \nThe block rate for subject '{{log.attributes.subject}}' has exceeded the threshold. \nCurrent Block Rate: {{value}}% \nThreshold: {{threshold}}% \n{{/is_alert}}\n\n\n@example@abc.com", + "tags": [ + "brevo" + ], + "options": { + "thresholds": { + "critical": 10, + "warning": 5 + }, + "enable_logs_sample": false, + "notify_audit": false, + "on_missing_data": "default", + "include_tags": false, + "variables": [ + { + "data_source": "logs", + "name": "query", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [ + { + "facet": "@subject", + "limit": 10, + "sort": { + "order": "desc", + "aggregation": "count" + } + } + ], + "search": { + "query": "source:brevo service:transactional-events @evt.name:blocked" + }, + "storage": "hot" + }, + { + "data_source": "logs", + "name": "query1", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [ + { + "facet": "@subject", + "limit": 10, + "sort": { + "order": "desc", + "aggregation": "count" + } + } + ], + "search": { + "query": "source:brevo service:transactional-events @evt.name:request" + }, + "storage": "hot" + } + ], + "notify_by": [ + "*" + ], + "new_group_delay": 0, + "renotify_interval": 0, + "escalation_message": "", + "groupby_simple_monitor": false, + "silenced": {} + }, + "priority": null, + "restriction_policy": { + "bindings": [] + } + }, + "tags": [ + "integration:brevo" + 
] +} \ No newline at end of file diff --git a/brevo/assets/monitors/brevo_high_bounce_rate_by_subject.json b/brevo/assets/monitors/brevo_high_bounce_rate_by_subject.json new file mode 100644 index 0000000000000..45a217b69cf19 --- /dev/null +++ b/brevo/assets/monitors/brevo_high_bounce_rate_by_subject.json @@ -0,0 +1,90 @@ +{ + "version": 2, + "created_at": "2024-09-11", + "last_updated_at": "2024-09-11", + "title": "High Bounce Rate by Subject", + "description": "This monitor tracks bounce rates for transactional emails by subject line. High bounce rates may indicate invalid addresses or server issues. Use this monitor to help identify and resolve problems to improve the deliverability of transactional messages.", + "definition": { + "id": 152853564, + "name": "High Bounce Rate by Subject", + "type": "log alert", + "query": "formula(\"(query / query1) * 100\").last(\"1d\") > 10", + "message": "{{#is_warning}} \nThe bounce rate for subject '{{log.attributes.subject}}' has exceeded the threshold. \nCurrent Bounce Rate: {{value}}% \nThreshold: {{warn_threshold}}% \n{{/is_warning}}\n\n{{#is_alert}} \nThe bounce rate for subject '{{log.attributes.subject}}' has exceeded the threshold. 
\nCurrent Bounce Rate: {{value}}% \nThreshold: {{threshold}}% \n{{/is_alert}}\n\n\n@example@abc.com", + "tags": [ + "brevo" + ], + "options": { + "thresholds": { + "critical": 10, + "warning": 5 + }, + "enable_logs_sample": false, + "notify_audit": false, + "on_missing_data": "default", + "include_tags": false, + "variables": [ + { + "data_source": "logs", + "name": "query", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [ + { + "facet": "@subject", + "limit": 10, + "sort": { + "order": "desc", + "aggregation": "count" + } + } + ], + "search": { + "query": "source:brevo service:transactional-events @evt.name:(hard_bounce OR soft_bounce)" + }, + "storage": "hot" + }, + { + "data_source": "logs", + "name": "query1", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [ + { + "facet": "@subject", + "limit": 10, + "sort": { + "order": "desc", + "aggregation": "count" + } + } + ], + "search": { + "query": "source:brevo service:transactional-events @evt.name:request" + }, + "storage": "hot" + } + ], + "new_group_delay": 0, + "notify_by": [ + "*" + ], + "groupby_simple_monitor": false, + "silenced": {} + }, + "priority": null, + "restriction_policy": { + "bindings": [] + } + }, + "tags": [ + "integration:brevo" + ] +} \ No newline at end of file diff --git a/brevo/assets/monitors/brevo_high_error_rate_by_subject.json b/brevo/assets/monitors/brevo_high_error_rate_by_subject.json new file mode 100644 index 0000000000000..05c3bc064e4ab --- /dev/null +++ b/brevo/assets/monitors/brevo_high_error_rate_by_subject.json @@ -0,0 +1,91 @@ +{ + "version": 2, + "created_at": "2024-09-11", + "last_updated_at": "2024-09-11", + "title": "High Error Rate by Subject", + "description": "Alerts when error rates for transactional emails by subject line are higher than usual. High error rates may indicate issues with content or recipient details. 
This monitor helps to identify and address problems to ensure reliable delivery of transactional emails.", + "definition": { + "id": 152943106, + "name": "High Error Rate by Subject", + "type": "log alert", + "query": "formula(\"(query / query1) * 100\").last(\"1d\") > 10", + "message": "{{#is_warning}} \nThe error rate for subject '{{log.attributes.subject}}' has exceeded the threshold. \nCurrent Error Rate: {{value}}% \nThreshold: {{warn_threshold}}% \n{{/is_warning}}\n\n{{#is_alert}} \nThe error rate for subject '{{log.attributes.subject}}' has exceeded the threshold. \nCurrent Error Rate: {{value}}% \nThreshold: {{threshold}}% \n{{/is_alert}}\n\n\n@example@abc.com", + "tags": [ + "brevo" + ], + "options": { + "thresholds": { + "critical": 10, + "warning": 5 + }, + "enable_logs_sample": false, + "notify_audit": false, + "on_missing_data": "default", + "include_tags": false, + "variables": [ + { + "data_source": "logs", + "name": "query", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [ + { + "facet": "@subject", + "limit": 10, + "sort": { + "order": "desc", + "aggregation": "count" + } + } + ], + "search": { + "query": "source:brevo service:transactional-events @evt.name:invalid_email" + }, + "storage": "hot" + }, + { + "data_source": "logs", + "name": "query1", + "indexes": [ + "*" + ], + "compute": { + "aggregation": "count" + }, + "group_by": [ + { + "facet": "@subject", + "limit": 10, + "sort": { + "order": "desc", + "aggregation": "count" + } + } + ], + "search": { + "query": "source:brevo service:transactional-events @evt.name:request" + }, + "storage": "hot" + } + ], + "notify_by": [ + "*" + ], + "new_group_delay": 0, + "renotify_interval": 0, + "escalation_message": "", + "silenced": {} + }, + "priority": null, + "restriction_policy": { + "bindings": [] + } + }, + "tags": [ + "integration:brevo" + ] +} \ No newline at end of file diff --git a/brevo/assets/service_checks.json b/brevo/assets/service_checks.json new 
file mode 100644 index 0000000000000..fe51488c7066f --- /dev/null +++ b/brevo/assets/service_checks.json @@ -0,0 +1 @@ +[] diff --git a/brevo/images/brevo_marketing_events.png b/brevo/images/brevo_marketing_events.png new file mode 100644 index 0000000000000..179ce85362c2d Binary files /dev/null and b/brevo/images/brevo_marketing_events.png differ diff --git a/brevo/images/brevo_transactional_events.png b/brevo/images/brevo_transactional_events.png new file mode 100644 index 0000000000000..6646cc61c6ade Binary files /dev/null and b/brevo/images/brevo_transactional_events.png differ diff --git a/brevo/manifest.json b/brevo/manifest.json new file mode 100644 index 0000000000000..5512abd8d0365 --- /dev/null +++ b/brevo/manifest.json @@ -0,0 +1,62 @@ +{ + "manifest_version": "2.0.0", + "app_uuid": "df939a65-33b7-4bc8-848f-bfe65c022332", + "app_id": "brevo", + "display_on_public_website": false, + "tile": { + "overview": "README.md#Overview", + "configuration": "README.md#Setup", + "support": "README.md#Support", + "changelog": "CHANGELOG.md", + "description": "Gain insights into Brevo marketing and transactional events.", + "title": "Brevo", + "media": [ + { + "caption": "Brevo - Transactional Events", + "image_url": "images/brevo_transactional_events.png", + "media_type": "image" + }, + { + "caption": "Brevo - Marketing Events", + "image_url": "images/brevo_marketing_events.png", + "media_type": "image" + } + ], + "classifier_tags": [ + "Category::Log Collection", + "Submitted Data Type::Logs", + "Offering::Integration" + ] + }, + "assets": { + "integration": { + "auto_install": false, + "source_type_id": 24822827, + "source_type_name": "Brevo", + "events": { + "creates_events": false + }, + "service_checks": { + "metadata_path": "assets/service_checks.json" + } + }, + "dashboards": { + "Brevo - Transactional Events" : "assets/dashboards/brevo_transactional_events.json", + "Brevo - Marketing Events" : "assets/dashboards/brevo_marketing_events.json" + }, + "logs": { + 
"source": "brevo" + }, + "monitors": { + "High Block Rate by Subject" : "assets/monitors/brevo_high_block_rate_by_subject.json", + "High Bounce Rate by Subject" : "assets/monitors/brevo_high_bounce_rate_by_subject.json", + "High Error Rate by Subject" : "assets/monitors/brevo_high_error_rate_by_subject.json" + } + }, + "author": { + "support_email": "help@datadoghq.com", + "name": "Datadog", + "homepage": "https://www.datadoghq.com", + "sales_email": "info@datadoghq.com" + } +} diff --git a/cisco_aci/CHANGELOG.md b/cisco_aci/CHANGELOG.md index 25c1008c338ae..550eef386c658 100644 --- a/cisco_aci/CHANGELOG.md +++ b/cisco_aci/CHANGELOG.md @@ -2,7 +2,7 @@ -## 4.1.0 / 2024-10-31 +## 4.1.0 / 2024-10-31 / Agent 7.60.0 ***Added***: diff --git a/cisco_aci/assets/monitors/interface_down.json b/cisco_aci/assets/monitors/interface_down.json new file mode 100644 index 0000000000000..24b964e0c021e --- /dev/null +++ b/cisco_aci/assets/monitors/interface_down.json @@ -0,0 +1,26 @@ +{ + "version": 2, + "created_at": "2024-12-04", + "last_updated_at": "2024-12-04", + "title": "Interface is down on a Cisco ACI device", + "description": "This monitor checks the status of interfaces on Cisco ACI devices. 
It alerts if one reports as 'down'.", + "definition": { + "name": "[Cisco ACI] Interface {{port.name}} down alert on device {{device_ip.name}} in namespace {{device_namespace.name}}", + "type": "query alert", + "query": "avg(last_5m):avg:cisco_aci.fabric.port.status{status:down} by {device_ip,device_namespace,port} == 1", + "message": "{{#is_alert}}\nInterface {{port.name}} of network device with IP {{device_ip.name}} in namespace {{device_namespace.name}} is reporting DOWN.\n{{/is_alert}}\n\n{{#is_alert_recovery}}\nInterface {{port.name}} of network device with IP {{device_ip.name}} in namespace {{device_namespace.name}} is back online.\n{{/is_alert_recovery}}\n\nTo know more about the status of your device, you can have more information from the [NDM page for the device {{device_namespace.name}}:{{device_ip.name}}](/infrastructure/devices/graph?inspectedDevice={{device_namespace.name}}%3A{{device_ip.name}}&detailsTab=interfaces).", + "tags": [], + "options": { + "thresholds": { + "critical": 1 + }, + "notify_audit": false, + "on_missing_data": "default", + "include_tags": true, + "new_group_delay": 60 + } + }, + "tags": [ + "integration:cisco-aci" + ] +} \ No newline at end of file diff --git a/cisco_aci/datadog_checks/cisco_aci/fabric.py b/cisco_aci/datadog_checks/cisco_aci/fabric.py index 8698ef5a7e09c..2812d145f4596 100644 --- a/cisco_aci/datadog_checks/cisco_aci/fabric.py +++ b/cisco_aci/datadog_checks/cisco_aci/fabric.py @@ -268,5 +268,5 @@ def get_fabric_type(self, obj_type): def submit_interface_status_metric(self, status, tags, hostname): if status: new_tags = tags.copy() - new_tags.extend(["port.status:{}".format(status)]) + new_tags.extend(["status:{}".format(status)]) self.gauge('cisco_aci.fabric.port.status', 1, tags=new_tags, hostname=hostname) diff --git a/cisco_aci/manifest.json b/cisco_aci/manifest.json index 1201cc8ed2374..6a46e198827d0 100644 --- a/cisco_aci/manifest.json +++ b/cisco_aci/manifest.json @@ -50,7 +50,8 @@ }, "monitors": { "CPU 
usage is high for Cisco ACI device": "assets/monitors/cpu_high.json", - "Health score of device is critical": "assets/monitors/critical_health_score.json" + "Health score of device is critical": "assets/monitors/critical_health_score.json", + "Interface for a Cisco ACI device is down": "assets/monitors/interface_down.json" } } } \ No newline at end of file diff --git a/cisco_aci/tests/test_fabric.py b/cisco_aci/tests/test_fabric.py index 1280ea1459dbc..6f3868715c9bd 100644 --- a/cisco_aci/tests/test_fabric.py +++ b/cisco_aci/tests/test_fabric.py @@ -128,7 +128,7 @@ def test_fabric_mocked(aggregator): 'device_namespace:{}'.format(device_namespace), 'device_hostname:{}'.format(device_hn), 'device_id:{}'.format(interface.device_id), - 'port.status:{}'.format(interface.status), + 'status:{}'.format(interface.status), 'dd.internal.resource:ndm_device_user_tags:{}'.format(interface.device_id), 'dd.internal.resource:ndm_interface_user_tags:{}:{}'.format(interface.device_id, interface.index), ] diff --git a/cisco_sdwan/README.md b/cisco_sdwan/README.md index e383bfb9c47e1..d50607744f24d 100644 --- a/cisco_sdwan/README.md +++ b/cisco_sdwan/README.md @@ -7,7 +7,7 @@ The Cisco SD-WAN integration lets you monitor your Cisco SD-WAN environment with ## Setup -**The Cisco SD-WAN NDM integration is in Beta and will not be billed until it is Generally Available.** +**The Cisco SD-WAN NDM integration is in Preview and will not be billed until it is Generally Available.** Follow the instructions below to install and configure this check for an Agent running on a host. For containerized environments, see the [Autodiscovery Integration Templates][3] for guidance on applying these instructions. 
diff --git a/cloudera/CHANGELOG.md b/cloudera/CHANGELOG.md index bff7948ba4942..a64f78d510fe6 100644 --- a/cloudera/CHANGELOG.md +++ b/cloudera/CHANGELOG.md @@ -2,7 +2,7 @@ -## 3.2.0 / 2024-10-23 +## 3.2.0 / 2024-10-23 / Agent 7.60.0 ***Added***: diff --git a/container/metadata.csv b/container/metadata.csv index 6e62e7a5c2996..b542732d39dea 100644 --- a/container/metadata.csv +++ b/container/metadata.csv @@ -30,4 +30,5 @@ container.net.sent.packets,gauge,,,,The number of network packets sent (per inte container.pid.open_files,gauge,,,,The number of open file descriptors (Linux only),0,container,open_files, container.pid.thread_count,gauge,,,,The number of threads running inside this container,0,container,thread_count, container.pid.thread_limit,gauge,,,,The maximum number of threads for this container,0,container,thread_limit, +container.restarts,gauge,,,,The number of times the container restarted,0,container,restarts, container.uptime,gauge,,second,,The container uptime,0,container,uptime, diff --git a/databricks/README.md b/databricks/README.md index 57ad4cc4fa37a..45269de30fc76 100644 --- a/databricks/README.md +++ b/databricks/README.md @@ -1,8 +1,8 @@ # Agent Check: Databricks -
+
Data Jobs Monitoring helps you observe, troubleshoot, and cost-optimize your Databricks jobs and clusters.

-This page is limited to documentation for ingesting Databricks cluster utilization metrics and logs. +This page is limited to documentation for ingesting Databricks model serving metrics and cluster utilization data.
![Databricks default dashboard][21] @@ -23,11 +23,27 @@ Model serving metrics provide insights into how your Databricks model serving i ## Setup ### Installation +Gain insight into the health of your model serving infrastructure by following the [Model Serving Configuration](#model-serving-configuration) instructions. -Monitor Databricks Spark applications with the [Datadog Spark integration][3]. Install the [Datadog Agent][4] on your clusters following the [configuration](#configuration) instructions for your appropriate cluster. After that, install the [Spark integration][23] on Datadog to autoinstall the Databricks Overview dashboard. +Monitor Databricks Spark applications with the [Datadog Spark integration][3]. Install the [Datadog Agent][4] on your clusters following the [configuration](#spark-configuration) instructions for your appropriate cluster. Refer to [Spark Configuration](#spark-configuration) instructions. ### Configuration +#### Model Serving Configuration +1. In your Databricks workspace, click on your profile in the top right corner and go to **Settings**. Select **Developer** in the left side bar. Next to **Access tokens**, click **Manage**. +2. Click **Generate new token**, enter "Datadog Integration" in the **Comment** field, remove the default value in **Lifetime (days)**, and click **Generate**. Take note of your token. + **Important:** + * Make sure you delete the default value in **Lifetime (days)** so that the token doesn't expire and the integration doesn't break. + * Ensure the account generating the token has [CAN VIEW access][30] for the Databricks jobs and clusters you want to monitor. + + As an alternative, follow the [official Databricks documentation][31] to generate an access token for a [service principal][31]. + +3. In Datadog, open the Databricks integration tile. +4. On the **Configure** tab, click **Add Databricks Workspace**. +5. Enter a workspace name, your Databricks workspace URL, and the Databricks token you generated. 
+6. In the **Select resources to set up collection** section, make sure **Metrics - Model Serving** is **Enabled**. + +#### Spark Configuration Configure the Spark integration to monitor your Apache Spark Cluster on Databricks and collect system and Spark metrics. Each script described below can be modified to suits your needs. For instance, you can: @@ -452,8 +468,10 @@ chmod a+x /tmp/start_datadog.sh ## Data Collected ### Metrics - -See the [Spark integration documentation][8] for a list of metrics collected. +#### Model Serving Metrics +See [metadata.csv][29] for a list of metrics provided by this integration. +#### Spark Metrics +See the [Spark integration documentation][8] for a list of Spark metrics collected. ### Service Checks @@ -501,3 +519,6 @@ Additional helpful documentation, links, and articles: [26]: https://www.datadoghq.com/product/cloud-cost-management/ [27]: https://www.datadoghq.com/product/log-management/ [28]: https://docs.datadoghq.com/integrations/databricks/?tab=driveronly +[29]: https://github.com/DataDog/integrations-core/blob/master/databricks/metadata.csv +[30]: https://docs.databricks.com/en/security/auth-authz/access-control/index.html#job-acls +[31]: https://docs.databricks.com/en/admin/users-groups/service-principals.html#what-is-a-service-principal \ No newline at end of file diff --git a/databricks/images/databricks_setup.png b/databricks/images/databricks_setup.png new file mode 100644 index 0000000000000..72ed0cccfc55d Binary files /dev/null and b/databricks/images/databricks_setup.png differ diff --git a/datadog_checks_base/CHANGELOG.md b/datadog_checks_base/CHANGELOG.md index 31b58665f0adf..03d73bac13c24 100644 --- a/datadog_checks_base/CHANGELOG.md +++ b/datadog_checks_base/CHANGELOG.md @@ -2,7 +2,7 @@ -## 37.2.0 / 2024-12-05 +## 37.2.0 / 2024-12-05 / Agent 7.60.0 ***Added***: diff --git a/datadog_checks_downloader/CHANGELOG.md b/datadog_checks_downloader/CHANGELOG.md index 2516d2e2a3669..af751e52a5ab5 100644 --- 
a/datadog_checks_downloader/CHANGELOG.md +++ b/datadog_checks_downloader/CHANGELOG.md @@ -8,7 +8,7 @@ * v16 ceremony: bump root layout to v6. ([#19146](https://github.com/DataDog/integrations-core/pull/19146)) -## 6.1.0 / 2024-10-31 +## 6.1.0 / 2024-10-31 / Agent 7.60.0 ***Added***: diff --git a/datadog_cluster_agent/CHANGELOG.md b/datadog_cluster_agent/CHANGELOG.md index ada595d90c490..cc44ae90777ee 100644 --- a/datadog_cluster_agent/CHANGELOG.md +++ b/datadog_cluster_agent/CHANGELOG.md @@ -2,7 +2,7 @@ -## 5.1.0 / 2024-10-31 +## 5.1.0 / 2024-10-31 / Agent 7.60.0 ***Added***: diff --git a/ddev/changelog.d/17353.added b/ddev/changelog.d/17353.added new file mode 100644 index 0000000000000..f954084777bcd --- /dev/null +++ b/ddev/changelog.d/17353.added @@ -0,0 +1 @@ +First version of helper for creating logs saved views. diff --git a/ddev/changelog.d/19252.added b/ddev/changelog.d/19252.added new file mode 100644 index 0000000000000..976e3f4bbada6 --- /dev/null +++ b/ddev/changelog.d/19252.added @@ -0,0 +1 @@ +Add decimal/binary specific byte units diff --git a/ddev/src/ddev/cli/meta/scripts/__init__.py b/ddev/src/ddev/cli/meta/scripts/__init__.py index 5605258b6499d..d70d059bee64c 100644 --- a/ddev/src/ddev/cli/meta/scripts/__init__.py +++ b/ddev/src/ddev/cli/meta/scripts/__init__.py @@ -8,6 +8,7 @@ from ddev.cli.meta.scripts.generate_metrics import generate_metrics from ddev.cli.meta.scripts.monitor import monitor +from ddev.cli.meta.scripts.saved_views import sv from ddev.cli.meta.scripts.serve_openmetrics_payload import serve_openmetrics_payload from ddev.cli.meta.scripts.upgrade_python import upgrade_python @@ -25,4 +26,5 @@ def scripts(): scripts.add_command(remove_labels) scripts.add_command(serve_openmetrics_payload) scripts.add_command(upgrade_python) +scripts.add_command(sv) scripts.add_command(monitor) diff --git a/ddev/src/ddev/cli/meta/scripts/saved_views.py b/ddev/src/ddev/cli/meta/scripts/saved_views.py new file mode 100644 index 
0000000000000..be44a5dabc0e0 --- /dev/null +++ b/ddev/src/ddev/cli/meta/scripts/saved_views.py @@ -0,0 +1,74 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +import json +import webbrowser +from urllib.parse import parse_qs, urlparse + +import click + +TIP = """ +We're about to open JSON definition of the saved view in your browser. Copy the JSON on that page and paste at +the prompt. Hit any key to continue. +""" + + +def _convert_options(api_options): + stream = api_options["stream"] + asset_options = { + "columns": stream["columns"], + } + for old_f, new_f in { + "showDateColumn": "show_date_column", + "showMessageColumn": "show_message_column", + "messageDisplay": "message_display", + "showTimeline": "show_timeline", + }.items(): + if old_f in stream: + asset_options[new_f] = stream[old_f] + + return asset_options + + +def convert_to_asset(sv_json): + """ + Take saved view json from the API and convert it to an asset definition. + """ + asset_def = {} + logs_view = sv_json["logs_view"] + + keep_as_is = ( + "name", + "type", + "page", + ) + for f in keep_as_is: + asset_def[f] = logs_view[f] + + renames = {"search": "query", "facets": "visible_facets"} + for old_f, new_f in renames.items(): + asset_def[new_f] = logs_view[old_f] + asset_def["options"] = _convert_options(logs_view["options"]) + asset_def["timerange"] = {"interval_ms": logs_view["timerange"]["interval"]} + return asset_def + + +@click.command() +@click.argument('saved_view_permalink', type=str) +def sv(saved_view_permalink): + """ + Helper for working with Logs Saved Views. + + Accepts a permalink to a saved view, then guides you towards creating an asset definition in JSON. + + VERY EARLY VERSION, MAKE SURE TO CHECK --help FOR CHANGES BEFORE USING. + """ + parsed_url = urlparse(saved_view_permalink) + # urllib parses query values as lists, so we must take the first (and only) element as the ID. 
+ sv_id = parse_qs(parsed_url.query)['saved-view-id'][0] + json_url = parsed_url._replace(path=f"/api/v1/logs/views/{sv_id}", query='').geturl() + input(TIP) + webbrowser.open(json_url) + sv_json = json.loads(click.prompt(text="Paste your JSON here, then hit ENTER ", prompt_suffix="> ")) + + click.echo(json.dumps(convert_to_asset(sv_json), indent=2, sort_keys=True)) diff --git a/ddev/src/ddev/cli/validate/metadata_utils.py b/ddev/src/ddev/cli/validate/metadata_utils.py index ec31f5c83b8c1..87eeebf6ce7e4 100644 --- a/ddev/src/ddev/cli/validate/metadata_utils.py +++ b/ddev/src/ddev/cli/validate/metadata_utils.py @@ -392,6 +392,10 @@ "yemeni rial", "zambian kwacha", "zimbabwe gold", + "bit_in_decimal_bytes_family", + "byte_in_decimal_bytes_family", + "bit_in_binary_bytes_family", + "byte_in_binary_bytes_family", } ALLOWED_PREFIXES = ('system.', 'jvm.', 'http.', 'datadog.', 'sftp.', 'process.', 'runtime.', 'otelcol_') diff --git a/godaddy/README.md b/godaddy/README.md index ba537ede595f4..eb6783f312378 100644 --- a/godaddy/README.md +++ b/godaddy/README.md @@ -1,40 +1,39 @@ # GoDaddy ## Overview -GoDaddy is a web hosting and domain registration company that helps individuals and businesses establish their online presence. One of their key offerings is SSL certificate services. GoDaddy provides several types of SSL certificates, including Standard SSL for securing one site, Wildcard SSL for securing multiple subdomains, and advanced solutions for ecommerce sites requiring enhanced security. +[GoDaddy][5] is a web hosting and domain registration company that helps individuals and businesses establish their online presence. One of their key offerings is SSL certificate services. GoDaddy provides several types of SSL certificates, including Standard SSL for securing one site, Wildcard SSL for securing multiple subdomains, and advanced solutions for ecommerce sites requiring enhanced security. 
The GoDaddy integration collects metrics from SSL certificates and their domains, directing them into Datadog for analysis. This integration provides data points such as the total number of certificates, issued certificates, expired certificates, revoked certificates, and domains associated with each certificate. It also includes specific metrics for certificates nearing expiration. All these metrics are accessible through out-of-the-box dashboards and monitors. ## Setup -### Get API credentials from GoDaddy +### Generate API credentials in GoDaddy -#### Find your GoDaddy API key and API secret +1. Navigate to the [GoDaddy Developer Portal][1]. +2. Sign in with your GoDaddy account. +3. Select "API Keys". +4. Choose "Create New API Key". +5. Provide a name for your API. +6. Select "Production" under Environment. +7. Click "Next". Your API Key is now created. +8. Click on "Got It". -- Navigate to the [GoDaddy Developer Portal][1]. -- Sign in with your GoDaddy account. -- Select "API Keys." -- Choose "Create New API Key." -- Provide a name for your API. -- Select "Production" under Environment. -- Click "Next." Your API Key is now created. -- Copy these credentials for the following configuration steps. Ensure they are stored securely and not exposed in public repositories or insecure locations. -- After storing your API Key and Secret, click on "Got It." +### Find your GoDaddy customer number -#### Find your GoDaddy customer number +1. Go to your GoDaddy [Login & PIN page][2]. You might be prompted to sign in. +2. Under **Login Info**, find your **Customer number** (also known as your **shopper ID**). -- Go to your GoDaddy [Login & PIN page][2]. You might be prompted to sign in. -Under **Login Info**, find your **Customer number** (also known as your **shopper ID**). +### Connect your GoDaddy Account to Datadog -### GoDaddy Datadog Integration Configuration +1. 
Add your API key, secret key and customer number -Configure the Datadog endpoint to forward GoDaddy metrics to Datadog. +| Parameters | Description | +| ---------------------------------------- | ------------------------------------------------------------ | +| GoDaddy API key | The API Key of your GoDaddy Account | +| GoDaddy secret key | The API Secret of your GoDaddy Account | +| GoDaddy customer number (or shopper ID) | The customer number(shopper ID) of your GoDaddy Account | -1. Navigate to `GoDaddy`. -2. Add your GoDaddy credentials. - - GoDaddy API key - - GoDaddy secret key - - GoDaddy customer number (or shopper ID) +2. Click the Save button to save your settings. ## Data Collected @@ -44,7 +43,9 @@ The GoDaddy integration does not include any logs. ### Metrics -The GoDaddy integration collects and forwards Certificates and their Domains metrics to Datadog. See [metadata.csv][5] for a list of metrics provided by this integration. +The GoDaddy integration collects and forwards Certificates and their Domains metrics to Datadog. + +{{< get-metrics-from-git "godaddy" >}} ### Events @@ -58,4 +59,4 @@ For further assistance, contact [Datadog Support][4]. 
[2]: https://sso.godaddy.com/security [3]: https://developer.godaddy.com/doc/ [4]: https://docs.datadoghq.com/help/ -[5]: https://github.com/DataDog/integrations-core/blob/master/godaddy/metadata.csv +[5]: https://www.godaddy.com/en-in \ No newline at end of file diff --git a/godaddy/manifest.json b/godaddy/manifest.json index b44480a339206..fdab35a8b219f 100644 --- a/godaddy/manifest.json +++ b/godaddy/manifest.json @@ -2,7 +2,7 @@ "manifest_version": "2.0.0", "app_uuid": "6e01b4b9-7604-4628-9203-c1f042f941aa", "app_id": "godaddy", - "display_on_public_website": false, + "display_on_public_website": true, "tile": { "overview": "README.md#Overview", "configuration": "README.md#Setup", diff --git a/kafka_consumer/CHANGELOG.md b/kafka_consumer/CHANGELOG.md index 7e993f84c168e..e1c2864c1b791 100644 --- a/kafka_consumer/CHANGELOG.md +++ b/kafka_consumer/CHANGELOG.md @@ -2,7 +2,7 @@ -## 6.1.0 / 2024-11-25 +## 6.1.0 / 2024-11-25 / Agent 7.60.0 ***Security***: diff --git a/kubeflow/README.md b/kubeflow/README.md index ac0046026ab9f..124c83c6b5f06 100644 --- a/kubeflow/README.md +++ b/kubeflow/README.md @@ -122,8 +122,6 @@ The Kubeflow integration does not include any events. ### Service Checks -The Kubeflow integration does not include any service checks. - See [service_checks.json][8] for a list of service checks provided by this integration. ## Troubleshooting diff --git a/mailchimp/README.md b/mailchimp/README.md index 18a6679a862c2..063cb6da1935d 100644 --- a/mailchimp/README.md +++ b/mailchimp/README.md @@ -13,29 +13,26 @@ The Mailchimp integration collects metrics from campaigns and lists, directing t ## Setup -### Configuration +### Generate API credentials in Mailchimp -#### Get API credentials for Mailchimp +1. Log in to your [Mailchimp account][2]. +2. Check the url for the Server prefix. It is the `xxxx`part of the url(eg: `https://xxxx.admin.mailchimp.com/`). +3. Click on the profile icon and select Profile option. +4. 
Navigate to the **Extras** tab and click on **API keys** from the Dropdown. +5. Scroll down to the **Your API Keys** section and click **Create A Key**. +6. Enter your preferred name for the API key and click on **Generate Key**. Your API key is now generated. -1. Log in to your [Mailchimp account][2] and click the profile icon -2. Navigate to the **Extras** tab -3. Click **API keys** -4. Scroll down to the **Your API Keys** section and click **Create A Key** -5. Enter your preferred name for the API key and click **Ok** -6. Once the API key is generated, copy and save it as you will only see it once +### Connect your Mailchimp Account to Datadog -#### Mailchimp DataDog integration configuration +1. Add your Marketing API Key and Server Prefix + |Parameters|Description| + |--------------------|--------------------| + |Marketing API key|API key for your Mailchimp marketing account.| + |Server prefix|Server prefix (for example: us13) of the Mailchimp account.| -Configure the Datadog endpoint to forward Mailchimp metrics to Datadog. +2. Click the Save button to save your settings. -1. Navigate to the `Mailchimp` integration tile in Datadog. -2. Add your Mailchimp credentials. - -| Mailchimp parameters | Description | -| -------------------- | ------------ | -| Marketing API key | API key for your Mailchimp marketing account. | -| Server prefix | Server prefix (for example: us13) of the Mailchimp account. It is the `xxxx` part of `https://xxxx.admin.mailchimp.com/`. | ## Data Collected @@ -43,6 +40,8 @@ Configure the Datadog endpoint to forward Mailchimp metrics to Datadog. The Mailchimp integration collects and forwards campaign and list (audience) metrics to Datadog. +{{< get-metrics-from-git "mailchimp" >}} + ### Service Checks The Mailchimp integration does not include any service checks. 
diff --git a/mongo/CHANGELOG.md b/mongo/CHANGELOG.md index 61820b10cfe17..682f4352023f3 100644 --- a/mongo/CHANGELOG.md +++ b/mongo/CHANGELOG.md @@ -16,7 +16,7 @@ * Fix crash in DBM operation samples collection when a node is in recovering mode. ([#19080](https://github.com/DataDog/integrations-core/pull/19080)) * Resolved deprecation warning for `collStats` by using `$collStats` aggregation pipeline to collect oplog size in MongoDB 6.2+. ([#19133](https://github.com/DataDog/integrations-core/pull/19133)) -## 8.2.1 / 2024-11-06 +## 8.2.1 / 2024-11-06 / Agent 7.60.0 ***Fixed***: diff --git a/mux/assets/dashboards/mux_metrics.json b/mux/assets/dashboards/mux_metrics.json index 0477d130ad678..1552da7ff184a 100644 --- a/mux/assets/dashboards/mux_metrics.json +++ b/mux/assets/dashboards/mux_metrics.json @@ -32,7 +32,7 @@ "hide_zero_counts": true, "show_status": true, "last_triggered_format": "relative", - "query": "tag:integration:mux $Video_Id", + "query": "tag:integration:mux", "sort": "status,asc", "count": 50, "start": 0, @@ -694,13 +694,28 @@ "title_size": "16", "title_align": "left", "show_legend": false, + "legend_layout": "auto", + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "time": {}, "type": "timeseries", "requests": [ { "formulas": [ { "alias": "Playback Failure Percentage", - "formula": "query1 * 100" + "formula": "query1 * 100", + "number_format": { + "unit": { + "type": "canonical_unit", + "unit_name": "percent" + } + } } ], "queries": [ @@ -786,13 +801,28 @@ "title_size": "16", "title_align": "left", "show_legend": false, + "legend_layout": "auto", + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "time": {}, "type": "timeseries", "requests": [ { "formulas": [ { "alias": "Video Startup Failure Percentage", - "formula": "query1 * 100" + "formula": "query1 * 100", + "number_format": { + "unit": { + "type": "canonical_unit", + "unit_name": "percent" + } + } } ], "queries": [ @@ -886,13 +916,20 @@ 
"value", "sum" ], + "time": {}, "type": "timeseries", "requests": [ { "formulas": [ { "alias": "Exits Before Video Start", - "formula": "query1 * 100" + "formula": "query1 * 100", + "number_format": { + "unit": { + "type": "canonical_unit", + "unit_name": "percent" + } + } } ], "queries": [ @@ -986,13 +1023,20 @@ "value", "sum" ], + "time": {}, "type": "timeseries", "requests": [ { "formulas": [ { "alias": "Video Startup Business Exception Percentage", - "formula": "query1 * 100" + "formula": "query1 * 100", + "number_format": { + "unit": { + "type": "canonical_unit", + "unit_name": "percent" + } + } } ], "queries": [ @@ -1086,13 +1130,20 @@ "value", "sum" ], + "time": {}, "type": "timeseries", "requests": [ { "formulas": [ { "alias": "Video Startup Business Exception Percentage", - "formula": "query1 * 100" + "formula": "query1 * 100", + "number_format": { + "unit": { + "type": "canonical_unit", + "unit_name": "percent" + } + } } ], "queries": [ @@ -1176,6 +1227,7 @@ "title": "Top Videos with High Playback Failure Percentage", "title_size": "16", "title_align": "left", + "time": {}, "type": "toplist", "requests": [ { @@ -1190,7 +1242,13 @@ "response_format": "scalar", "formulas": [ { - "formula": "query1 * 100" + "formula": "query1 * 100", + "number_format": { + "unit": { + "type": "canonical_unit", + "unit_name": "percent" + } + } } ], "sort": { @@ -2379,12 +2437,19 @@ "title": "Average Rebuffer Percentage", "title_size": "16", "title_align": "left", + "time": {}, "type": "query_value", "requests": [ { "formulas": [ { - "formula": "query1 * 100" + "formula": "query1 * 100", + "number_format": { + "unit": { + "type": "canonical_unit", + "unit_name": "percent" + } + } } ], "queries": [ @@ -2433,13 +2498,20 @@ "value", "sum" ], + "time": {}, "type": "timeseries", "requests": [ { "formulas": [ { "alias": "Rebuffer Percentage", - "formula": "query1 * 100" + "formula": "query1 * 100", + "number_format": { + "unit": { + "type": "canonical_unit", + "unit_name": "percent" 
+ } + } } ], "queries": [ @@ -2533,13 +2605,20 @@ "value", "sum" ], + "time": {}, "type": "timeseries", "requests": [ { "formulas": [ { "alias": "Rebuffer Frequency", - "formula": "query1" + "formula": "query1", + "number_format": { + "unit": { + "type": "canonical_unit", + "per_unit_name": "minute" + } + } } ], "queries": [ @@ -2896,13 +2975,20 @@ "value", "sum" ], + "time": {}, "type": "timeseries", "requests": [ { "formulas": [ { "alias": "Weighted Average Bitrate", - "formula": "query1" + "formula": "query1", + "number_format": { + "unit_scale": { + "type": "canonical_unit", + "unit_name": "bit" + } + } } ], "queries": [ @@ -2996,13 +3082,20 @@ "value", "sum" ], + "time": {}, "type": "timeseries", "requests": [ { "formulas": [ { "alias": "Upscale Percentage", - "formula": "query1 * 100" + "formula": "query1 * 100", + "number_format": { + "unit": { + "type": "canonical_unit", + "unit_name": "percent" + } + } } ], "queries": [ @@ -3096,13 +3189,20 @@ "value", "sum" ], + "time": {}, "type": "timeseries", "requests": [ { "formulas": [ { "alias": "Downscale Percentage", - "formula": "query1 * 100" + "formula": "query1 * 100", + "number_format": { + "unit": { + "type": "canonical_unit", + "unit_name": "percent" + } + } } ], "queries": [ @@ -3197,13 +3297,20 @@ "value", "sum" ], + "time": {}, "type": "timeseries", "requests": [ { "formulas": [ { "alias": "Max Upscale Percentage", - "formula": "query1 * 100" + "formula": "query1 * 100", + "number_format": { + "unit": { + "type": "canonical_unit", + "unit_name": "percent" + } + } } ], "queries": [ @@ -3290,13 +3397,28 @@ "title_size": "16", "title_align": "left", "show_legend": false, + "legend_layout": "auto", + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "time": {}, "type": "timeseries", "requests": [ { "formulas": [ { "alias": "Max Downscale Percentage", - "formula": "query1 * 100" + "formula": "query1 * 100", + "number_format": { + "unit": { + "type": "canonical_unit", + "unit_name": 
"percent" + } + } } ], "queries": [ diff --git a/mux/images/mux_metrics_3.png b/mux/images/mux_metrics_3.png index 26221160c989b..c62f95004d5b3 100644 Binary files a/mux/images/mux_metrics_3.png and b/mux/images/mux_metrics_3.png differ diff --git a/mux/images/mux_overview.png b/mux/images/mux_overview.png deleted file mode 100644 index 4825148a62c2a..0000000000000 Binary files a/mux/images/mux_overview.png and /dev/null differ diff --git a/mysql/CHANGELOG.md b/mysql/CHANGELOG.md index 8955a0d54701d..30d4995571bb1 100644 --- a/mysql/CHANGELOG.md +++ b/mysql/CHANGELOG.md @@ -9,7 +9,7 @@ * Added the `dbms_flavor` tag to MySQL integration metrics and events to identify the database type. This tag indicates whether the database is MySQL or MariaDB. ([#18950](https://github.com/DataDog/integrations-core/pull/18950)) * Submit database_hostname with database instance and metrics for MySQL, Postgres, and SQLServer ([#18969](https://github.com/DataDog/integrations-core/pull/18969)) -## 14.2.0 / 2024-11-06 +## 14.2.0 / 2024-11-06 / Agent 7.60.0 ***Added***: diff --git a/mysql/metadata.csv b/mysql/metadata.csv index 027b8ce425e3a..604d18414b7f1 100644 --- a/mysql/metadata.csv +++ b/mysql/metadata.csv @@ -108,7 +108,7 @@ mysql.innodb.queries_inside,gauge,,query,,As shown in the FILE I/O section of th mysql.innodb.queries_queued,gauge,,query,,As shown in the FILE I/O section of the SHOW ENGINE INNODB STATUS output.,0,mysql,mysql innodb queries queued, mysql.innodb.read_views,gauge,,,,As shown in the FILE I/O section of the SHOW ENGINE INNODB STATUS output.,0,mysql,mysql innodb read views, mysql.innodb.row_lock_current_waits,gauge,,,,The number of row locks currently being waited for by operations on InnoDB tables.,0,mysql,mysql innodb row_lock_current_waits, -mysql.innodb.row_lock_time,gauge,,fraction,,Fraction of time spent (ms/s) acquiring row locks.,-1,mysql,row lock time, +mysql.innodb.row_lock_time,gauge,,millisecond,,The time spent acquiring row locks.,-1,mysql,row lock 
time, mysql.innodb.row_lock_waits,gauge,,event,second,The number of times per second a row lock had to be waited for.,0,mysql,innodb row lock waits, mysql.innodb.rows_deleted,gauge,,row,second,Number of rows deleted from InnoDB tables.,0,mysql,mysql innodb rows_deleted, mysql.innodb.rows_inserted,gauge,,row,second,Number of rows inserted into InnoDB tables.,0,mysql,mysql innodb rows_inserted, diff --git a/network/CHANGELOG.md b/network/CHANGELOG.md index ee06a54a087e3..d3ede10e3db78 100644 --- a/network/CHANGELOG.md +++ b/network/CHANGELOG.md @@ -2,7 +2,7 @@ -## 5.1.0 / 2024-10-31 +## 5.1.0 / 2024-10-31 / Agent 7.60.0 ***Added***: diff --git a/nvidia_nim/README.md b/nvidia_nim/README.md index 53d980b7bc0e9..383f3d101f629 100644 --- a/nvidia_nim/README.md +++ b/nvidia_nim/README.md @@ -41,8 +41,6 @@ The NVIDIA NIM integration does not include any events. ### Service Checks -The NVIDIA NIM integration does not include any service checks. - See [service_checks.json][8] for a list of service checks provided by this integration. ## Troubleshooting diff --git a/nvidia_nim/assets/configuration/spec.yaml b/nvidia_nim/assets/configuration/spec.yaml index 6f739175a5acc..b5860dffc173c 100644 --- a/nvidia_nim/assets/configuration/spec.yaml +++ b/nvidia_nim/assets/configuration/spec.yaml @@ -13,4 +13,9 @@ files: openmetrics_endpoint.value.example: http://localhost:8000/metrics openmetrics_endpoint.description: | Endpoint exposing the NVIDIA NIM's Prometheus metrics. 
For more information refer to: - https://docs.nvidia.com/nim/large-language-models/latest/observability.html \ No newline at end of file + https://docs.nvidia.com/nim/large-language-models/latest/observability.html + - template: logs + example: + - type: docker + source: nvidia_nim + service: \ No newline at end of file diff --git a/nvidia_nim/datadog_checks/nvidia_nim/data/conf.yaml.example b/nvidia_nim/datadog_checks/nvidia_nim/data/conf.yaml.example index c5e8d23aa4e1b..c45982ffbdcd3 100644 --- a/nvidia_nim/datadog_checks/nvidia_nim/data/conf.yaml.example +++ b/nvidia_nim/datadog_checks/nvidia_nim/data/conf.yaml.example @@ -624,3 +624,23 @@ instances: # - # exclude: # - + +## Log Section +## +## type - required - Type of log input source (tcp / udp / file / windows_event). +## port / path / channel_path - required - Set port if type is tcp or udp. +## Set path if type is file. +## Set channel_path if type is windows_event. +## source - required - Attribute that defines which integration sent the logs. +## encoding - optional - For file specifies the file encoding. Default is utf-8. Other +## possible values are utf-16-le and utf-16-be. +## service - optional - The name of the service that generates the log. +## Overrides any `service` defined in the `init_config` section. +## tags - optional - Add tags to the collected logs. 
+## +## Discover Datadog log collection: https://docs.datadoghq.com/logs/log_collection/ +# +# logs: +# - type: docker +# source: nvidia_nim +# service: diff --git a/nvidia_nim/manifest.json b/nvidia_nim/manifest.json index 800c968407043..44b3b02e9de64 100644 --- a/nvidia_nim/manifest.json +++ b/nvidia_nim/manifest.json @@ -9,7 +9,7 @@ "support": "README.md#Support", "changelog": "CHANGELOG.md", "description": "NVIDIA NIM integration with Datadog enables real-time GPU observability by collecting Prometheus metrics for monitoring.", - "title": "nvidia_nim", + "title": "Nvidia NIM", "media": [], "classifier_tags": [ "Supported OS::Linux", diff --git a/postgres/CHANGELOG.md b/postgres/CHANGELOG.md index cb8a7c57f2734..6e304cb914357 100644 --- a/postgres/CHANGELOG.md +++ b/postgres/CHANGELOG.md @@ -13,7 +13,7 @@ * Add alloydbadmin & alloydbmetadata to default list of databases to exclude from autodiscovery and databases to ignore to prevent failures on GCP AlloyDB for PostgreSQL. ([#19061](https://github.com/DataDog/integrations-core/pull/19061)) -## 22.2.0 / 2024-11-06 +## 22.2.0 / 2024-11-06 / Agent 7.60.0 ***Added***: diff --git a/postgres/changelog.d/19271.fixed b/postgres/changelog.d/19271.fixed new file mode 100644 index 0000000000000..9d981b57621e7 --- /dev/null +++ b/postgres/changelog.d/19271.fixed @@ -0,0 +1 @@ +Fix check for Postgres collect query activity to avoid bugs with in-flight duration and missing blocking pids diff --git a/postgres/datadog_checks/postgres/statement_samples.py b/postgres/datadog_checks/postgres/statement_samples.py index a3a829276b2cf..7a9baf45fef55 100644 --- a/postgres/datadog_checks/postgres/statement_samples.py +++ b/postgres/datadog_checks/postgres/statement_samples.py @@ -41,6 +41,8 @@ TRACK_ACTIVITY_QUERY_SIZE_UNKNOWN_VALUE = -1 +TRACK_ACTIVITY_QUERY_SIZE_SUGGESTED_VALUE = 4096 + SUPPORTED_EXPLAIN_STATEMENTS = frozenset({'select', 'table', 'delete', 'insert', 'replace', 'update', 'with'}) # columns from pg_stat_activity which 
correspond to attributes common to all databases and are therefore stored in @@ -246,10 +248,9 @@ def _get_active_connections(self): return [dict(row) for row in rows] @tracked_method(agent_check_getter=agent_check_getter, track_result_length=True) - def _get_new_pg_stat_activity(self, available_activity_columns, activity_columns_mapping): + def _get_new_pg_stat_activity(self, available_activity_columns, activity_columns_mapping, collect_activity): start_time = time.time() extra_filters, params = self._get_extra_filters_and_params(filter_stale_idle_conn=True) - report_activity = self._report_activity_event() cur_time_func = "" blocking_func = "" backend_type_predicate = "" @@ -257,10 +258,9 @@ def _get_new_pg_stat_activity(self, available_activity_columns, activity_columns backend_type_predicate = "backend_type != 'client backend' OR" # minimum version for pg_blocking_pids function is v9.6 # only call pg_blocking_pids as often as we collect activity snapshots - if self._check.version >= V9_6 and report_activity: + if self._check.version >= V9_6 and collect_activity: blocking_func = PG_BLOCKING_PIDS_FUNC - if report_activity: - cur_time_func = CURRENT_TIME_FUNC + cur_time_func = CURRENT_TIME_FUNC activity_columns = [activity_columns_mapping.get(col, col) for col in available_activity_columns] query = PG_STAT_ACTIVITY_QUERY.format( backend_type_predicate=backend_type_predicate, @@ -471,7 +471,8 @@ def _collect_statement_samples(self): raw=True, ) return - rows = self._get_new_pg_stat_activity(pg_activity_cols, PG_STAT_ACTIVITY_COLS_MAPPING) + collect_activity = self._report_activity_event() + rows = self._get_new_pg_stat_activity(pg_activity_cols, PG_STAT_ACTIVITY_COLS_MAPPING, collect_activity) rows = self._filter_and_normalize_statement_rows(rows) submitted_count = 0 if self._explain_plan_coll_enabled: @@ -480,7 +481,7 @@ def _collect_statement_samples(self): self._check.database_monitoring_query_sample(json.dumps(e, default=default_json_event_encoding)) 
submitted_count += 1 - if self._report_activity_event(): + if collect_activity: active_connections = self._get_active_connections() activity_event = self._create_activity_event(rows, active_connections) self._check.database_monitoring_query_activity( @@ -548,8 +549,7 @@ def _collect_statement_samples(self): "gauge", ) - @staticmethod - def _to_active_session(row, track_activity_query_size): + def _to_active_session(self, row, track_activity_query_size): if (row.get('backend_type', 'client backend') != 'client backend') or ( row['state'] is not None and row['state'] != 'idle' ): @@ -557,8 +557,8 @@ def _to_active_session(row, track_activity_query_size): # Create an active_row, for each session by # 1. Removing all null key/value pairs and the original query # 2. if row['statement'] is none, replace with ERROR: failed to obfuscate so we can still collect activity - active_row['query_truncated'] = PostgresStatementSamples._get_truncation_state( - track_activity_query_size, row['query'] + active_row['query_truncated'] = self._get_truncation_state( + track_activity_query_size, row['query'], row['query_signature'] ).value if row['statement'] is None: active_row['statement'] = "ERROR: failed to obfuscate" @@ -690,7 +690,10 @@ def _run_explain_safe(self, dbname, statement, obfuscated_statement, query_signa track_activity_query_size = self._get_track_activity_query_size() - if self._get_truncation_state(track_activity_query_size, statement) == StatementTruncationState.truncated: + if ( + self._get_truncation_state(track_activity_query_size, statement, query_signature) + == StatementTruncationState.truncated + ): return ( None, DBExplainError.query_truncated, @@ -826,7 +829,7 @@ def _collect_plan_for_statement(self, row): "comments": row['dd_comments'], }, "query_truncated": self._get_truncation_state( - self._get_track_activity_query_size(), row['query'] + self._get_track_activity_query_size(), row['query'], row['query_signature'] ).value, }, 'postgres': {k: v for k, v in 
row.items() if k not in pg_stat_activity_sample_exclude_keys}, @@ -915,8 +918,7 @@ def _report_activity_event(self): def _get_track_activity_query_size(self): return int(self._check.pg_settings.get("track_activity_query_size", TRACK_ACTIVITY_QUERY_SIZE_UNKNOWN_VALUE)) - @staticmethod - def _get_truncation_state(track_activity_query_size, statement): + def _get_truncation_state(self, track_activity_query_size, statement, query_signature): # Only check is a statement is truncated if the value of track_activity_query_size was loaded correctly # to avoid confusingly reporting a wrong indicator by using a default that might be wrong for the database if track_activity_query_size == TRACK_ACTIVITY_QUERY_SIZE_UNKNOWN_VALUE: @@ -930,4 +932,23 @@ def _get_truncation_state(track_activity_query_size, statement): # would falsely report it as a truncated statement statement_bytes = bytes(statement, "utf-8") truncated = len(statement_bytes) >= track_activity_query_size - (MAX_CHARACTER_SIZE_IN_BYTES + 1) - return StatementTruncationState.truncated if truncated else StatementTruncationState.not_truncated + if truncated: + if track_activity_query_size < TRACK_ACTIVITY_QUERY_SIZE_SUGGESTED_VALUE: + self._log.warning( + "Statement with query_signature=%s was truncated. Query size: %d, track_activity_query_size: %d " + "See https://docs.datadoghq.com/database_monitoring/setup_postgres/troubleshooting%s " + "for more details on how to increase the track_activity_query_size setting.", + query_signature, + len(statement_bytes), + track_activity_query_size, + "#query-samples-are-truncated", + ) + else: + self._log.debug( + "Statement with query_signature=%s was truncated. 
Query size: %d, track_activity_query_size: %d", + query_signature, + len(statement_bytes), + track_activity_query_size, + ) + return StatementTruncationState.truncated + return StatementTruncationState.not_truncated diff --git a/quarkus/CHANGELOG.md b/quarkus/CHANGELOG.md new file mode 100644 index 0000000000000..d0112b5d3908f --- /dev/null +++ b/quarkus/CHANGELOG.md @@ -0,0 +1,4 @@ +# CHANGELOG - Quarkus + + + diff --git a/quarkus/README.md b/quarkus/README.md new file mode 100644 index 0000000000000..514709024d86d --- /dev/null +++ b/quarkus/README.md @@ -0,0 +1,55 @@ +# Agent Check: Quarkus + +## Overview + +This check monitors [Quarkus][1] through the Datadog Agent. + +## Setup + +Follow the instructions below to install and configure this check for an Agent running on a host. For containerized environments, see the [Autodiscovery Integration Templates][3] for guidance on applying these instructions. + +### Installation + +The Quarkus check is included in the [Datadog Agent][2] package. +No additional installation is needed on your server. + +### Configuration + +1. Edit the `quarkus.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your Quarkus performance data. See the [sample quarkus.d/conf.yaml][4] for all available configuration options. + +2. [Restart the Agent][5]. + +### Validation + +[Run the Agent's status subcommand][6] and look for `quarkus` under the Checks section. + +## Data Collected + +### Metrics + +See [metadata.csv][7] for a list of metrics provided by this integration. + +### Events + +The Quarkus integration does not include any events. + +### Service Checks + +The Quarkus integration does not include any service checks. + +See [service_checks.json][8] for a list of service checks provided by this integration. + +## Troubleshooting + +Need help? Contact [Datadog support][9]. 
+ + +[1]: **LINK_TO_INTEGRATION_SITE** +[2]: https://app.datadoghq.com/account/settings/agent/latest +[3]: https://docs.datadoghq.com/agent/kubernetes/integrations/ +[4]: https://github.com/DataDog/integrations-core/blob/master/quarkus/datadog_checks/quarkus/data/conf.yaml.example +[5]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent +[6]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information +[7]: https://github.com/DataDog/integrations-core/blob/master/quarkus/metadata.csv +[8]: https://github.com/DataDog/integrations-core/blob/master/quarkus/assets/service_checks.json +[9]: https://docs.datadoghq.com/help/ diff --git a/quarkus/assets/configuration/spec.yaml b/quarkus/assets/configuration/spec.yaml new file mode 100644 index 0000000000000..69a82c06c7270 --- /dev/null +++ b/quarkus/assets/configuration/spec.yaml @@ -0,0 +1,14 @@ +name: Quarkus +files: +- name: quarkus.yaml + options: + - template: init_config + options: + - template: init_config/default + - template: instances + options: + - template: instances/openmetrics + overrides: + openmetrics_endpoint.value.example: http://localhost:8080/q/metrics + openmetrics_endpoint.description: | + Set this to the endpoint that Quarkus's Micrometer Prometheus MeterRegistry extension exposes. diff --git a/quarkus/assets/dashboards/overview.json b/quarkus/assets/dashboards/overview.json new file mode 100644 index 0000000000000..cd248ecb5d4d4 --- /dev/null +++ b/quarkus/assets/dashboards/overview.json @@ -0,0 +1,431 @@ +{ + "author_name": "Datadog", + "description": "## Quarkus\n\nThis dashboard lets you monitor your applications developed with the Quarkus framework.\n\n**Note: This dashboard only displays out of the box metrics. 
Tweak it as you add more metrics to your application.**\n\n## Useful Links\n- [Quarkus Homepage](https://quarkus.io/)\n- [How to add metrics in Quarkus](https://quarkus.io/guides/telemetry-micrometer-tutorial#inject-the-meterregistry)", + "layout_type": "ordered", + "template_variables": [ + { + "available_values": [], + "default": "*", + "name": "host", + "prefix": "host" + }, + { + "available_values": [], + "default": "*", + "name": "method", + "prefix": "method" + }, + { + "available_values": [], + "default": "*", + "name": "status", + "prefix": "status" + }, + { + "available_values": [], + "default": "*", + "name": "uri", + "prefix": "uri" + } + ], + "title": "Quarkus Overview", + "widgets": [ + { + "definition": { + "banner_img": "/static/images/logos/quarkus_small.svg", + "layout_type": "ordered", + "show_title": true, + "title": "", + "type": "group", + "widgets": [ + { + "definition": { + "background_color": "white", + "content": "## Quarkus\n\nThis dashboard lets you monitor your applications developed with the Quarkus framework.\n\n**Note: This dashboard only displays out of the box metrics. 
Tweak it as you add more metrics to your application.**\n", + "font_size": "14", + "has_padding": true, + "show_tick": false, + "text_align": "left", + "tick_edge": "left", + "tick_pos": "50%", + "type": "note", + "vertical_align": "center" + }, + "id": 5685022835071772, + "layout": { + "height": 3, + "width": 3, + "x": 0, + "y": 0 + } + }, + { + "definition": { + "background_color": "white", + "content": "## Useful Links\n- [Quarkus Homepage](https://quarkus.io/)\n- [How to add metrics in Quarkus](https://quarkus.io/guides/telemetry-micrometer-tutorial#inject-the-meterregistry)", + "font_size": "14", + "has_padding": true, + "show_tick": false, + "text_align": "center", + "tick_edge": "left", + "tick_pos": "50%", + "type": "note", + "vertical_align": "center" + }, + "id": 8921963557059570, + "layout": { + "height": 3, + "width": 3, + "x": 3, + "y": 0 + } + } + ] + }, + "id": 4717263751542750, + "layout": { + "height": 6, + "width": 6, + "x": 0, + "y": 0 + } + }, + { + "definition": { + "background_color": "vivid_blue", + "layout_type": "ordered", + "show_title": true, + "title": "Overview", + "type": "group", + "widgets": [ + { + "definition": { + "background_color": "blue", + "content": "See the overall status of your application. The health service check reports whether or not your application is up. 
The monitor alerts you if the maximum duration for a request exceeds a certain threshold.", + "font_size": "14", + "has_padding": true, + "show_tick": false, + "text_align": "center", + "tick_edge": "left", + "tick_pos": "50%", + "type": "note", + "vertical_align": "center" + }, + "id": 4528647613111842, + "layout": { + "height": 2, + "width": 6, + "x": 0, + "y": 0 + } + }, + { + "definition": { + "check": "quarkus.openmetrics.health", + "group": "$host", + "group_by": [], + "grouping": "check", + "tags": [], + "time": { + "hide_incomplete_cost_data": true + }, + "title": "Quarkus Health Check", + "title_align": "left", + "title_size": "16", + "type": "check_status" + }, + "id": 4975142618182494, + "layout": { + "height": 3, + "width": 2, + "x": 0, + "y": 2 + } + }, + { + "definition": { + "color_preference": "text", + "count": 50, + "display_format": "countsAndList", + "hide_zero_counts": true, + "last_triggered_format": "relative", + "query": "tag:(integration:quarkus)", + "show_last_triggered": false, + "show_priority": false, + "show_status": true, + "sort": "status,asc", + "start": 0, + "summary_type": "monitors", + "title": "Monitor Summary", + "type": "manage_status" + }, + "id": 7873059155305294, + "layout": { + "height": 3, + "width": 4, + "x": 2, + "y": 2 + } + } + ] + }, + "id": 2737008660122334, + "layout": { + "height": 6, + "width": 6, + "x": 6, + "y": 0 + } + }, + { + "definition": { + "background_color": "vivid_pink", + "layout_type": "ordered", + "show_title": true, + "title": "HTTP Server", + "type": "group", + "widgets": [ + { + "definition": { + "background_color": "pink", + "content": "See how many requests your HTTP server is getting and which ones take the longest.", + "font_size": "14", + "has_padding": true, + "show_tick": false, + "text_align": "center", + "tick_edge": "left", + "tick_pos": "50%", + "type": "note", + "vertical_align": "center" + }, + "id": 5193429521650892, + "layout": { + "height": 1, + "width": 12, + "x": 0, + "y": 0 + } 
+ }, + { + "definition": { + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "legend_layout": "auto", + "requests": [ + { + "display_type": "line", + "formulas": [ + { + "formula": "query1" + } + ], + "queries": [ + { + "data_source": "metrics", + "name": "query1", + "query": "avg:quarkus.http_server.requests.seconds.max{*}" + } + ], + "response_format": "timeseries", + "style": { + "line_type": "solid", + "line_width": "normal", + "order_by": "values", + "palette": "dog_classic" + } + } + ], + "show_legend": true, + "title": "Longest Request", + "title_align": "left", + "title_size": "16", + "type": "timeseries" + }, + "id": 7305731361762322, + "layout": { + "height": 2, + "width": 4, + "x": 0, + "y": 1 + } + }, + { + "definition": { + "requests": [ + { + "formulas": [ + { + "formula": "query1" + } + ], + "queries": [ + { + "aggregator": "avg", + "data_source": "metrics", + "name": "query1", + "query": "avg:quarkus.http_server.requests.seconds.max{*} by {uri}" + } + ], + "response_format": "scalar", + "sort": { + "count": 10, + "order_by": [ + { + "index": 0, + "order": "desc", + "type": "formula" + } + ] + } + } + ], + "style": { + "display": { + "legend": "automatic", + "type": "stacked" + } + }, + "title": "URIs with Long Requests", + "title_align": "left", + "title_size": "16", + "type": "toplist" + }, + "id": 2683629281370146, + "layout": { + "height": 2, + "width": 4, + "x": 4, + "y": 1 + } + }, + { + "definition": { + "autoscale": true, + "precision": 2, + "requests": [ + { + "formulas": [ + { + "formula": "query1", + "number_format": { + "unit": { + "type": "canonical_unit", + "unit_name": "request" + } + } + } + ], + "queries": [ + { + "aggregator": "avg", + "data_source": "metrics", + "name": "query1", + "query": "avg:quarkus.http_server.requests.seconds.count{*}.as_rate()" + } + ], + "response_format": "scalar" + } + ], + "timeseries_background": { + "type": "area" + }, + "title": "Requests per Second", + "title_align": "left", 
+ "title_size": "16", + "type": "query_value" + }, + "id": 6228596123664624, + "layout": { + "height": 2, + "width": 4, + "x": 8, + "y": 1 + } + } + ] + }, + "id": 880646291321010, + "layout": { + "height": 4, + "width": 12, + "x": 0, + "y": 6 + } + }, + { + "definition": { + "background_color": "white", + "layout_type": "ordered", + "show_title": true, + "title": "Logs", + "type": "group", + "widgets": [ + { + "definition": { + "requests": [ + { + "columns": [ + { + "field": "status_line", + "width": "auto" + }, + { + "field": "timestamp", + "width": "auto" + }, + { + "field": "host", + "width": "auto" + }, + { + "field": "service", + "width": "auto" + }, + { + "field": "content", + "width": "compact" + } + ], + "query": { + "data_source": "logs_stream", + "indexes": [], + "query_string": "source:quarkus", + "sort": { + "column": "timestamp", + "order": "desc" + }, + "storage": "hot" + }, + "response_format": "event_list" + } + ], + "title": "", + "title_align": "left", + "title_size": "16", + "type": "list_stream" + }, + "id": 2489993328338580, + "layout": { + "height": 4, + "width": 12, + "x": 0, + "y": 0 + } + } + ] + }, + "id": 7174398085271826, + "layout": { + "height": 5, + "width": 12, + "x": 0, + "y": 10 + } + } + ] +} \ No newline at end of file diff --git a/quarkus/assets/monitors/long_requests.json b/quarkus/assets/monitors/long_requests.json new file mode 100644 index 0000000000000..f0f48a7d92bc1 --- /dev/null +++ b/quarkus/assets/monitors/long_requests.json @@ -0,0 +1,30 @@ +{ + "version": 2, + "created_at": "2024-12-10", + "last_updated_at": "2024-12-10", + "title": "Some Requests Taking Too Long", + "description": "This monitor alerts you if your longest request is taking too long. 
This can indicate overall degraded service and that other requests are also taking longer to complete.", + "tags": [ + "integration:quarkus" + ], + "definition": { + "name": "Some requests are taking too long", + "type": "query alert", + "query": "avg(last_5m):avg:quarkus.http_server.requests.seconds.max{*} > 1", + "message": "Detected some requests taking extra long to complete. This merits an investigation because it can be a symptom that the overall service is degraded.", + "tags": [ + "integration:quarkus" + ], + "options": { + "thresholds": { + "critical": 1, + "warning": 0.5 + }, + "notify_audit": false, + "include_tags": false, + "new_host_delay": 300, + "avalanche_window": 10 + }, + "priority": null + } +} diff --git a/quarkus/assets/service_checks.json b/quarkus/assets/service_checks.json new file mode 100644 index 0000000000000..ceeec6c578b3d --- /dev/null +++ b/quarkus/assets/service_checks.json @@ -0,0 +1,17 @@ +[ + { + "agent_version": "7.62.0", + "integration": "Quarkus", + "check": "quarkus.openmetrics.health", + "statuses": [ + "ok", + "critical" + ], + "groups": [ + "host", + "endpoint" + ], + "name": "Quarkus OpenMetrics endpoint health", + "description": "Returns `CRITICAL` if the Agent is unable to connect to the Quarkus OpenMetrics endpoint, otherwise returns `OK`." + } +] diff --git a/quarkus/changelog.d/19196.added b/quarkus/changelog.d/19196.added new file mode 100644 index 0000000000000..aa949b47b7b41 --- /dev/null +++ b/quarkus/changelog.d/19196.added @@ -0,0 +1 @@ +Initial Release \ No newline at end of file diff --git a/quarkus/datadog_checks/__init__.py b/quarkus/datadog_checks/__init__.py new file mode 100644 index 0000000000000..1517d901c0aae --- /dev/null +++ b/quarkus/datadog_checks/__init__.py @@ -0,0 +1,4 @@ +# (C) Datadog, Inc. 
2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +__path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore diff --git a/quarkus/datadog_checks/quarkus/__about__.py b/quarkus/datadog_checks/quarkus/__about__.py new file mode 100644 index 0000000000000..e9541ce83e9e5 --- /dev/null +++ b/quarkus/datadog_checks/quarkus/__about__.py @@ -0,0 +1,4 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +__version__ = '0.0.1' diff --git a/quarkus/datadog_checks/quarkus/__init__.py b/quarkus/datadog_checks/quarkus/__init__.py new file mode 100644 index 0000000000000..be45b413005d0 --- /dev/null +++ b/quarkus/datadog_checks/quarkus/__init__.py @@ -0,0 +1,7 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from .__about__ import __version__ +from .check import QuarkusCheck + +__all__ = ['__version__', 'QuarkusCheck'] diff --git a/quarkus/datadog_checks/quarkus/check.py b/quarkus/datadog_checks/quarkus/check.py new file mode 100644 index 0000000000000..1d6705a88778e --- /dev/null +++ b/quarkus/datadog_checks/quarkus/check.py @@ -0,0 +1,16 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +from datadog_checks.base import OpenMetricsBaseCheckV2 +from datadog_checks.quarkus.metrics import METRIC_MAP + + +class QuarkusCheck(OpenMetricsBaseCheckV2): + __NAMESPACE__ = 'quarkus' + DEFAULT_METRIC_LIMIT = 0 + + def get_default_config(self): + return { + "metrics": [METRIC_MAP], + } diff --git a/quarkus/datadog_checks/quarkus/config_models/__init__.py b/quarkus/datadog_checks/quarkus/config_models/__init__.py new file mode 100644 index 0000000000000..106fff2032f68 --- /dev/null +++ b/quarkus/datadog_checks/quarkus/config_models/__init__.py @@ -0,0 +1,24 @@ +# (C) Datadog, Inc. 
2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +# This file is autogenerated. +# To change this file you should edit assets/configuration/spec.yaml and then run the following commands: +# ddev -x validate config -s +# ddev -x validate models -s + +from .instance import InstanceConfig +from .shared import SharedConfig + + +class ConfigMixin: + _config_model_instance: InstanceConfig + _config_model_shared: SharedConfig + + @property + def config(self) -> InstanceConfig: + return self._config_model_instance + + @property + def shared_config(self) -> SharedConfig: + return self._config_model_shared diff --git a/quarkus/datadog_checks/quarkus/config_models/defaults.py b/quarkus/datadog_checks/quarkus/config_models/defaults.py new file mode 100644 index 0000000000000..0138cd77a5ea8 --- /dev/null +++ b/quarkus/datadog_checks/quarkus/config_models/defaults.py @@ -0,0 +1,124 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +# This file is autogenerated. 
+# To change this file you should edit assets/configuration/spec.yaml and then run the following commands: +# ddev -x validate config -s +# ddev -x validate models -s + + +def instance_allow_redirects(): + return True + + +def instance_auth_type(): + return 'basic' + + +def instance_cache_metric_wildcards(): + return True + + +def instance_cache_shared_labels(): + return True + + +def instance_collect_counters_with_distributions(): + return False + + +def instance_collect_histogram_buckets(): + return True + + +def instance_disable_generic_tags(): + return False + + +def instance_empty_default_hostname(): + return False + + +def instance_enable_health_service_check(): + return True + + +def instance_histogram_buckets_as_distributions(): + return False + + +def instance_ignore_connection_errors(): + return False + + +def instance_kerberos_auth(): + return 'disabled' + + +def instance_kerberos_delegate(): + return False + + +def instance_kerberos_force_initiate(): + return False + + +def instance_log_requests(): + return False + + +def instance_min_collection_interval(): + return 15 + + +def instance_non_cumulative_histogram_buckets(): + return False + + +def instance_persist_connections(): + return False + + +def instance_request_size(): + return 16 + + +def instance_skip_proxy(): + return False + + +def instance_tag_by_endpoint(): + return True + + +def instance_telemetry(): + return False + + +def instance_timeout(): + return 10 + + +def instance_tls_ignore_warning(): + return False + + +def instance_tls_use_host_header(): + return False + + +def instance_tls_verify(): + return True + + +def instance_use_latest_spec(): + return False + + +def instance_use_legacy_auth_encoding(): + return True + + +def instance_use_process_start_time(): + return False diff --git a/quarkus/datadog_checks/quarkus/config_models/instance.py b/quarkus/datadog_checks/quarkus/config_models/instance.py new file mode 100644 index 0000000000000..8e39a0e921719 --- /dev/null +++ 
b/quarkus/datadog_checks/quarkus/config_models/instance.py @@ -0,0 +1,171 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +# This file is autogenerated. +# To change this file you should edit assets/configuration/spec.yaml and then run the following commands: +# ddev -x validate config -s +# ddev -x validate models -s + +from __future__ import annotations + +from types import MappingProxyType +from typing import Any, Optional, Union + +from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator + +from datadog_checks.base.utils.functions import identity +from datadog_checks.base.utils.models import validation + +from . import defaults, validators + + +class AuthToken(BaseModel): + model_config = ConfigDict( + arbitrary_types_allowed=True, + frozen=True, + ) + reader: Optional[MappingProxyType[str, Any]] = None + writer: Optional[MappingProxyType[str, Any]] = None + + +class ExtraMetrics(BaseModel): + model_config = ConfigDict( + arbitrary_types_allowed=True, + extra='allow', + frozen=True, + ) + name: Optional[str] = None + type: Optional[str] = None + + +class MetricPatterns(BaseModel): + model_config = ConfigDict( + arbitrary_types_allowed=True, + frozen=True, + ) + exclude: Optional[tuple[str, ...]] = None + include: Optional[tuple[str, ...]] = None + + +class Metrics(BaseModel): + model_config = ConfigDict( + arbitrary_types_allowed=True, + extra='allow', + frozen=True, + ) + name: Optional[str] = None + type: Optional[str] = None + + +class Proxy(BaseModel): + model_config = ConfigDict( + arbitrary_types_allowed=True, + frozen=True, + ) + http: Optional[str] = None + https: Optional[str] = None + no_proxy: Optional[tuple[str, ...]] = None + + +class ShareLabels(BaseModel): + model_config = ConfigDict( + arbitrary_types_allowed=True, + frozen=True, + ) + labels: Optional[tuple[str, ...]] = None + match: Optional[tuple[str, ...]] = None + + +class 
InstanceConfig(BaseModel): + model_config = ConfigDict( + validate_default=True, + arbitrary_types_allowed=True, + frozen=True, + ) + allow_redirects: Optional[bool] = None + auth_token: Optional[AuthToken] = None + auth_type: Optional[str] = None + aws_host: Optional[str] = None + aws_region: Optional[str] = None + aws_service: Optional[str] = None + cache_metric_wildcards: Optional[bool] = None + cache_shared_labels: Optional[bool] = None + collect_counters_with_distributions: Optional[bool] = None + collect_histogram_buckets: Optional[bool] = None + connect_timeout: Optional[float] = None + disable_generic_tags: Optional[bool] = None + empty_default_hostname: Optional[bool] = None + enable_health_service_check: Optional[bool] = None + exclude_labels: Optional[tuple[str, ...]] = None + exclude_metrics: Optional[tuple[str, ...]] = None + exclude_metrics_by_labels: Optional[MappingProxyType[str, Union[bool, tuple[str, ...]]]] = None + extra_headers: Optional[MappingProxyType[str, Any]] = None + extra_metrics: Optional[tuple[Union[str, MappingProxyType[str, Union[str, ExtraMetrics]]], ...]] = None + headers: Optional[MappingProxyType[str, Any]] = None + histogram_buckets_as_distributions: Optional[bool] = None + hostname_format: Optional[str] = None + hostname_label: Optional[str] = None + ignore_connection_errors: Optional[bool] = None + ignore_tags: Optional[tuple[str, ...]] = None + include_labels: Optional[tuple[str, ...]] = None + kerberos_auth: Optional[str] = None + kerberos_cache: Optional[str] = None + kerberos_delegate: Optional[bool] = None + kerberos_force_initiate: Optional[bool] = None + kerberos_hostname: Optional[str] = None + kerberos_keytab: Optional[str] = None + kerberos_principal: Optional[str] = None + log_requests: Optional[bool] = None + metric_patterns: Optional[MetricPatterns] = None + metrics: Optional[tuple[Union[str, MappingProxyType[str, Union[str, Metrics]]], ...]] = None + min_collection_interval: Optional[float] = None + namespace: 
Optional[str] = Field(None, pattern='\\w*') + non_cumulative_histogram_buckets: Optional[bool] = None + ntlm_domain: Optional[str] = None + openmetrics_endpoint: str + password: Optional[str] = None + persist_connections: Optional[bool] = None + proxy: Optional[Proxy] = None + raw_line_filters: Optional[tuple[str, ...]] = None + raw_metric_prefix: Optional[str] = None + read_timeout: Optional[float] = None + rename_labels: Optional[MappingProxyType[str, Any]] = None + request_size: Optional[float] = None + service: Optional[str] = None + share_labels: Optional[MappingProxyType[str, Union[bool, ShareLabels]]] = None + skip_proxy: Optional[bool] = None + tag_by_endpoint: Optional[bool] = None + tags: Optional[tuple[str, ...]] = None + telemetry: Optional[bool] = None + timeout: Optional[float] = None + tls_ca_cert: Optional[str] = None + tls_cert: Optional[str] = None + tls_ignore_warning: Optional[bool] = None + tls_private_key: Optional[str] = None + tls_protocols_allowed: Optional[tuple[str, ...]] = None + tls_use_host_header: Optional[bool] = None + tls_verify: Optional[bool] = None + use_latest_spec: Optional[bool] = None + use_legacy_auth_encoding: Optional[bool] = None + use_process_start_time: Optional[bool] = None + username: Optional[str] = None + + @model_validator(mode='before') + def _initial_validation(cls, values): + return validation.core.initialize_config(getattr(validators, 'initialize_instance', identity)(values)) + + @field_validator('*', mode='before') + def _validate(cls, value, info): + field = cls.model_fields[info.field_name] + field_name = field.alias or info.field_name + if field_name in info.context['configured_fields']: + value = getattr(validators, f'instance_{info.field_name}', identity)(value, field=field) + else: + value = getattr(defaults, f'instance_{info.field_name}', lambda: value)() + + return validation.utils.make_immutable(value) + + @model_validator(mode='after') + def _final_validation(cls, model): + return 
validation.core.check_model(getattr(validators, 'check_instance', identity)(model)) diff --git a/quarkus/datadog_checks/quarkus/config_models/shared.py b/quarkus/datadog_checks/quarkus/config_models/shared.py new file mode 100644 index 0000000000000..e39d447dfc4b9 --- /dev/null +++ b/quarkus/datadog_checks/quarkus/config_models/shared.py @@ -0,0 +1,45 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +# This file is autogenerated. +# To change this file you should edit assets/configuration/spec.yaml and then run the following commands: +# ddev -x validate config -s +# ddev -x validate models -s + +from __future__ import annotations + +from typing import Optional + +from pydantic import BaseModel, ConfigDict, field_validator, model_validator + +from datadog_checks.base.utils.functions import identity +from datadog_checks.base.utils.models import validation + +from . import validators + + +class SharedConfig(BaseModel): + model_config = ConfigDict( + validate_default=True, + arbitrary_types_allowed=True, + frozen=True, + ) + service: Optional[str] = None + + @model_validator(mode='before') + def _initial_validation(cls, values): + return validation.core.initialize_config(getattr(validators, 'initialize_shared', identity)(values)) + + @field_validator('*', mode='before') + def _validate(cls, value, info): + field = cls.model_fields[info.field_name] + field_name = field.alias or info.field_name + if field_name in info.context['configured_fields']: + value = getattr(validators, f'shared_{info.field_name}', identity)(value, field=field) + + return validation.utils.make_immutable(value) + + @model_validator(mode='after') + def _final_validation(cls, model): + return validation.core.check_model(getattr(validators, 'check_shared', identity)(model)) diff --git a/quarkus/datadog_checks/quarkus/config_models/validators.py b/quarkus/datadog_checks/quarkus/config_models/validators.py new file mode 100644 
index 0000000000000..70150e85e6124 --- /dev/null +++ b/quarkus/datadog_checks/quarkus/config_models/validators.py @@ -0,0 +1,13 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +# Here you can include additional config validators or transformers +# +# def initialize_instance(values, **kwargs): +# if 'my_option' not in values and 'my_legacy_option' in values: +# values['my_option'] = values['my_legacy_option'] +# if values.get('my_number') > 10: +# raise ValueError('my_number max value is 10, got %s' % str(values.get('my_number'))) +# +# return values diff --git a/quarkus/datadog_checks/quarkus/data/conf.yaml.example b/quarkus/datadog_checks/quarkus/data/conf.yaml.example new file mode 100644 index 0000000000000..2de1b5ccf2751 --- /dev/null +++ b/quarkus/datadog_checks/quarkus/data/conf.yaml.example @@ -0,0 +1,593 @@ +## All options defined here are available to all instances. +# +init_config: + + ## @param service - string - optional + ## Attach the tag `service:` to every metric, event, and service check emitted by this integration. + ## + ## Additionally, this sets the default `service` for every log source. + # + # service: + +## Every instance is scheduled independently of the others. +# +instances: + + ## @param openmetrics_endpoint - string - required + ## Set this to the endpoint that Quarkus's Micrometer Prometheus MeterRegistry extension exposes. + # + - openmetrics_endpoint: http://localhost:8080/q/metrics + + ## @param raw_metric_prefix - string - optional + ## A prefix that is removed from all exposed metric names, if present. + ## All configuration options will use the prefix-less name. + # + # raw_metric_prefix: _ + + ## @param extra_metrics - (list of string or mapping) - optional + ## This list defines metrics to collect from the `openmetrics_endpoint`, in addition to + ## what the check collects by default. 
If the check already collects a metric, then + ## metric definitions here take precedence. Metrics may be defined in 3 ways: + ## + ## 1. If the item is a string, then it represents the exposed metric name, and + ## the sent metric name will be identical. For example: + ## + ## extra_metrics: + ## - + ## - + ## 2. If the item is a mapping, then the keys represent the exposed metric names. + ## + ## a. If a value is a string, then it represents the sent metric name. For example: + ## + ## extra_metrics: + ## - : + ## - : + ## b. If a value is a mapping, then it must have a `name` and/or `type` key. + ## The `name` represents the sent metric name, and the `type` represents how + ## the metric should be handled, overriding any type information the endpoint + ## may provide. For example: + ## + ## extra_metrics: + ## - : + ## name: + ## type: + ## - : + ## name: + ## type: + ## + ## The supported native types are `gauge`, `counter`, `histogram`, and `summary`. + ## + ## Note: To collect counter metrics with names ending in `_total`, specify the metric name without the `_total` + ## suffix. For example, to collect the counter metric `promhttp_metric_handler_requests_total`, specify + ## `promhttp_metric_handler_requests`. This submits to Datadog the metric name appended with `.count`. + ## For more information, see: + ## https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#suffixes + ## + ## Regular expressions may be used to match the exposed metric names, for example: + ## + ## extra_metrics: + ## - ^network_(ingress|egress)_.+ + ## - .+: + ## type: gauge + # + # extra_metrics: [] + + ## @param exclude_metrics - list of strings - optional + ## A list of metrics to exclude, with each entry being either + ## the exact metric name or a regular expression. 
+ ## In order to exclude all metrics but the ones matching a specific filter, + ## you can use a negative lookahead regex like: + ## - ^(?!foo).*$ + # + # exclude_metrics: [] + + ## @param exclude_metrics_by_labels - mapping - optional + ## A mapping of labels to exclude metrics with matching label name and their corresponding metric values. To match + ## all values of a label, set it to `true`. + ## + ## Note: Label filtering happens before `rename_labels`. + ## + ## For example, the following configuration instructs the check to exclude all metrics with + ## a label `worker` or a label `pid` with the value of either `23` or `42`. + ## + ## exclude_metrics_by_labels: + ## worker: true + ## pid: + ## - '23' + ## - '42' + # + # exclude_metrics_by_labels: {} + + ## @param exclude_labels - list of strings - optional + ## A list of labels to exclude, useful for high cardinality values like timestamps or UUIDs. + ## May be used in conjunction with `include_labels`. + ## Labels defined in `exclude_labels` will take precedence in case of overlap. + ## + ## Note: Label filtering happens before `rename_labels`. + # + # exclude_labels: [] + + ## @param include_labels - list of strings - optional + ## A list of labels to include. May be used in conjunction with `exclude_labels`. + ## Labels defined in `exclude_labels` will take precedence in case of overlap. + ## + ## Note: Label filtering happens before `rename_labels`. + # + # include_labels: [] + + ## @param rename_labels - mapping - optional + ## A mapping of label names to their new names. + # + # rename_labels: + # : + # : + + ## @param enable_health_service_check - boolean - optional - default: true + ## Whether or not to send a service check named `.openmetrics.health` which reports + ## the health of the `openmetrics_endpoint`. 
+ # + # enable_health_service_check: true + + ## @param ignore_connection_errors - boolean - optional - default: false + ## Whether or not to ignore connection errors when scraping `openmetrics_endpoint`. + # + # ignore_connection_errors: false + + ## @param hostname_label - string - optional + ## Override the hostname for every metric submission with the value of one of its labels. + # + # hostname_label: + + ## @param hostname_format - string - optional + ## When `hostname_label` is set, this instructs the check how to format the values. The string + ## `` is replaced by the value of the label defined by `hostname_label`. + # + # hostname_format: + + ## @param collect_histogram_buckets - boolean - optional - default: true + ## Whether or not to send histogram buckets. + # + # collect_histogram_buckets: true + + ## @param non_cumulative_histogram_buckets - boolean - optional - default: false + ## Whether or not histogram buckets are non-cumulative and to come with a `lower_bound` tag. + # + # non_cumulative_histogram_buckets: false + + ## @param histogram_buckets_as_distributions - boolean - optional - default: false + ## Whether or not to send histogram buckets as Datadog distribution metrics. This implicitly + ## enables the `collect_histogram_buckets` and `non_cumulative_histogram_buckets` options. + ## + ## Learn more about distribution metrics: + ## https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#metric-types + # + # histogram_buckets_as_distributions: false + + ## @param collect_counters_with_distributions - boolean - optional - default: false + ## Whether or not to also collect the observation counter metrics ending in `.sum` and `.count` + ## when sending histogram buckets as Datadog distribution metrics. This implicitly enables the + ## `histogram_buckets_as_distributions` option. 
+ # + # collect_counters_with_distributions: false + + ## @param use_process_start_time - boolean - optional - default: false + ## Whether to enable a heuristic for reporting counter values on the first scrape. When true, + ## the first time an endpoint is scraped, check `process_start_time_seconds` to decide whether zero + ## initial value can be assumed for counters. This requires keeping metrics in memory until the entire + ## response is received. + # + # use_process_start_time: false + + ## @param share_labels - mapping - optional + ## This mapping allows for the sharing of labels across multiple metrics. The keys represent the + ## exposed metrics from which to share labels, and the values are mappings that configure the + ## sharing behavior. Each mapping must have at least one of the following keys: + ## + ## labels - This is a list of labels to share. All labels are shared if this is not set. + ## match - This is a list of labels to match on other metrics as a condition for sharing. + ## values - This is a list of allowed values as a condition for sharing. + ## + ## To unconditionally share all labels of a metric, set it to `true`. + ## + ## For example, the following configuration instructs the check to apply all labels from `metric_a` + ## to all other metrics, the `node` label from `metric_b` to only those metrics that have a `pod` + ## label value that matches the `pod` label value of `metric_b`, and all labels from `metric_c` + ## to all other metrics if their value is equal to `23` or `42`. + ## + ## share_labels: + ## metric_a: true + ## metric_b: + ## labels: + ## - node + ## match: + ## - pod + ## metric_c: + ## values: + ## - 23 + ## - 42 + # + # share_labels: {} + + ## @param cache_shared_labels - boolean - optional - default: true + ## When `share_labels` is set, it instructs the check to cache labels collected from the first payload + ## for improved performance. 
+ ## + ## Set this to `false` to compute label sharing for every payload at the risk of potentially increased memory usage. + # + # cache_shared_labels: true + + ## @param raw_line_filters - list of strings - optional + ## A list of regular expressions used to exclude lines read from the `openmetrics_endpoint` + ## from being parsed. + # + # raw_line_filters: [] + + ## @param cache_metric_wildcards - boolean - optional - default: true + ## Whether or not to cache data from metrics that are defined by regular expressions rather + ## than the full metric name. + # + # cache_metric_wildcards: true + + ## @param telemetry - boolean - optional - default: false + ## Whether or not to submit metrics prefixed by `.telemetry.` for debugging purposes. + # + # telemetry: false + + ## @param ignore_tags - list of strings - optional + ## A list of regular expressions used to ignore tags added by Autodiscovery and entries in the `tags` option. + # + # ignore_tags: + # - + # - + # - + + ## @param proxy - mapping - optional + ## This overrides the `proxy` setting in `init_config`. + ## + ## Set HTTP or HTTPS proxies for this instance. Use the `no_proxy` list + ## to specify hosts that must bypass proxies. + ## + ## The SOCKS protocol is also supported, for example: + ## + ## socks5://user:pass@host:port + ## + ## Using the scheme `socks5` causes the DNS resolution to happen on the + ## client, rather than on the proxy server. This is in line with `curl`, + ## which uses the scheme to decide whether to do the DNS resolution on + ## the client or proxy. If you want to resolve the domains on the proxy + ## server, use `socks5h` as the scheme. + # + # proxy: + # http: http://: + # https: https://: + # no_proxy: + # - + # - + + ## @param skip_proxy - boolean - optional - default: false + ## This overrides the `skip_proxy` setting in `init_config`. + ## + ## If set to `true`, this makes the check bypass any proxy + ## settings enabled and attempt to reach services directly. 
+ # + # skip_proxy: false + + ## @param auth_type - string - optional - default: basic + ## The type of authentication to use. The available types (and related options) are: + ## + ## - basic + ## |__ username + ## |__ password + ## |__ use_legacy_auth_encoding + ## - digest + ## |__ username + ## |__ password + ## - ntlm + ## |__ ntlm_domain + ## |__ password + ## - kerberos + ## |__ kerberos_auth + ## |__ kerberos_cache + ## |__ kerberos_delegate + ## |__ kerberos_force_initiate + ## |__ kerberos_hostname + ## |__ kerberos_keytab + ## |__ kerberos_principal + ## - aws + ## |__ aws_region + ## |__ aws_host + ## |__ aws_service + ## + ## The `aws` auth type relies on boto3 to automatically gather AWS credentials, for example: from `.aws/credentials`. + ## Details: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html#configuring-credentials + # + # auth_type: basic + + ## @param use_legacy_auth_encoding - boolean - optional - default: true + ## When `auth_type` is set to `basic`, this determines whether to encode as `latin1` rather than `utf-8`. + # + # use_legacy_auth_encoding: true + + ## @param username - string - optional + ## The username to use if services are behind basic or digest auth. + # + # username: + + ## @param password - string - optional + ## The password to use if services are behind basic or NTLM auth. + # + # password: + + ## @param ntlm_domain - string - optional + ## If your services use NTLM authentication, specify + ## the domain used in the check. For NTLM Auth, append + ## the username to domain, not as the `username` parameter. 
+ # + # ntlm_domain: \ + + ## @param kerberos_auth - string - optional - default: disabled + ## If your services use Kerberos authentication, you can specify the Kerberos + ## strategy to use between: + ## + ## - required + ## - optional + ## - disabled + ## + ## See https://github.com/requests/requests-kerberos#mutual-authentication + # + # kerberos_auth: disabled + + ## @param kerberos_cache - string - optional + ## Sets the KRB5CCNAME environment variable. + ## It should point to a credential cache with a valid TGT. + # + # kerberos_cache: + + ## @param kerberos_delegate - boolean - optional - default: false + ## Set to `true` to enable Kerberos delegation of credentials to a server that requests delegation. + ## + ## See https://github.com/requests/requests-kerberos#delegation + # + # kerberos_delegate: false + + ## @param kerberos_force_initiate - boolean - optional - default: false + ## Set to `true` to preemptively initiate the Kerberos GSS exchange and + ## present a Kerberos ticket on the initial request (and all subsequent). + ## + ## See https://github.com/requests/requests-kerberos#preemptive-authentication + # + # kerberos_force_initiate: false + + ## @param kerberos_hostname - string - optional + ## Override the hostname used for the Kerberos GSS exchange if its DNS name doesn't + ## match its Kerberos hostname, for example: behind a content switch or load balancer. + ## + ## See https://github.com/requests/requests-kerberos#hostname-override + # + # kerberos_hostname: + + ## @param kerberos_principal - string - optional + ## Set an explicit principal, to force Kerberos to look for a + ## matching credential cache for the named user. + ## + ## See https://github.com/requests/requests-kerberos#explicit-principal + # + # kerberos_principal: + + ## @param kerberos_keytab - string - optional + ## Set the path to your Kerberos key tab file. 
+ # + # kerberos_keytab: + + ## @param auth_token - mapping - optional + ## This allows for the use of authentication information from dynamic sources. + ## Both a reader and writer must be configured. + ## + ## The available readers are: + ## + ## - type: file + ## path (required): The absolute path for the file to read from. + ## pattern: A regular expression pattern with a single capture group used to find the + ## token rather than using the entire file, for example: Your secret is (.+) + ## - type: oauth + ## url (required): The token endpoint. + ## client_id (required): The client identifier. + ## client_secret (required): The client secret. + ## basic_auth: Whether the provider expects credentials to be transmitted in + ## an HTTP Basic Auth header. The default is: false + ## options: Mapping of additional options to pass to the provider, such as the audience + ## or the scope. For example: + ## options: + ## audience: https://example.com + ## scope: read:example + ## + ## The available writers are: + ## + ## - type: header + ## name (required): The name of the field, for example: Authorization + ## value: The template value, for example `Bearer `. The default is: + ## placeholder: The substring in `value` to replace with the token, defaults to: + # + # auth_token: + # reader: + # type: + # : + # : + # writer: + # type: + # : + # : + + ## @param aws_region - string - optional + ## If your services require AWS Signature Version 4 signing, set the region. + ## + ## See https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html + # + # aws_region: + + ## @param aws_host - string - optional + ## If your services require AWS Signature Version 4 signing, set the host. + ## This only needs the hostname and does not require the protocol (HTTP, HTTPS, and more). + ## For example, if connecting to https://us-east-1.amazonaws.com/, set `aws_host` to `us-east-1.amazonaws.com`. + ## + ## Note: This setting is not necessary for official integrations. 
+ ## + ## See https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html + # + # aws_host: + + ## @param aws_service - string - optional + ## If your services require AWS Signature Version 4 signing, set the service code. For a list + ## of available service codes, see https://docs.aws.amazon.com/general/latest/gr/rande.html + ## + ## Note: This setting is not necessary for official integrations. + ## + ## See https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html + # + # aws_service: + + ## @param tls_verify - boolean - optional - default: true + ## Instructs the check to validate the TLS certificate of services. + # + # tls_verify: true + + ## @param tls_use_host_header - boolean - optional - default: false + ## If a `Host` header is set, this enables its use for SNI (matching against the TLS certificate CN or SAN). + # + # tls_use_host_header: false + + ## @param tls_ignore_warning - boolean - optional - default: false + ## If `tls_verify` is disabled, security warnings are logged by the check. + ## Disable those by setting `tls_ignore_warning` to true. + # + # tls_ignore_warning: false + + ## @param tls_cert - string - optional + ## The path to a single file in PEM format containing a certificate as well as any + ## number of CA certificates needed to establish the certificate's authenticity for + ## use when connecting to services. It may also contain an unencrypted private key to use. + # + # tls_cert: + + ## @param tls_private_key - string - optional + ## The unencrypted private key to use for `tls_cert` when connecting to services. This is + ## required if `tls_cert` is set and it does not already contain a private key. + # + # tls_private_key: + + ## @param tls_ca_cert - string - optional + ## The path to a file of concatenated CA certificates in PEM format or a directory + ## containing several CA certificates in PEM format. If a directory, the directory + ## must have been processed using the `openssl rehash` command. 
See: + ## https://www.openssl.org/docs/man3.2/man1/c_rehash.html + # + # tls_ca_cert: + + ## @param tls_protocols_allowed - list of strings - optional + ## The expected versions of TLS/SSL when fetching intermediate certificates. + ## Only `SSLv3`, `TLSv1.2`, `TLSv1.3` are allowed by default. The possible values are: + ## SSLv3 + ## TLSv1 + ## TLSv1.1 + ## TLSv1.2 + ## TLSv1.3 + # + # tls_protocols_allowed: + # - SSLv3 + # - TLSv1.2 + # - TLSv1.3 + + ## @param headers - mapping - optional + ## The headers parameter allows you to send specific headers with every request. + ## You can use it for explicitly specifying the host header or adding headers for + ## authorization purposes. + ## + ## This overrides any default headers. + # + # headers: + # Host: + # X-Auth-Token: + + ## @param extra_headers - mapping - optional + ## Additional headers to send with every request. + # + # extra_headers: + # Host: + # X-Auth-Token: + + ## @param timeout - number - optional - default: 10 + ## The timeout for accessing services. + ## + ## This overrides the `timeout` setting in `init_config`. + # + # timeout: 10 + + ## @param connect_timeout - number - optional + ## The connect timeout for accessing services. Defaults to `timeout`. + # + # connect_timeout: + + ## @param read_timeout - number - optional + ## The read timeout for accessing services. Defaults to `timeout`. + # + # read_timeout: + + ## @param request_size - number - optional - default: 16 + ## The number of kibibytes (KiB) to read from streaming HTTP responses at a time. + # + # request_size: 16 + + ## @param log_requests - boolean - optional - default: false + ## Whether or not to debug log the HTTP(S) requests made, including the method and URL. + # + # log_requests: false + + ## @param persist_connections - boolean - optional - default: false + ## Whether or not to persist cookies and use connection pooling for improved performance. 
+ # + # persist_connections: false + + ## @param allow_redirects - boolean - optional - default: true + ## Whether or not to allow URL redirection. + # + # allow_redirects: true + + ## @param tags - list of strings - optional + ## A list of tags to attach to every metric and service check emitted by this instance. + ## + ## Learn more about tagging at https://docs.datadoghq.com/tagging + # + # tags: + # - : + # - : + + ## @param service - string - optional + ## Attach the tag `service:` to every metric, event, and service check emitted by this integration. + ## + ## Overrides any `service` defined in the `init_config` section. + # + # service: + + ## @param min_collection_interval - number - optional - default: 15 + ## This changes the collection interval of the check. For more information, see: + ## https://docs.datadoghq.com/developers/write_agent_check/#collection-interval + # + # min_collection_interval: 15 + + ## @param empty_default_hostname - boolean - optional - default: false + ## This forces the check to send metrics with no hostname. + ## + ## This is useful for cluster-level checks. + # + # empty_default_hostname: false + + ## @param metric_patterns - mapping - optional + ## A mapping of metrics to include or exclude, with each entry being a regular expression. + ## + ## Metrics defined in `exclude` will take precedence in case of overlap. + # + # metric_patterns: + # include: + # - + # exclude: + # - diff --git a/quarkus/datadog_checks/quarkus/metrics.py b/quarkus/datadog_checks/quarkus/metrics.py new file mode 100644 index 0000000000000..a753aa86c965c --- /dev/null +++ b/quarkus/datadog_checks/quarkus/metrics.py @@ -0,0 +1,53 @@ +# (C) Datadog, Inc. 
2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +gauges_counters = { + 'http_server_active_requests': 'http_server.active_requests', + 'http_server_bytes_read_max': 'http_server.bytes_read.max', + 'http_server_bytes_written_max': 'http_server.bytes_written.max', + 'http_server_connections_seconds_max': 'http_server.connections.seconds.max', + 'http_server_requests_seconds_max': 'http_server.requests.seconds.max', + 'jvm_buffer_count_buffers': 'jvm.buffer.count_buffers', + 'jvm_buffer_memory_used_bytes': 'jvm.buffer.memory_used.bytes', + 'jvm_buffer_total_capacity_bytes': 'jvm.buffer.total_capacity.bytes', + 'jvm_classes_loaded_classes': 'jvm.classes.loaded_classes', + 'jvm_gc_live_data_size_bytes': 'jvm.gc.live_data_size.bytes', + 'jvm_gc_max_data_size_bytes': 'jvm.gc.max_data_size.bytes', + 'jvm_gc_overhead': 'jvm.gc.overhead', + 'jvm_memory_committed_bytes': 'jvm.memory.committed.bytes', + 'jvm_memory_max_bytes': 'jvm.memory.max.bytes', + 'jvm_memory_usage_after_gc': 'jvm.memory.usage_after_gc', + 'jvm_memory_used_bytes': 'jvm.memory.used.bytes', + 'jvm_threads_daemon_threads': 'jvm.threads.daemon_threads', + 'jvm_threads_live_threads': 'jvm.threads.live_threads', + 'jvm_threads_peak_threads': 'jvm.threads.peak_threads', + 'jvm_threads_states_threads': 'jvm.threads.states_threads', + 'netty_allocator_memory_pinned': 'netty.allocator.memory.pinned', + 'netty_allocator_memory_used': 'netty.allocator.memory.used', + 'netty_allocator_pooled_arenas': 'netty.allocator.pooled.arenas', + 'netty_allocator_pooled_cache_size': 'netty.allocator.pooled.cache_size', + 'netty_allocator_pooled_chunk_size': 'netty.allocator.pooled.chunk_size', + 'netty_allocator_pooled_threadlocal_caches': 'netty.allocator.pooled.threadlocal_caches', + 'netty_eventexecutor_tasks_pending': 'netty.eventexecutor.tasks_pending', + 'process_cpu_usage': 'process.cpu.usage', + 'process_files_max_files': 'process.files.max_files', + 
'process_files_open_files': 'process.files.open_files', + 'process_uptime_seconds': 'process.uptime.seconds', + 'system_cpu_count': 'system.cpu.count', + 'system_cpu_usage': 'system.cpu.usage', + 'system_load_average_1m': 'system.load_average_1m', + 'worker_pool_active': 'worker_pool.active', + 'worker_pool_idle': 'worker_pool.idle', + 'worker_pool_queue_delay_seconds_max': 'worker_pool.queue.delay.seconds.max', + 'worker_pool_queue_size': 'worker_pool.queue.size', + 'worker_pool_ratio': 'worker_pool.ratio', + 'worker_pool_usage_seconds_max': 'worker_pool.usage.seconds.max', +} +summaries = { + 'http_server_bytes_read': 'http_server.bytes_read', + 'http_server_bytes_written': 'http_server.bytes_written', + 'http_server_requests_seconds': 'http_server.requests.seconds', + 'worker_pool_queue_delay_seconds': 'worker_pool.queue.delay.seconds', + 'worker_pool_usage_seconds': 'worker_pool.usage.seconds', +} +METRIC_MAP = {**gauges_counters, **summaries} diff --git a/quarkus/hatch.toml b/quarkus/hatch.toml new file mode 100644 index 0000000000000..c85c5f07a7df2 --- /dev/null +++ b/quarkus/hatch.toml @@ -0,0 +1,4 @@ +[env.collectors.datadog-checks] + +[[envs.default.matrix]] +python = ["3.12"] diff --git a/quarkus/manifest.json b/quarkus/manifest.json new file mode 100644 index 0000000000000..27a97dfcd0dcf --- /dev/null +++ b/quarkus/manifest.json @@ -0,0 +1,60 @@ +{ + "manifest_version": "2.0.0", + "app_uuid": "78e72ed2-6ea6-4186-9e57-2015a4a52afc", + "app_id": "quarkus", + "display_on_public_website": false, + "tile": { + "overview": "README.md#Overview", + "configuration": "README.md#Setup", + "support": "README.md#Support", + "changelog": "CHANGELOG.md", + "description": "Monitor your application built with Quarkus.", + "title": "Quarkus", + "media": [], + "classifier_tags": [ + "Supported OS::Linux", + "Supported OS::Windows", + "Supported OS::macOS", + "Category::Metrics", + "Offering::Integration", + "Queried Data Type::Metrics", + "Submitted Data Type::Metrics" + ] 
+ }, + "assets": { + "integration": { + "auto_install": true, + "source_type_id": 29763785, + "source_type_name": "Quarkus", + "configuration": { + "spec": "assets/configuration/spec.yaml" + }, + "events": { + "creates_events": false + }, + "metrics": { + "prefix": "quarkus.", + "check": "quarkus.process.cpu.usage", + "metadata_path": "metadata.csv" + }, + "process_signatures": [ + "java quarkus-run.jar" + ], + "service_checks": { + "metadata_path": "assets/service_checks.json" + } + }, + "dashboards": { + "Quarkus Overview": "assets/dashboards/overview.json" + }, + "monitors": { + "Long Requests": "assets/monitors/long_requests.json" + } + }, + "author": { + "support_email": "help@datadoghq.com", + "name": "Datadog", + "homepage": "https://www.datadoghq.com", + "sales_email": "info@datadoghq.com" + } +} diff --git a/quarkus/metadata.csv b/quarkus/metadata.csv new file mode 100644 index 0000000000000..4e4869d8325eb --- /dev/null +++ b/quarkus/metadata.csv @@ -0,0 +1,51 @@ +metric_name,metric_type,interval,unit_name,per_unit_name,description,orientation,integration,short_name,curated_metric,sample_tags +quarkus.http_server.active_requests,gauge,,request,,Requests to the server that are active right now.,0,quarkus,,, +quarkus.http_server.bytes_read.count,count,,,,Number of times some bytes were received by the server.,0,quarkus,,, +quarkus.http_server.bytes_read.max,gauge,,byte,,Maximum number of bytes currently received by the server.,0,quarkus,,, +quarkus.http_server.bytes_read.sum,count,,byte,,Total number of bytes received by the server since it started.,0,quarkus,,, +quarkus.http_server.bytes_written.count,count,,,,Number of times some bytes were sent by the server.,0,quarkus,,, +quarkus.http_server.bytes_written.max,gauge,,byte,,Current maximum number of bytes sent by the server.,0,quarkus,,, +quarkus.http_server.bytes_written.sum,count,,byte,,Total number of bytes sent by the server.,0,quarkus,,, +quarkus.http_server.connections.seconds.max,gauge,,second,,The
duration of the connections in seconds.,0,quarkus,,, +quarkus.http_server.requests.seconds.count,count,,,,The number of requests observed so far.,0,quarkus,,, +quarkus.http_server.requests.seconds.max,gauge,,second,,The current longest request duration in seconds.,0,quarkus,,, +quarkus.http_server.requests.seconds.sum,count,,second,,Total number of seconds that all requests took so far.,0,quarkus,,, +quarkus.jvm.buffer.count_buffers,gauge,,buffer,,An estimate of the number of buffers in the pool.,0,quarkus,,, +quarkus.jvm.buffer.memory_used.bytes,gauge,,byte,,An estimate of the memory that the Java virtual machine is using for this buffer pool.,0,quarkus,,, +quarkus.jvm.buffer.total_capacity.bytes,gauge,,byte,,An estimate of the total capacity of the buffers in this pool.,0,quarkus,,, +quarkus.jvm.classes.loaded_classes,gauge,,,,The number of classes that are currently loaded in the Java virtual machine.,0,quarkus,,, +quarkus.jvm.gc.live_data_size.bytes,gauge,,byte,,Size of long-lived heap memory pool after reclamation.,0,quarkus,,, +quarkus.jvm.gc.max_data_size.bytes,gauge,,byte,,Max size of long-lived heap memory pool.,0,quarkus,,, +quarkus.jvm.gc.overhead,gauge,,,,"An approximation of the percent of CPU time used by GC activities over the last lookback period or since monitoring began, whichever is shorter, in the range [0..1].",0,quarkus,,, +quarkus.jvm.memory.committed.bytes,gauge,,byte,,The amount of memory in bytes that is committed for the Java virtual machine to use.,0,quarkus,,, +quarkus.jvm.memory.max.bytes,gauge,,byte,,The maximum amount of memory in bytes that can be used for memory management.,0,quarkus,,, +quarkus.jvm.memory.usage_after_gc,gauge,,fraction,,"The percentage of long-lived heap pool used after the last GC event, in the range [0..1].",0,quarkus,,, +quarkus.jvm.memory.used.bytes,gauge,,byte,,The amount of used memory.,0,quarkus,,, +quarkus.jvm.threads.daemon_threads,gauge,,thread,,The current number of live daemon threads.,0,quarkus,,, 
+quarkus.jvm.threads.live_threads,gauge,,thread,,The current number of live threads including both daemon and non-daemon threads.,0,quarkus,,, +quarkus.jvm.threads.peak_threads,gauge,,thread,,The peak live thread count since the Java virtual machine started or peak was reset.,0,quarkus,,, +quarkus.jvm.threads.states_threads,gauge,,thread,,The current number of threads.,0,quarkus,,, +quarkus.netty.allocator.memory.pinned,gauge,,byte,,"Size, in bytes, of the memory that the allocated buffer uses.",0,quarkus,,, +quarkus.netty.allocator.memory.used,gauge,,byte,,"Size, in bytes, of the memory that the allocator uses.",0,quarkus,,, +quarkus.netty.allocator.pooled.arenas,gauge,,byte,,Number of arenas for a pooled allocator.,0,quarkus,,, +quarkus.netty.allocator.pooled.cache_size,gauge,,byte,,"Size, in bytes, of the cache for a pooled allocator.",0,quarkus,,, +quarkus.netty.allocator.pooled.chunk_size,gauge,,byte,,"Size, in bytes, of memory chunks for a pooled allocator.",0,quarkus,,, +quarkus.netty.allocator.pooled.threadlocal_caches,gauge,,,,Number of ThreadLocal caches for a pooled allocator.,0,quarkus,,, +quarkus.netty.eventexecutor.tasks_pending,gauge,,task,,Number of pending tasks in the event executor.,0,quarkus,,, +quarkus.process.cpu.usage,gauge,,,,The recent cpu usage for the Java Virtual Machine process.,0,quarkus,,, +quarkus.process.files.max_files,gauge,,file,,The maximum file descriptor count.,0,quarkus,,, +quarkus.process.files.open_files,gauge,,file,,The open file descriptor count.,0,quarkus,,, +quarkus.process.uptime.seconds,gauge,,second,,The uptime of the Java virtual machine.,0,quarkus,,, +quarkus.system.cpu.count,gauge,,,,The number of processors available to the Java virtual machine.,0,quarkus,,, +quarkus.system.cpu.usage,gauge,,,,The recent cpu usage of the system the application is running in.,0,quarkus,,, +quarkus.system.load_average_1m,gauge,,,,The sum of the number of runnable entities queued to available processors and the number of runnable 
entities running on the available processors averaged over a period of time.,0,quarkus,,, +quarkus.worker_pool.active,gauge,,,,The number of resources from the pool currently used.,0,quarkus,,, +quarkus.worker_pool.idle,gauge,,,,The number of resources from the pool currently idle.,0,quarkus,,, +quarkus.worker_pool.queue.delay.seconds.count,count,,,,Number of items that spent time in the waiting queue before being processed.,0,quarkus,,, +quarkus.worker_pool.queue.delay.seconds.max,gauge,,second,,Current maximum time spent in the waiting queue before being processed.,0,quarkus,,, +quarkus.worker_pool.queue.delay.seconds.sum,count,,second,,Total time spent in the waiting queue before being processed.,0,quarkus,,, +quarkus.worker_pool.queue.size,gauge,,,,Number of pending elements in the waiting queue.,0,quarkus,,, +quarkus.worker_pool.ratio,gauge,,fraction,,Ratio of workers being used at the moment.,0,quarkus,,, +quarkus.worker_pool.usage.seconds.count,count,,second,,Number of times resources from the pool were being used.,0,quarkus,,, +quarkus.worker_pool.usage.seconds.max,gauge,,second,,Maximum time spent using resources from the pool.,0,quarkus,,, +quarkus.worker_pool.usage.seconds.sum,count,,second,,Total time spent using resources from the pool.,0,quarkus,,, diff --git a/quarkus/pyproject.toml b/quarkus/pyproject.toml new file mode 100644 index 0000000000000..fe7cf0e23997a --- /dev/null +++ b/quarkus/pyproject.toml @@ -0,0 +1,60 @@ +[build-system] +requires = [ + "hatchling>=0.13.0", +] +build-backend = "hatchling.build" + +[project] +name = "datadog-quarkus" +description = "The Quarkus check" +readme = "README.md" +license = "BSD-3-Clause" +requires-python = ">=3.12" +keywords = [ + "datadog", + "datadog agent", + "datadog check", + "quarkus", +] +authors = [ + { name = "Datadog", email = "packages@datadoghq.com" }, +] +classifiers = [ + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Developers", + "Intended Audience :: System
Administrators", + "License :: OSI Approved :: BSD License", + "Private :: Do Not Upload", + "Programming Language :: Python :: 3.12", + "Topic :: System :: Monitoring", +] +dependencies = [ + "datadog-checks-base>=37.0.0", +] +dynamic = [ + "version", +] + +[project.optional-dependencies] +deps = [] + +[project.urls] +Source = "https://github.com/DataDog/integrations-core" + +[tool.hatch.version] +path = "datadog_checks/quarkus/__about__.py" + +[tool.hatch.build.targets.sdist] +include = [ + "/datadog_checks", + "/tests", + "/manifest.json", +] + +[tool.hatch.build.targets.wheel] +include = [ + "/datadog_checks/quarkus", +] +dev-mode-dirs = [ + ".", +] diff --git a/quarkus/tests/__init__.py b/quarkus/tests/__init__.py new file mode 100644 index 0000000000000..9103122bf028d --- /dev/null +++ b/quarkus/tests/__init__.py @@ -0,0 +1,3 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) diff --git a/quarkus/tests/conftest.py b/quarkus/tests/conftest.py new file mode 100644 index 0000000000000..1e8d20eae623f --- /dev/null +++ b/quarkus/tests/conftest.py @@ -0,0 +1,29 @@ +# (C) Datadog, Inc. 
2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +import copy +from pathlib import Path + +import pytest + +from datadog_checks.dev import docker_run +from datadog_checks.dev.conditions import CheckEndpoints + +INSTANCE = {'openmetrics_endpoint': 'http://localhost:8080/q/metrics'} + + +@pytest.fixture(scope='session') +def dd_environment(): + compose_file = str(Path(__file__).parent.absolute() / 'docker' / 'docker-compose.yaml') + conditions = [ + CheckEndpoints(INSTANCE["openmetrics_endpoint"]), + ] + with docker_run(compose_file, conditions=conditions): + yield { + 'instances': [INSTANCE], + } + + +@pytest.fixture +def instance(): + return copy.deepcopy(INSTANCE) diff --git a/quarkus/tests/docker/README.md b/quarkus/tests/docker/README.md new file mode 100644 index 0000000000000..51ee36d9d6ef9 --- /dev/null +++ b/quarkus/tests/docker/README.md @@ -0,0 +1,10 @@ +To test an example Quarkus app that exposes metrics, we took the documented example from here: +https://github.com/quarkusio/quarkus-quickstarts/tree/1347e49b4441e43c3faac3b3953dd5e988af379b/micrometer-quickstart + +We then used this StackOverflow post to write a Dockerfile that would build the app: +https://stackoverflow.com/a/75759520 + +We needed the following tweaks: + +- Tweak `.dockerignore` to stop ignoring all files. +- Disable the step `RUN ./mvnw dependency:go-offline -B` in the Dockerfile.
diff --git a/quarkus/tests/docker/docker-compose.yaml b/quarkus/tests/docker/docker-compose.yaml new file mode 100755 index 0000000000000..1f07754eca0d1 --- /dev/null +++ b/quarkus/tests/docker/docker-compose.yaml @@ -0,0 +1,6 @@ +services: + + quarkus-app: + build: micrometer-quickstart + ports: + - "8080:8080" diff --git a/quarkus/tests/docker/micrometer-quickstart/.dockerignore b/quarkus/tests/docker/micrometer-quickstart/.dockerignore new file mode 100644 index 0000000000000..7b6be1b3d4556 --- /dev/null +++ b/quarkus/tests/docker/micrometer-quickstart/.dockerignore @@ -0,0 +1,4 @@ +!target/*-runner +!target/*-runner.jar +!target/lib/* +!target/quarkus-app/ diff --git a/quarkus/tests/docker/micrometer-quickstart/.gitignore b/quarkus/tests/docker/micrometer-quickstart/.gitignore new file mode 100644 index 0000000000000..087a18358fe57 --- /dev/null +++ b/quarkus/tests/docker/micrometer-quickstart/.gitignore @@ -0,0 +1,35 @@ +# Eclipse +.project +.classpath +.settings/ +bin/ + +# IntelliJ +.idea +*.ipr +*.iml +*.iws + +# NetBeans +nb-configuration.xml + +# Visual Studio Code +.vscode + +# OSX +.DS_Store + +# Vim +*.swp +*.swo + +# patch +*.orig +*.rej + +# Maven +target/ +pom.xml.tag +pom.xml.releaseBackup +pom.xml.versionsBackup +release.properties \ No newline at end of file diff --git a/quarkus/tests/docker/micrometer-quickstart/.mvn/wrapper/maven-wrapper.jar b/quarkus/tests/docker/micrometer-quickstart/.mvn/wrapper/maven-wrapper.jar new file mode 100644 index 0000000000000..7967f30dd1d25 Binary files /dev/null and b/quarkus/tests/docker/micrometer-quickstart/.mvn/wrapper/maven-wrapper.jar differ diff --git a/quarkus/tests/docker/micrometer-quickstart/.mvn/wrapper/maven-wrapper.properties b/quarkus/tests/docker/micrometer-quickstart/.mvn/wrapper/maven-wrapper.properties new file mode 100644 index 0000000000000..9548abd8e8c0e --- /dev/null +++ b/quarkus/tests/docker/micrometer-quickstart/.mvn/wrapper/maven-wrapper.properties @@ -0,0 +1,20 @@ +# Licensed to the 
Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +wrapperVersion=3.3.2 +distributionType=bin +distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.9.9/apache-maven-3.9.9-bin.zip +wrapperUrl=https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.3.2/maven-wrapper-3.3.2.jar diff --git a/quarkus/tests/docker/micrometer-quickstart/Dockerfile b/quarkus/tests/docker/micrometer-quickstart/Dockerfile new file mode 100644 index 0000000000000..09f4137121700 --- /dev/null +++ b/quarkus/tests/docker/micrometer-quickstart/Dockerfile @@ -0,0 +1,52 @@ +# Use the official JDK 19 image as the base image for the build stage +FROM openjdk:19-jdk AS build + +# Enable preview features +ENV JAVA_OPTS="--enable-preview" + +# Set the working directory +WORKDIR /app + +# Copy the Maven wrapper +COPY ./mvnw . +COPY ./.mvn .mvn + +# Copy the pom.xml file +COPY ./pom.xml . + +# The StackOverflow post where we got this dockerfile included the steps to download the deps. +# This didn't work for us, so we disabled it. 
+# https://stackoverflow.com/a/75759520 +# ENV HTTP_PROXY="http://host.docker.internal:3128" +# ENV HTTPS_PROXY="http://host.docker.internal:3128" +# ENV http_proxy="http://host.docker.internal:3128" +# ENV https_proxy="http://host.docker.internal:3128" +# ENV MAVEN_OPTS="-Dhttp.proxyHost=host.docker.internal -Dhttp.proxyPort=3128 -Dhttps.proxyHost=host.docker.internal -Dhttps.proxyPort=3128 --enable-preview" +# Download dependencies and cache them +# RUN ./mvnw dependency:go-offline -B + +# Copy the source code +COPY ./src . + +# Compile and package the application +RUN ./mvnw package -Dmaven.test.skip=true -Dmaven.javadoc.skip=true -B -V + + +# Use the official JDK 19 image as the base image for the runtime stage +FROM openjdk:19-jdk AS runtime + +# Enable preview features +ENV JAVA_OPTS="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager --enable-preview" + +# Set the working directory +WORKDIR /app + +# Copy the build artifacts from the build stage +#COPY --from=build /app/target/quarkus-app/quarkus-run.jar /app/app.jar +COPY --from=build /app/target/quarkus-app/lib/ /app/lib/ +COPY --from=build /app/target/quarkus-app/*.jar /app/ +COPY --from=build /app/target/quarkus-app/app/ /app/app/ +COPY --from=build /app/target/quarkus-app/quarkus/ /app/quarkus/ + +# Set the entrypoint and command to run the application +ENTRYPOINT ["sh", "-c", "java $JAVA_OPTS -jar /app/quarkus-run.jar"] diff --git a/quarkus/tests/docker/micrometer-quickstart/README.md b/quarkus/tests/docker/micrometer-quickstart/README.md new file mode 100644 index 0000000000000..500cfac084a17 --- /dev/null +++ b/quarkus/tests/docker/micrometer-quickstart/README.md @@ -0,0 +1 @@ +Quarkus guide: https://quarkus.io/guides/micrometer diff --git a/quarkus/tests/docker/micrometer-quickstart/mvnw b/quarkus/tests/docker/micrometer-quickstart/mvnw new file mode 100755 index 0000000000000..5e9618cac26d1 --- /dev/null +++ b/quarkus/tests/docker/micrometer-quickstart/mvnw @@ 
-0,0 +1,332 @@ +#!/bin/sh +# ---------------------------------------------------------------------------- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# ---------------------------------------------------------------------------- + +# ---------------------------------------------------------------------------- +# Apache Maven Wrapper startup batch script, version 3.3.2 +# +# Required ENV vars: +# ------------------ +# JAVA_HOME - location of a JDK home dir +# +# Optional ENV vars +# ----------------- +# MAVEN_OPTS - parameters passed to the Java VM when running Maven +# e.g. to debug Maven itself, use +# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +# MAVEN_SKIP_RC - flag to disable loading of mavenrc files +# ---------------------------------------------------------------------------- + +if [ -z "$MAVEN_SKIP_RC" ]; then + + if [ -f /usr/local/etc/mavenrc ]; then + . /usr/local/etc/mavenrc + fi + + if [ -f /etc/mavenrc ]; then + . /etc/mavenrc + fi + + if [ -f "$HOME/.mavenrc" ]; then + . "$HOME/.mavenrc" + fi + +fi + +# OS specific support. $var _must_ be set to either true or false. 
+cygwin=false +darwin=false +mingw=false +case "$(uname)" in +CYGWIN*) cygwin=true ;; +MINGW*) mingw=true ;; +Darwin*) + darwin=true + # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home + # See https://developer.apple.com/library/mac/qa/qa1170/_index.html + if [ -z "$JAVA_HOME" ]; then + if [ -x "/usr/libexec/java_home" ]; then + JAVA_HOME="$(/usr/libexec/java_home)" + export JAVA_HOME + else + JAVA_HOME="/Library/Java/Home" + export JAVA_HOME + fi + fi + ;; +esac + +if [ -z "$JAVA_HOME" ]; then + if [ -r /etc/gentoo-release ]; then + JAVA_HOME=$(java-config --jre-home) + fi +fi + +# For Cygwin, ensure paths are in UNIX format before anything is touched +if $cygwin; then + [ -n "$JAVA_HOME" ] \ + && JAVA_HOME=$(cygpath --unix "$JAVA_HOME") + [ -n "$CLASSPATH" ] \ + && CLASSPATH=$(cygpath --path --unix "$CLASSPATH") +fi + +# For Mingw, ensure paths are in UNIX format before anything is touched +if $mingw; then + [ -n "$JAVA_HOME" ] && [ -d "$JAVA_HOME" ] \ + && JAVA_HOME="$( + cd "$JAVA_HOME" || ( + echo "cannot cd into $JAVA_HOME." >&2 + exit 1 + ) + pwd + )" +fi + +if [ -z "$JAVA_HOME" ]; then + javaExecutable="$(which javac)" + if [ -n "$javaExecutable" ] && ! [ "$(expr "$javaExecutable" : '\([^ ]*\)')" = "no" ]; then + # readlink(1) is not available as standard on Solaris 10. + readLink=$(which readlink) + if [ ! 
"$(expr "$readLink" : '\([^ ]*\)')" = "no" ]; then + if $darwin; then + javaHome="$(dirname "$javaExecutable")" + javaExecutable="$(cd "$javaHome" && pwd -P)/javac" + else + javaExecutable="$(readlink -f "$javaExecutable")" + fi + javaHome="$(dirname "$javaExecutable")" + javaHome=$(expr "$javaHome" : '\(.*\)/bin') + JAVA_HOME="$javaHome" + export JAVA_HOME + fi + fi +fi + +if [ -z "$JAVACMD" ]; then + if [ -n "$JAVA_HOME" ]; then + if [ -x "$JAVA_HOME/jre/sh/java" ]; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + else + JAVACMD="$( + \unset -f command 2>/dev/null + \command -v java + )" + fi +fi + +if [ ! -x "$JAVACMD" ]; then + echo "Error: JAVA_HOME is not defined correctly." >&2 + echo " We cannot execute $JAVACMD" >&2 + exit 1 +fi + +if [ -z "$JAVA_HOME" ]; then + echo "Warning: JAVA_HOME environment variable is not set." >&2 +fi + +# traverses directory structure from process work directory to filesystem root +# first directory with .mvn subdirectory is considered project base directory +find_maven_basedir() { + if [ -z "$1" ]; then + echo "Path not specified to find_maven_basedir" >&2 + return 1 + fi + + basedir="$1" + wdir="$1" + while [ "$wdir" != '/' ]; do + if [ -d "$wdir"/.mvn ]; then + basedir=$wdir + break + fi + # workaround for JBEAP-8937 (on Solaris 10/Sparc) + if [ -d "${wdir}" ]; then + wdir=$( + cd "$wdir/.." || exit 1 + pwd + ) + fi + # end of workaround + done + printf '%s' "$( + cd "$basedir" || exit 1 + pwd + )" +} + +# concatenates all lines of a file +concat_lines() { + if [ -f "$1" ]; then + # Remove \r in case we run on Windows within Git Bash + # and check out the repository with auto CRLF management + # enabled. Otherwise, we may read lines that are delimited with + # \r\n and produce $'-Xarg\r' rather than -Xarg due to word + # splitting rules. 
+ tr -s '\r\n' ' ' <"$1" + fi +} + +log() { + if [ "$MVNW_VERBOSE" = true ]; then + printf '%s\n' "$1" + fi +} + +BASE_DIR=$(find_maven_basedir "$(dirname "$0")") +if [ -z "$BASE_DIR" ]; then + exit 1 +fi + +MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"} +export MAVEN_PROJECTBASEDIR +log "$MAVEN_PROJECTBASEDIR" + +########################################################################################## +# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +# This allows using the maven wrapper in projects that prohibit checking in binary data. +########################################################################################## +wrapperJarPath="$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" +if [ -r "$wrapperJarPath" ]; then + log "Found $wrapperJarPath" +else + log "Couldn't find $wrapperJarPath, downloading it ..." + + if [ -n "$MVNW_REPOURL" ]; then + wrapperUrl="$MVNW_REPOURL/org/apache/maven/wrapper/maven-wrapper/3.3.2/maven-wrapper-3.3.2.jar" + else + wrapperUrl="https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.3.2/maven-wrapper-3.3.2.jar" + fi + while IFS="=" read -r key value; do + # Remove '\r' from value to allow usage on windows as IFS does not consider '\r' as a separator ( considers space, tab, new line ('\n'), and custom '=' ) + safeValue=$(echo "$value" | tr -d '\r') + case "$key" in wrapperUrl) + wrapperUrl="$safeValue" + break + ;; + esac + done <"$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.properties" + log "Downloading from: $wrapperUrl" + + if $cygwin; then + wrapperJarPath=$(cygpath --path --windows "$wrapperJarPath") + fi + + if command -v wget >/dev/null; then + log "Found wget ... 
using wget" + [ "$MVNW_VERBOSE" = true ] && QUIET="" || QUIET="--quiet" + if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then + wget $QUIET "$wrapperUrl" -O "$wrapperJarPath" || rm -f "$wrapperJarPath" + else + wget $QUIET --http-user="$MVNW_USERNAME" --http-password="$MVNW_PASSWORD" "$wrapperUrl" -O "$wrapperJarPath" || rm -f "$wrapperJarPath" + fi + elif command -v curl >/dev/null; then + log "Found curl ... using curl" + [ "$MVNW_VERBOSE" = true ] && QUIET="" || QUIET="--silent" + if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then + curl $QUIET -o "$wrapperJarPath" "$wrapperUrl" -f -L || rm -f "$wrapperJarPath" + else + curl $QUIET --user "$MVNW_USERNAME:$MVNW_PASSWORD" -o "$wrapperJarPath" "$wrapperUrl" -f -L || rm -f "$wrapperJarPath" + fi + else + log "Falling back to using Java to download" + javaSource="$MAVEN_PROJECTBASEDIR/.mvn/wrapper/MavenWrapperDownloader.java" + javaClass="$MAVEN_PROJECTBASEDIR/.mvn/wrapper/MavenWrapperDownloader.class" + # For Cygwin, switch paths to Windows format before running javac + if $cygwin; then + javaSource=$(cygpath --path --windows "$javaSource") + javaClass=$(cygpath --path --windows "$javaClass") + fi + if [ -e "$javaSource" ]; then + if [ ! -e "$javaClass" ]; then + log " - Compiling MavenWrapperDownloader.java ..." + ("$JAVA_HOME/bin/javac" "$javaSource") + fi + if [ -e "$javaClass" ]; then + log " - Running MavenWrapperDownloader.java ..." 
+ ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$wrapperUrl" "$wrapperJarPath") || rm -f "$wrapperJarPath" + fi + fi + fi +fi +########################################################################################## +# End of extension +########################################################################################## + +# If specified, validate the SHA-256 sum of the Maven wrapper jar file +wrapperSha256Sum="" +while IFS="=" read -r key value; do + case "$key" in wrapperSha256Sum) + wrapperSha256Sum=$value + break + ;; + esac +done <"$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.properties" +if [ -n "$wrapperSha256Sum" ]; then + wrapperSha256Result=false + if command -v sha256sum >/dev/null; then + if echo "$wrapperSha256Sum $wrapperJarPath" | sha256sum -c >/dev/null 2>&1; then + wrapperSha256Result=true + fi + elif command -v shasum >/dev/null; then + if echo "$wrapperSha256Sum $wrapperJarPath" | shasum -a 256 -c >/dev/null 2>&1; then + wrapperSha256Result=true + fi + else + echo "Checksum validation was requested but neither 'sha256sum' or 'shasum' are available." >&2 + echo "Please install either command, or disable validation by removing 'wrapperSha256Sum' from your maven-wrapper.properties." >&2 + exit 1 + fi + if [ $wrapperSha256Result = false ]; then + echo "Error: Failed to validate Maven wrapper SHA-256, your Maven wrapper might be compromised." >&2 + echo "Investigate or delete $wrapperJarPath to attempt a clean download." >&2 + echo "If you updated your Maven version, you need to update the specified wrapperSha256Sum property." 
>&2 + exit 1 + fi +fi + +MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS" + +# For Cygwin, switch paths to Windows format before running java +if $cygwin; then + [ -n "$JAVA_HOME" ] \ + && JAVA_HOME=$(cygpath --path --windows "$JAVA_HOME") + [ -n "$CLASSPATH" ] \ + && CLASSPATH=$(cygpath --path --windows "$CLASSPATH") + [ -n "$MAVEN_PROJECTBASEDIR" ] \ + && MAVEN_PROJECTBASEDIR=$(cygpath --path --windows "$MAVEN_PROJECTBASEDIR") +fi + +# Provide a "standardized" way to retrieve the CLI args that will +# work with both Windows and non-Windows executions. +MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $*" +export MAVEN_CMD_LINE_ARGS + +WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +# shellcheck disable=SC2086 # safe args +exec "$JAVACMD" \ + $MAVEN_OPTS \ + $MAVEN_DEBUG_OPTS \ + -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \ + "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \ + ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@" diff --git a/quarkus/tests/docker/micrometer-quickstart/mvnw.cmd b/quarkus/tests/docker/micrometer-quickstart/mvnw.cmd new file mode 100644 index 0000000000000..4136715f081ec --- /dev/null +++ b/quarkus/tests/docker/micrometer-quickstart/mvnw.cmd @@ -0,0 +1,206 @@ +@REM ---------------------------------------------------------------------------- +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. 
You may obtain a copy of the License at +@REM +@REM http://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. See the License for the +@REM specific language governing permissions and limitations +@REM under the License. +@REM ---------------------------------------------------------------------------- + +@REM ---------------------------------------------------------------------------- +@REM Apache Maven Wrapper startup batch script, version 3.3.2 +@REM +@REM Required ENV vars: +@REM JAVA_HOME - location of a JDK home dir +@REM +@REM Optional ENV vars +@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands +@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a keystroke before ending +@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven +@REM e.g. 
to debug Maven itself, use +@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files +@REM ---------------------------------------------------------------------------- + +@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on' +@echo off +@REM set title of command window +title %0 +@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on' +@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO% + +@REM set %HOME% to equivalent of $HOME +if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%") + +@REM Execute a user defined script before this one +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre +@REM check for pre script, once with legacy .bat ending and once with .cmd ending +if exist "%USERPROFILE%\mavenrc_pre.bat" call "%USERPROFILE%\mavenrc_pre.bat" %* +if exist "%USERPROFILE%\mavenrc_pre.cmd" call "%USERPROFILE%\mavenrc_pre.cmd" %* +:skipRcPre + +@setlocal + +set ERROR_CODE=0 + +@REM To isolate internal variables from possible post scripts, we use another setlocal +@setlocal + +@REM ==== START VALIDATION ==== +if not "%JAVA_HOME%" == "" goto OkJHome + +echo. >&2 +echo Error: JAVA_HOME not found in your environment. >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. >&2 +goto error + +:OkJHome +if exist "%JAVA_HOME%\bin\java.exe" goto init + +echo. >&2 +echo Error: JAVA_HOME is set to an invalid directory. >&2 +echo JAVA_HOME = "%JAVA_HOME%" >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. >&2 +goto error + +@REM ==== END VALIDATION ==== + +:init + +@REM Find the project base dir, i.e. the directory that contains the folder ".mvn". +@REM Fallback to current working directory if not found. 
+ +set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR% +IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir + +set EXEC_DIR=%CD% +set WDIR=%EXEC_DIR% +:findBaseDir +IF EXIST "%WDIR%"\.mvn goto baseDirFound +cd .. +IF "%WDIR%"=="%CD%" goto baseDirNotFound +set WDIR=%CD% +goto findBaseDir + +:baseDirFound +set MAVEN_PROJECTBASEDIR=%WDIR% +cd "%EXEC_DIR%" +goto endDetectBaseDir + +:baseDirNotFound +set MAVEN_PROJECTBASEDIR=%EXEC_DIR% +cd "%EXEC_DIR%" + +:endDetectBaseDir + +IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig + +@setlocal EnableExtensions EnableDelayedExpansion +for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a +@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS% + +:endReadAdditionalConfig + +SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe" +set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar" +set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +set WRAPPER_URL="https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.3.2/maven-wrapper-3.3.2.jar" + +FOR /F "usebackq tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO ( + IF "%%A"=="wrapperUrl" SET WRAPPER_URL=%%B +) + +@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +@REM This allows using the maven wrapper in projects that prohibit checking in binary data. +if exist %WRAPPER_JAR% ( + if "%MVNW_VERBOSE%" == "true" ( + echo Found %WRAPPER_JAR% + ) +) else ( + if not "%MVNW_REPOURL%" == "" ( + SET WRAPPER_URL="%MVNW_REPOURL%/org/apache/maven/wrapper/maven-wrapper/3.3.2/maven-wrapper-3.3.2.jar" + ) + if "%MVNW_VERBOSE%" == "true" ( + echo Couldn't find %WRAPPER_JAR%, downloading it ... 
+ echo Downloading from: %WRAPPER_URL% + ) + + powershell -Command "&{"^ + "$webclient = new-object System.Net.WebClient;"^ + "if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^ + "$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^ + "}"^ + "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%WRAPPER_URL%', '%WRAPPER_JAR%')"^ + "}" + if "%MVNW_VERBOSE%" == "true" ( + echo Finished downloading %WRAPPER_JAR% + ) +) +@REM End of extension + +@REM If specified, validate the SHA-256 sum of the Maven wrapper jar file +SET WRAPPER_SHA_256_SUM="" +FOR /F "usebackq tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO ( + IF "%%A"=="wrapperSha256Sum" SET WRAPPER_SHA_256_SUM=%%B +) +IF NOT %WRAPPER_SHA_256_SUM%=="" ( + powershell -Command "&{"^ + "Import-Module $PSHOME\Modules\Microsoft.PowerShell.Utility -Function Get-FileHash;"^ + "$hash = (Get-FileHash \"%WRAPPER_JAR%\" -Algorithm SHA256).Hash.ToLower();"^ + "If('%WRAPPER_SHA_256_SUM%' -ne $hash){"^ + " Write-Error 'Error: Failed to validate Maven wrapper SHA-256, your Maven wrapper might be compromised.';"^ + " Write-Error 'Investigate or delete %WRAPPER_JAR% to attempt a clean download.';"^ + " Write-Error 'If you updated your Maven version, you need to update the specified wrapperSha256Sum property.';"^ + " exit 1;"^ + "}"^ + "}" + if ERRORLEVEL 1 goto error +) + +@REM Provide a "standardized" way to retrieve the CLI args that will +@REM work with both Windows and non-Windows executions. 
+set MAVEN_CMD_LINE_ARGS=%* + +%MAVEN_JAVA_EXE% ^ + %JVM_CONFIG_MAVEN_PROPS% ^ + %MAVEN_OPTS% ^ + %MAVEN_DEBUG_OPTS% ^ + -classpath %WRAPPER_JAR% ^ + "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" ^ + %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %* +if ERRORLEVEL 1 goto error +goto end + +:error +set ERROR_CODE=1 + +:end +@endlocal & set ERROR_CODE=%ERROR_CODE% + +if not "%MAVEN_SKIP_RC%"=="" goto skipRcPost +@REM check for post script, once with legacy .bat ending and once with .cmd ending +if exist "%USERPROFILE%\mavenrc_post.bat" call "%USERPROFILE%\mavenrc_post.bat" +if exist "%USERPROFILE%\mavenrc_post.cmd" call "%USERPROFILE%\mavenrc_post.cmd" +:skipRcPost + +@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on' +if "%MAVEN_BATCH_PAUSE%"=="on" pause + +if "%MAVEN_TERMINATE_CMD%"=="on" exit %ERROR_CODE% + +cmd /C exit /B %ERROR_CODE% diff --git a/quarkus/tests/docker/micrometer-quickstart/pom.xml b/quarkus/tests/docker/micrometer-quickstart/pom.xml new file mode 100644 index 0000000000000..623b16ead9cdb --- /dev/null +++ b/quarkus/tests/docker/micrometer-quickstart/pom.xml @@ -0,0 +1,127 @@ + + + 4.0.0 + + org.acme + micrometer-quickstart + 1.0.0-SNAPSHOT + + + quarkus-bom + io.quarkus.platform + 3.17.4 + 3.11.0 + 3.1.2 + 17 + 17 + true + UTF-8 + + + + + + ${quarkus.platform.group-id} + ${quarkus.platform.artifact-id} + ${quarkus.platform.version} + pom + import + + + + + + + io.quarkus + quarkus-micrometer-registry-prometheus + + + io.quarkus + quarkus-rest + + + io.quarkus + quarkus-junit5 + test + + + io.rest-assured + rest-assured + test + + + + + ${project.artifactId} + + + maven-compiler-plugin + ${compiler-plugin.version} + + + maven-surefire-plugin + ${surefire-plugin.version} + + + org.jboss.logmanager.LogManager + ${maven.home} + + + + + ${quarkus.platform.group-id} + quarkus-maven-plugin + ${quarkus.platform.version} + + + + build + + + + + + + + + native + + + native + + + + true + false + + + + + org.apache.maven.plugins + 
maven-failsafe-plugin + ${surefire-plugin.version} + + + + integration-test + verify + + + + ${project.build.directory}/${project.build.finalName}-runner + org.jboss.logmanager.LogManager + ${maven.home} + + + + + + + + + + + + diff --git a/quarkus/tests/docker/micrometer-quickstart/src/main/docker/Dockerfile.jvm b/quarkus/tests/docker/micrometer-quickstart/src/main/docker/Dockerfile.jvm new file mode 100644 index 0000000000000..e79d3a6e0865c --- /dev/null +++ b/quarkus/tests/docker/micrometer-quickstart/src/main/docker/Dockerfile.jvm @@ -0,0 +1,97 @@ +#### +# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode +# +# Before building the container image run: +# +# ./mvnw package +# +# Then, build the image with: +# +# docker build -f src/main/docker/Dockerfile.jvm -t quarkus/micrometer-quickstart-jvm . +# +# Then run the container using: +# +# docker run -i --rm -p 8080:8080 quarkus/micrometer-quickstart-jvm +# +# If you want to include the debug port into your docker image +# you will have to expose the debug port (default 5005 being the default) like this : EXPOSE 8080 5005. +# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005 +# when running the container +# +# Then run the container using : +# +# docker run -i --rm -p 8080:8080 quarkus/micrometer-quickstart-jvm +# +# This image uses the `run-java.sh` script to run the application. +# This scripts computes the command line to execute your Java application, and +# includes memory/GC tuning. +# You can configure the behavior using the following environment properties: +# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class") +# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options +# in JAVA_OPTS (example: "-Dsome.property=foo") +# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. 
This is +# used to calculate a default maximal heap memory based on a containers restriction. +# If used in a container without any memory constraints for the container then this +# option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio +# of the container available memory as set here. The default is `50` which means 50% +# of the available memory is used as an upper boundary. You can skip this mechanism by +# setting this value to `0` in which case no `-Xmx` option is added. +# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This +# is used to calculate a default initial heap memory based on the maximum heap memory. +# If used in a container without any memory constraints for the container then this +# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio +# of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx` +# is used as the initial heap size. You can skip this mechanism by setting this value +# to `0` in which case no `-Xms` option is added (example: "25") +# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS. +# This is used to calculate the maximum value of the initial heap memory. If used in +# a container without any memory constraints for the container then this option has +# no effect. If there is a memory constraint then `-Xms` is limited to the value set +# here. The default is 4096MB which means the calculated value of `-Xms` never will +# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096") +# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output +# when things are happening. This option, if set to true, will set +# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true"). +# - JAVA_DEBUG: If set remote debugging will be switched on. Disabled by default (example: +# true"). +# - JAVA_DEBUG_PORT: Port used for remote debugging. 
Defaults to 5005 (example: "8787"). +# - CONTAINER_CORE_LIMIT: A calculated core limit as described in +# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2") +# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024"). +# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion. +# (example: "20") +# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking. +# (example: "40") +# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection. +# (example: "4") +# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus +# previous GC times. (example: "90") +# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20") +# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100") +# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should +# contain the necessary JRE command-line options to specify the required GC, which +# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC). +# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080") +# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080") +# - NO_PROXY: A comma separated lists of hosts, IP addresses or domains that can be +# accessed directly. 
(example: "foo.example.com,bar.example.com") +# +### +FROM registry.access.redhat.com/ubi8/openjdk-17:1.20 + +ENV LANGUAGE='en_US:en' + + +# We make four distinct layers so if there are application changes the library layers can be re-used +COPY --chown=185 target/quarkus-app/lib/ /deployments/lib/ +COPY --chown=185 target/quarkus-app/*.jar /deployments/ +COPY --chown=185 target/quarkus-app/app/ /deployments/app/ +COPY --chown=185 target/quarkus-app/quarkus/ /deployments/quarkus/ + +EXPOSE 8080 +USER 185 +ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager" +ENV JAVA_APP_JAR="/deployments/quarkus-run.jar" + +ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ] + diff --git a/quarkus/tests/docker/micrometer-quickstart/src/main/docker/Dockerfile.legacy-jar b/quarkus/tests/docker/micrometer-quickstart/src/main/docker/Dockerfile.legacy-jar new file mode 100644 index 0000000000000..53a3108267d77 --- /dev/null +++ b/quarkus/tests/docker/micrometer-quickstart/src/main/docker/Dockerfile.legacy-jar @@ -0,0 +1,93 @@ +#### +# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode +# +# Before building the container image run: +# +# ./mvnw package -Dquarkus.package.jar.type=legacy-jar +# +# Then, build the image with: +# +# docker build -f src/main/docker/Dockerfile.legacy-jar -t quarkus/micrometer-quickstart-legacy-jar . +# +# Then run the container using: +# +# docker run -i --rm -p 8080:8080 quarkus/micrometer-quickstart-legacy-jar +# +# If you want to include the debug port into your docker image +# you will have to expose the debug port (default 5005 being the default) like this : EXPOSE 8080 5005. 
+# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005 +# when running the container +# +# Then run the container using : +# +# docker run -i --rm -p 8080:8080 quarkus/micrometer-quickstart-legacy-jar +# +# This image uses the `run-java.sh` script to run the application. +# This scripts computes the command line to execute your Java application, and +# includes memory/GC tuning. +# You can configure the behavior using the following environment properties: +# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class") +# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options +# in JAVA_OPTS (example: "-Dsome.property=foo") +# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is +# used to calculate a default maximal heap memory based on a containers restriction. +# If used in a container without any memory constraints for the container then this +# option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio +# of the container available memory as set here. The default is `50` which means 50% +# of the available memory is used as an upper boundary. You can skip this mechanism by +# setting this value to `0` in which case no `-Xmx` option is added. +# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This +# is used to calculate a default initial heap memory based on the maximum heap memory. +# If used in a container without any memory constraints for the container then this +# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio +# of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx` +# is used as the initial heap size. You can skip this mechanism by setting this value +# to `0` in which case no `-Xms` option is added (example: "25") +# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS. 
+# This is used to calculate the maximum value of the initial heap memory. If used in +# a container without any memory constraints for the container then this option has +# no effect. If there is a memory constraint then `-Xms` is limited to the value set +# here. The default is 4096MB which means the calculated value of `-Xms` never will +# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096") +# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output +# when things are happening. This option, if set to true, will set +# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true"). +# - JAVA_DEBUG: If set remote debugging will be switched on. Disabled by default (example: +# true"). +# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787"). +# - CONTAINER_CORE_LIMIT: A calculated core limit as described in +# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2") +# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024"). +# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion. +# (example: "20") +# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking. +# (example: "40") +# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection. +# (example: "4") +# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus +# previous GC times. (example: "90") +# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20") +# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100") +# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should +# contain the necessary JRE command-line options to specify the required GC, which +# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC). +# - HTTPS_PROXY: The location of the https proxy. 
(example: "myuser@127.0.0.1:8080") +# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080") +# - NO_PROXY: A comma separated lists of hosts, IP addresses or domains that can be +# accessed directly. (example: "foo.example.com,bar.example.com") +# +### +FROM registry.access.redhat.com/ubi8/openjdk-17:1.20 + +ENV LANGUAGE='en_US:en' + + +COPY target/lib/* /deployments/lib/ +COPY target/*-runner.jar /deployments/quarkus-run.jar + +EXPOSE 8080 +USER 185 +ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager" +ENV JAVA_APP_JAR="/deployments/quarkus-run.jar" + +ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ] diff --git a/quarkus/tests/docker/micrometer-quickstart/src/main/docker/Dockerfile.native b/quarkus/tests/docker/micrometer-quickstart/src/main/docker/Dockerfile.native new file mode 100644 index 0000000000000..e8fd1da6b4245 --- /dev/null +++ b/quarkus/tests/docker/micrometer-quickstart/src/main/docker/Dockerfile.native @@ -0,0 +1,27 @@ +#### +# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode. +# +# Before building the container image run: +# +# ./mvnw package -Dnative +# +# Then, build the image with: +# +# docker build -f src/main/docker/Dockerfile.native -t quarkus/micrometer-quickstart . 
+# +# Then run the container using: +# +# docker run -i --rm -p 8080:8080 quarkus/micrometer-quickstart +# +### +FROM registry.access.redhat.com/ubi8/ubi-minimal:8.10 +WORKDIR /work/ +RUN chown 1001 /work \ + && chmod "g+rwX" /work \ + && chown 1001:root /work +COPY --chown=1001:root target/*-runner /work/application + +EXPOSE 8080 +USER 1001 + +ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"] diff --git a/quarkus/tests/docker/micrometer-quickstart/src/main/docker/Dockerfile.native-micro b/quarkus/tests/docker/micrometer-quickstart/src/main/docker/Dockerfile.native-micro new file mode 100644 index 0000000000000..4eff6a24a7bf2 --- /dev/null +++ b/quarkus/tests/docker/micrometer-quickstart/src/main/docker/Dockerfile.native-micro @@ -0,0 +1,30 @@ +#### +# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode. +# It uses a micro base image, tuned for Quarkus native executables. +# It reduces the size of the resulting container image. +# Check https://quarkus.io/guides/quarkus-runtime-base-image for further information about this image. +# +# Before building the container image run: +# +# ./mvnw package -Dnative +# +# Then, build the image with: +# +# docker build -f src/main/docker/Dockerfile.native-micro -t quarkus/micrometer-quickstart . 
+# +# Then run the container using: +# +# docker run -i --rm -p 8080:8080 quarkus/micrometer-quickstart +# +### +FROM quay.io/quarkus/quarkus-micro-image:2.0 +WORKDIR /work/ +RUN chown 1001 /work \ + && chmod "g+rwX" /work \ + && chown 1001:root /work +COPY --chown=1001:root target/*-runner /work/application + +EXPOSE 8080 +USER 1001 + +ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"] diff --git a/quarkus/tests/docker/micrometer-quickstart/src/main/java/org/acme/micrometer/ExampleResource.java b/quarkus/tests/docker/micrometer-quickstart/src/main/java/org/acme/micrometer/ExampleResource.java new file mode 100644 index 0000000000000..0629a31574f79 --- /dev/null +++ b/quarkus/tests/docker/micrometer-quickstart/src/main/java/org/acme/micrometer/ExampleResource.java @@ -0,0 +1,81 @@ +package org.acme.micrometer; + +import java.util.LinkedList; +import java.util.NoSuchElementException; + +import jakarta.ws.rs.GET; +import jakarta.ws.rs.Path; +import jakarta.ws.rs.Produces; + +import io.micrometer.core.instrument.MeterRegistry; +import io.micrometer.core.instrument.Tags; +import io.micrometer.core.instrument.Timer; + +@Path("/example") +@Produces("text/plain") +public class ExampleResource { + + private final MeterRegistry registry; + + LinkedList list = new LinkedList<>(); + + // Update the constructor to create the gauge + ExampleResource(MeterRegistry registry) { + this.registry = registry; + registry.gaugeCollectionSize("example.list.size", Tags.empty(), list); + } + + @GET + @Path("gauge/{number}") + public Long checkListSize(long number) { + if (number == 2 || number % 2 == 0) { + // add even numbers to the list + list.add(number); + } else { + // remove items from the list for odd numbers + try { + number = list.removeFirst(); + } catch (NoSuchElementException nse) { + number = 0; + } + } + return number; + } + + @GET + @Path("prime/{number}") + public String checkIfPrime(long number) { + if (number < 1) { + registry.counter("example.prime.number", 
"type", "not-natural").increment(); + return "Only natural numbers can be prime numbers."; + } + if (number == 1) { + registry.counter("example.prime.number", "type", "one").increment(); + return number + " is not prime."; + } + if (number == 2 || number % 2 == 0) { + registry.counter("example.prime.number", "type", "even").increment(); + return number + " is not prime."; + } + + if (testPrimeNumber(number)) { + registry.counter("example.prime.number", "type", "prime").increment(); + return number + " is prime."; + } else { + registry.counter("example.prime.number", "type", "not-prime").increment(); + return number + " is not prime."; + } + } + + protected boolean testPrimeNumber(long number) { + Timer timer = registry.timer("example.prime.number.test"); + return timer.record(() -> { + for (int i = 3; i < Math.floor(Math.sqrt(number)) + 1; i = i + 2) { + if (number % i == 0) { + return false; + } + } + return true; + }); + } +} diff --git a/quarkus/tests/docker/micrometer-quickstart/src/test/java/org/acme/micrometer/ExampleResourceIT.java b/quarkus/tests/docker/micrometer-quickstart/src/test/java/org/acme/micrometer/ExampleResourceIT.java new file mode 100644 index 0000000000000..6c24500cf3963 --- /dev/null +++ b/quarkus/tests/docker/micrometer-quickstart/src/test/java/org/acme/micrometer/ExampleResourceIT.java @@ -0,0 +1,8 @@ +package org.acme.micrometer; + +import io.quarkus.test.junit.QuarkusIntegrationTest; + +@QuarkusIntegrationTest +public class ExampleResourceIT extends ExampleResourceTest { + +} diff --git a/quarkus/tests/docker/micrometer-quickstart/src/test/java/org/acme/micrometer/ExampleResourceTest.java b/quarkus/tests/docker/micrometer-quickstart/src/test/java/org/acme/micrometer/ExampleResourceTest.java new file mode 100644 index 0000000000000..f0e5b8f1a34a7 --- /dev/null +++ b/quarkus/tests/docker/micrometer-quickstart/src/test/java/org/acme/micrometer/ExampleResourceTest.java @@ -0,0 +1,68 @@ +package org.acme.micrometer; + +import static 
io.restassured.RestAssured.get; +import static io.restassured.RestAssured.when; +import static org.hamcrest.CoreMatchers.containsString; + +import org.junit.jupiter.api.Test; + +import io.quarkus.test.junit.QuarkusTest; +import io.restassured.http.Header; + +@QuarkusTest +public class ExampleResourceTest { + + @Test + void testGaugeExample() { + when().get("/example/gauge/1").then().statusCode(200); + when().get("/example/gauge/2").then().statusCode(200); + when().get("/example/gauge/4").then().statusCode(200); + when().get("/q/metrics").then().statusCode(200) + .body(containsString( + "example_list_size 2.0")); + when().get("/example/gauge/6").then().statusCode(200); + when().get("/example/gauge/5").then().statusCode(200); + when().get("/example/gauge/7").then().statusCode(200); + when().get("/q/metrics").then().statusCode(200) + .body(containsString( + "example_list_size 1.0")); + } + + @Test + void testCounterExample() { + when().get("/example/prime/-1").then().statusCode(200); + when().get("/example/prime/0").then().statusCode(200); + when().get("/example/prime/1").then().statusCode(200); + when().get("/example/prime/2").then().statusCode(200); + when().get("/example/prime/3").then().statusCode(200); + when().get("/example/prime/15").then().statusCode(200); + + when().get("/q/metrics").then().statusCode(200) + .body(containsString( + "example_prime_number_total{type=\"prime\"}")) + .body(containsString( + "example_prime_number_total{type=\"not-prime\"}")) + .body(containsString( + "example_prime_number_total{type=\"one\"}")) + .body(containsString( + "example_prime_number_total{type=\"even\"}")) + .body(containsString( + "example_prime_number_total{type=\"not-natural\"}")); + } + + @Test + void testTimerExample() { + when().get("/example/prime/257").then().statusCode(200); + when().get("/q/metrics").then().statusCode(200) + .body(containsString( + "example_prime_number_test_seconds_sum")) + .body(containsString( + "example_prime_number_test_seconds_max")) + 
.body(containsString( + "example_prime_number_test_seconds_count 1.0")); + when().get("/example/prime/7919").then().statusCode(200); + when().get("/q/metrics").then().statusCode(200) + .body(containsString( + "example_prime_number_test_seconds_count 2.0")); + } +} diff --git a/quarkus/tests/fixtures/quarkus_auto_metrics.txt b/quarkus/tests/fixtures/quarkus_auto_metrics.txt new file mode 100644 index 0000000000000..fb35ed0b0fb1f --- /dev/null +++ b/quarkus/tests/fixtures/quarkus_auto_metrics.txt @@ -0,0 +1,241 @@ +# TYPE worker_pool_rejected counter +# HELP worker_pool_rejected Number of times submissions to the pool have been rejected +worker_pool_rejected_total{pool_name="vert.x-internal-blocking",pool_type="worker"} 0.0 +worker_pool_rejected_total{pool_name="vert.x-worker-thread",pool_type="worker"} 0.0 +# TYPE worker_pool_completed counter +# HELP worker_pool_completed Number of times resources from the pool have been acquired +worker_pool_completed_total{pool_name="vert.x-internal-blocking",pool_type="worker"} 0.0 +worker_pool_completed_total{pool_name="vert.x-worker-thread",pool_type="worker"} 5.0 +# TYPE jvm_gc_memory_promoted_bytes counter +# HELP jvm_gc_memory_promoted_bytes Count of positive increases in the size of the old generation memory pool before GC to after GC +jvm_gc_memory_promoted_bytes_total 0.0 +# TYPE netty_allocator_pooled_cache_size gauge +# HELP netty_allocator_pooled_cache_size +netty_allocator_pooled_cache_size{allocator_type="PooledByteBufAllocator",cache_type="normal",id="298568580"} 64.0 +netty_allocator_pooled_cache_size{allocator_type="PooledByteBufAllocator",cache_type="normal",id="1612048265"} 64.0 +netty_allocator_pooled_cache_size{allocator_type="PooledByteBufAllocator",cache_type="small",id="298568580"} 256.0 +netty_allocator_pooled_cache_size{allocator_type="PooledByteBufAllocator",cache_type="small",id="1612048265"} 256.0 +# TYPE worker_pool_queue_delay_seconds_max gauge +# HELP worker_pool_queue_delay_seconds_max Time spent 
in the waiting queue before being processed +worker_pool_queue_delay_seconds_max{pool_name="vert.x-internal-blocking",pool_type="worker"} 0.0 +worker_pool_queue_delay_seconds_max{pool_name="vert.x-worker-thread",pool_type="worker"} 0.001048665 +# TYPE worker_pool_queue_delay_seconds summary +# HELP worker_pool_queue_delay_seconds Time spent in the waiting queue before being processed +worker_pool_queue_delay_seconds_count{pool_name="vert.x-internal-blocking",pool_type="worker"} 0.0 +worker_pool_queue_delay_seconds_sum{pool_name="vert.x-internal-blocking",pool_type="worker"} 0.0 +worker_pool_queue_delay_seconds_count{pool_name="vert.x-worker-thread",pool_type="worker"} 6.0 +worker_pool_queue_delay_seconds_sum{pool_name="vert.x-worker-thread",pool_type="worker"} 0.002354759 +# TYPE jvm_memory_committed_bytes gauge +# HELP jvm_memory_committed_bytes The amount of memory in bytes that is committed for the Java virtual machine to use +jvm_memory_committed_bytes{area="heap",id="G1 Survivor Space"} 1.2582912E7 +jvm_memory_committed_bytes{area="heap",id="G1 Old Gen"} 5.8720256E7 +jvm_memory_committed_bytes{area="nonheap",id="Metaspace"} 5.1576832E7 +jvm_memory_committed_bytes{area="nonheap",id="CodeCache"} 1.3369344E7 +jvm_memory_committed_bytes{area="heap",id="G1 Eden Space"} 9.0177536E7 +jvm_memory_committed_bytes{area="nonheap",id="Compressed Class Space"} 7602176.0 +# TYPE process_uptime_seconds gauge +# HELP process_uptime_seconds The uptime of the Java virtual machine +process_uptime_seconds 99.172 +# TYPE jvm_threads_daemon_threads gauge +# HELP jvm_threads_daemon_threads The current number of live daemon threads +jvm_threads_daemon_threads 12.0 +# TYPE http_server_connections_seconds_max gauge +# HELP http_server_connections_seconds_max The duration of the connections +http_server_connections_seconds_max 0.003109493 +# TYPE http_server_connections_seconds summary +# HELP http_server_connections_seconds The duration of the connections 
+http_server_connections_seconds_active_count 1.0 +http_server_connections_seconds_duration_sum 0.003101871 +# TYPE process_start_time_seconds gauge +# HELP process_start_time_seconds Start time of the process since unix epoch. +process_start_time_seconds 1.734088355036E9 +# TYPE http_server_bytes_read summary +# HELP http_server_bytes_read Number of bytes received by the server +http_server_bytes_read_count 0.0 +http_server_bytes_read_sum 0.0 +# TYPE http_server_bytes_read_max gauge +# HELP http_server_bytes_read_max Number of bytes received by the server +http_server_bytes_read_max 0.0 +# TYPE jvm_threads_live_threads gauge +# HELP jvm_threads_live_threads The current number of live threads including both daemon and non-daemon threads +jvm_threads_live_threads 21.0 +# TYPE http_server_requests_seconds summary +# HELP http_server_requests_seconds HTTP server request processing time +http_server_requests_seconds_count{method="GET",outcome="SUCCESS",status="200",uri="/example/prime/{number}"} 1.0 +http_server_requests_seconds_sum{method="GET",outcome="SUCCESS",status="200",uri="/example/prime/{number}"} 0.010070499 +http_server_requests_seconds_count{method="GET",outcome="CLIENT_ERROR",status="404",uri="NOT_FOUND"} 1.0 +http_server_requests_seconds_sum{method="GET",outcome="CLIENT_ERROR",status="404",uri="NOT_FOUND"} 0.028919085 +# TYPE http_server_requests_seconds_max gauge +# HELP http_server_requests_seconds_max HTTP server request processing time +http_server_requests_seconds_max{method="GET",outcome="SUCCESS",status="200",uri="/example/prime/{number}"} 0.010070499 +http_server_requests_seconds_max{method="GET",outcome="CLIENT_ERROR",status="404",uri="NOT_FOUND"} 0.028919085 +# TYPE system_cpu_usage gauge +# HELP system_cpu_usage The \"recent cpu usage\" of the system the application is running in +system_cpu_usage 6.443298969072165E-4 +# TYPE jvm_gc_overhead gauge +# HELP jvm_gc_overhead An approximation of the percent of CPU time used by GC activities over the 
last lookback period or since monitoring began, whichever is shorter, in the range [0..1] +jvm_gc_overhead 0.0 +# TYPE worker_pool_active gauge +# HELP worker_pool_active The number of resources from the pool currently used +worker_pool_active{pool_name="vert.x-internal-blocking",pool_type="worker"} 0.0 +worker_pool_active{pool_name="vert.x-worker-thread",pool_type="worker"} 1.0 +# TYPE jvm_threads_states_threads gauge +# HELP jvm_threads_states_threads The current number of threads +jvm_threads_states_threads{state="runnable"} 11.0 +jvm_threads_states_threads{state="blocked"} 0.0 +jvm_threads_states_threads{state="waiting"} 7.0 +jvm_threads_states_threads{state="timed-waiting"} 3.0 +jvm_threads_states_threads{state="new"} 0.0 +jvm_threads_states_threads{state="terminated"} 0.0 +# TYPE netty_allocator_memory_pinned gauge +# HELP netty_allocator_memory_pinned +netty_allocator_memory_pinned{allocator_type="PooledByteBufAllocator",id="1612048265",memory_type="heap"} 0.0 +netty_allocator_memory_pinned{allocator_type="PooledByteBufAllocator",id="1612048265",memory_type="direct"} 0.0 +netty_allocator_memory_pinned{allocator_type="PooledByteBufAllocator",id="298568580",memory_type="heap"} 0.0 +netty_allocator_memory_pinned{allocator_type="PooledByteBufAllocator",id="298568580",memory_type="direct"} 0.0 +# TYPE system_cpu_count gauge +# HELP system_cpu_count The number of processors available to the Java virtual machine +system_cpu_count 4.0 +# TYPE jvm_info counter +# HELP jvm_info JVM version info +jvm_info_total{runtime="OpenJDK Runtime Environment",vendor="Eclipse Adoptium",version="21.0.5+11-LTS"} 1.0 +# TYPE jvm_buffer_memory_used_bytes gauge +# HELP jvm_buffer_memory_used_bytes An estimate of the memory that the Java virtual machine is using for this buffer pool +jvm_buffer_memory_used_bytes{id="mapped - 'non-volatile memory'"} 0.0 +jvm_buffer_memory_used_bytes{id="mapped"} 0.0 +jvm_buffer_memory_used_bytes{id="direct"} 265988.0 +# TYPE 
netty_eventexecutor_tasks_pending gauge +# HELP netty_eventexecutor_tasks_pending +netty_eventexecutor_tasks_pending{name="vert.x-eventloop-thread-2"} 0.0 +netty_eventexecutor_tasks_pending{name="vert.x-eventloop-thread-1"} 0.0 +netty_eventexecutor_tasks_pending{name="vert.x-eventloop-thread-0"} 0.0 +netty_eventexecutor_tasks_pending{name="vert.x-acceptor-thread-0"} 0.0 +netty_eventexecutor_tasks_pending{name="vert.x-eventloop-thread-3"} 0.0 +# TYPE jvm_buffer_total_capacity_bytes gauge +# HELP jvm_buffer_total_capacity_bytes An estimate of the total capacity of the buffers in this pool +jvm_buffer_total_capacity_bytes{id="mapped - 'non-volatile memory'"} 0.0 +jvm_buffer_total_capacity_bytes{id="mapped"} 0.0 +jvm_buffer_total_capacity_bytes{id="direct"} 265987.0 +# TYPE jvm_gc_max_data_size_bytes gauge +# HELP jvm_gc_max_data_size_bytes Max size of long-lived heap memory pool +jvm_gc_max_data_size_bytes 4.192206848E9 +# TYPE jvm_memory_usage_after_gc gauge +# HELP jvm_memory_usage_after_gc The percentage of long-lived heap pool used after the last GC event, in the range [0..1] +jvm_memory_usage_after_gc{area="heap",pool="long-lived"} 0.0 +# TYPE http_server_bytes_written_max gauge +# HELP http_server_bytes_written_max Number of bytes sent by the server +http_server_bytes_written_max 12288.0 +# TYPE http_server_bytes_written summary +# HELP http_server_bytes_written Number of bytes sent by the server +http_server_bytes_written_count 4.0 +http_server_bytes_written_sum 16571.0 +# TYPE worker_pool_idle gauge +# HELP worker_pool_idle The number of resources from the pool currently used +worker_pool_idle{pool_name="vert.x-internal-blocking",pool_type="worker"} 20.0 +worker_pool_idle{pool_name="vert.x-worker-thread",pool_type="worker"} 199.0 +# TYPE worker_pool_ratio gauge +# HELP worker_pool_ratio Pool usage ratio +worker_pool_ratio{pool_name="vert.x-internal-blocking",pool_type="worker"} NaN +worker_pool_ratio{pool_name="vert.x-worker-thread",pool_type="worker"} 0.005 
+# TYPE jvm_memory_max_bytes gauge +# HELP jvm_memory_max_bytes The maximum amount of memory in bytes that can be used for memory management +jvm_memory_max_bytes{area="heap",id="G1 Survivor Space"} -1.0 +jvm_memory_max_bytes{area="heap",id="G1 Old Gen"} 4.192206848E9 +jvm_memory_max_bytes{area="nonheap",id="Metaspace"} -1.0 +jvm_memory_max_bytes{area="nonheap",id="CodeCache"} 5.0331648E7 +jvm_memory_max_bytes{area="heap",id="G1 Eden Space"} -1.0 +jvm_memory_max_bytes{area="nonheap",id="Compressed Class Space"} 1.073741824E9 +# TYPE jvm_memory_used_bytes gauge +# HELP jvm_memory_used_bytes The amount of used memory +jvm_memory_used_bytes{area="heap",id="G1 Survivor Space"} 1.1491696E7 +jvm_memory_used_bytes{area="heap",id="G1 Old Gen"} 4.188796E7 +jvm_memory_used_bytes{area="nonheap",id="Metaspace"} 5.0020504E7 +jvm_memory_used_bytes{area="nonheap",id="CodeCache"} 1.2352896E7 +jvm_memory_used_bytes{area="heap",id="G1 Eden Space"} 5.6623104E7 +jvm_memory_used_bytes{area="nonheap",id="Compressed Class Space"} 6877464.0 +# TYPE netty_allocator_pooled_arenas gauge +# HELP netty_allocator_pooled_arenas +netty_allocator_pooled_arenas{allocator_type="PooledByteBufAllocator",id="1612048265",memory_type="heap"} 8.0 +netty_allocator_pooled_arenas{allocator_type="PooledByteBufAllocator",id="1612048265",memory_type="direct"} 8.0 +netty_allocator_pooled_arenas{allocator_type="PooledByteBufAllocator",id="298568580",memory_type="heap"} 8.0 +netty_allocator_pooled_arenas{allocator_type="PooledByteBufAllocator",id="298568580",memory_type="direct"} 8.0 +# TYPE example_list_size gauge +# HELP example_list_size +example_list_size 0.0 +# TYPE process_cpu_time_ns counter +# HELP process_cpu_time_ns The \"cpu time\" used by the Java Virtual Machine process +process_cpu_time_ns_total 6.28E9 +# TYPE jvm_gc_memory_allocated_bytes counter +# HELP jvm_gc_memory_allocated_bytes Incremented for an increase in the size of the (young) heap memory pool after one GC to before the next 
+jvm_gc_memory_allocated_bytes_total 0.0 +# TYPE process_files_max_files gauge +# HELP process_files_max_files The maximum file descriptor count +process_files_max_files 1048576.0 +# TYPE http_server_active_requests gauge +# HELP http_server_active_requests +http_server_active_requests 1.0 +# TYPE jvm_classes_unloaded_classes counter +# HELP jvm_classes_unloaded_classes The total number of classes unloaded since the Java virtual machine has started execution +jvm_classes_unloaded_classes_total 7.0 +# TYPE netty_allocator_memory_used gauge +# HELP netty_allocator_memory_used +netty_allocator_memory_used{allocator_type="PooledByteBufAllocator",id="1612048265",memory_type="heap"} 0.0 +netty_allocator_memory_used{allocator_type="PooledByteBufAllocator",id="1612048265",memory_type="direct"} 0.0 +netty_allocator_memory_used{allocator_type="UnpooledByteBufAllocator",id="2051878706",memory_type="direct"} 31.0 +netty_allocator_memory_used{allocator_type="UnpooledByteBufAllocator",id="125603901",memory_type="heap"} 0.0 +netty_allocator_memory_used{allocator_type="PooledByteBufAllocator",id="298568580",memory_type="heap"} 0.0 +netty_allocator_memory_used{allocator_type="PooledByteBufAllocator",id="298568580",memory_type="direct"} 196608.0 +netty_allocator_memory_used{allocator_type="UnpooledByteBufAllocator",id="125603901",memory_type="direct"} 0.0 +netty_allocator_memory_used{allocator_type="UnpooledByteBufAllocator",id="2051878706",memory_type="heap"} 128.0 +# TYPE system_load_average_1m gauge +# HELP system_load_average_1m The sum of the number of runnable entities queued to available processors and the number of runnable entities running on the available processors averaged over a period of time +system_load_average_1m 0.12939453125 +# TYPE worker_pool_usage_seconds summary +# HELP worker_pool_usage_seconds Time spent using resources from the pool +worker_pool_usage_seconds_count{pool_name="vert.x-internal-blocking",pool_type="worker"} 0.0 
+worker_pool_usage_seconds_sum{pool_name="vert.x-internal-blocking",pool_type="worker"} 0.0 +worker_pool_usage_seconds_count{pool_name="vert.x-worker-thread",pool_type="worker"} 5.0 +worker_pool_usage_seconds_sum{pool_name="vert.x-worker-thread",pool_type="worker"} 0.020086393 +# TYPE worker_pool_usage_seconds_max gauge +# HELP worker_pool_usage_seconds_max Time spent using resources from the pool +worker_pool_usage_seconds_max{pool_name="vert.x-internal-blocking",pool_type="worker"} 0.0 +worker_pool_usage_seconds_max{pool_name="vert.x-worker-thread",pool_type="worker"} 0.015867397 +# TYPE process_cpu_usage gauge +# HELP process_cpu_usage The \"recent cpu usage\" for the Java Virtual Machine process +process_cpu_usage 5.638340716874748E-4 +# TYPE jvm_classes_loaded_classes gauge +# HELP jvm_classes_loaded_classes The number of classes that are currently loaded in the Java virtual machine +jvm_classes_loaded_classes 11776.0 +# TYPE jvm_gc_live_data_size_bytes gauge +# HELP jvm_gc_live_data_size_bytes Size of long-lived heap memory pool after reclamation +jvm_gc_live_data_size_bytes 0.0 +# TYPE jvm_threads_peak_threads gauge +# HELP jvm_threads_peak_threads The peak live thread count since the Java virtual machine started or peak was reset +jvm_threads_peak_threads 83.0 +# TYPE jvm_threads_started_threads counter +# HELP jvm_threads_started_threads The total number of application threads started in the JVM +jvm_threads_started_threads_total 95.0 +# TYPE jvm_buffer_count_buffers gauge +# HELP jvm_buffer_count_buffers An estimate of the number of buffers in the pool +jvm_buffer_count_buffers{id="mapped - 'non-volatile memory'"} 0.0 +jvm_buffer_count_buffers{id="mapped"} 0.0 +jvm_buffer_count_buffers{id="direct"} 13.0 +# TYPE example_prime_number counter +# HELP example_prime_number +example_prime_number_total{type="even"} 1.0 +# TYPE worker_pool_queue_size gauge +# HELP worker_pool_queue_size Number of pending elements in the waiting queue 
+worker_pool_queue_size{pool_name="vert.x-internal-blocking",pool_type="worker"} 0.0 +worker_pool_queue_size{pool_name="vert.x-worker-thread",pool_type="worker"} 0.0 +# TYPE netty_allocator_pooled_chunk_size gauge +# HELP netty_allocator_pooled_chunk_size +netty_allocator_pooled_chunk_size{allocator_type="PooledByteBufAllocator",id="1612048265"} 65536.0 +netty_allocator_pooled_chunk_size{allocator_type="PooledByteBufAllocator",id="298568580"} 65536.0 +# TYPE process_files_open_files gauge +# HELP process_files_open_files The open file descriptor count +process_files_open_files 417.0 +# TYPE netty_allocator_pooled_threadlocal_caches gauge +# HELP netty_allocator_pooled_threadlocal_caches +netty_allocator_pooled_threadlocal_caches{allocator_type="PooledByteBufAllocator",id="1612048265"} 0.0 +netty_allocator_pooled_threadlocal_caches{allocator_type="PooledByteBufAllocator",id="298568580"} 2.0 +# EOF diff --git a/quarkus/tests/test_e2e.py b/quarkus/tests/test_e2e.py new file mode 100644 index 0000000000000..9897eef7cee99 --- /dev/null +++ b/quarkus/tests/test_e2e.py @@ -0,0 +1,12 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from datadog_checks.base.constants import ServiceCheck +from datadog_checks.dev.utils import assert_service_checks + + +def test_metrics(dd_agent_check, dd_environment): + aggregator = dd_agent_check() + aggregator.assert_metric('quarkus.process.cpu.usage') + aggregator.assert_service_check('quarkus.openmetrics.health', ServiceCheck.OK, count=1) + assert_service_checks(aggregator) diff --git a/quarkus/tests/test_unit.py b/quarkus/tests/test_unit.py new file mode 100644 index 0000000000000..9f96137aa2e8c --- /dev/null +++ b/quarkus/tests/test_unit.py @@ -0,0 +1,89 @@ +# (C) Datadog, Inc. 
2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +from pathlib import Path + +import pytest + +from datadog_checks.dev.utils import get_metadata_metrics +from datadog_checks.quarkus import QuarkusCheck + +EXPECTED_METRICS = [ + 'http_server.requests.seconds.max', + 'http_server.active_requests', + 'http_server.bytes_read.max', + 'http_server.bytes_written.max', + 'http_server.connections.seconds.max', + 'jvm.buffer.count_buffers', + 'jvm.buffer.memory_used.bytes', + 'jvm.buffer.total_capacity.bytes', + 'jvm.classes.loaded_classes', + 'jvm.gc.live_data_size.bytes', + 'jvm.gc.max_data_size.bytes', + 'jvm.gc.overhead', + 'jvm.memory.committed.bytes', + 'jvm.memory.max.bytes', + 'jvm.memory.usage_after_gc', + 'jvm.memory.used.bytes', + 'jvm.threads.daemon_threads', + 'jvm.threads.live_threads', + 'jvm.threads.peak_threads', + 'jvm.threads.states_threads', + 'netty.allocator.memory.pinned', + 'netty.allocator.memory.used', + 'netty.allocator.pooled.arenas', + 'netty.allocator.pooled.cache_size', + 'netty.allocator.pooled.chunk_size', + 'netty.allocator.pooled.threadlocal_caches', + 'netty.eventexecutor.tasks_pending', + 'process.cpu.usage', + 'process.files.max_files', + 'process.files.open_files', + 'process.uptime.seconds', + 'system.cpu.count', + 'system.cpu.usage', + 'system.load_average_1m', + 'worker_pool.active', + 'worker_pool.idle', + 'worker_pool.queue.delay.seconds.max', + 'worker_pool.queue.size', + 'worker_pool.ratio', + 'worker_pool.usage.seconds.max', +] + + +EXPECTED_SUMMARIES = [ + 'http_server.requests.seconds', + 'http_server.bytes_read', + 'http_server.bytes_written', + 'worker_pool.queue.delay.seconds', + 'worker_pool.usage.seconds', +] + + +def test_check(dd_run_check, aggregator, instance, mock_http_response): + # Given + mock_http_response(file_path=Path(__file__).parent.absolute() / "fixtures" / "quarkus_auto_metrics.txt") + check = QuarkusCheck('quarkus', {}, [instance]) + # When + 
dd_run_check(check) + # Then + for m in EXPECTED_METRICS: + aggregator.assert_metric('quarkus.' + m) + for sm in EXPECTED_SUMMARIES: + aggregator.assert_metric('quarkus.' + sm + '.count') + aggregator.assert_metric('quarkus.' + sm + '.sum') + aggregator.assert_all_metrics_covered() + aggregator.assert_metrics_using_metadata(get_metadata_metrics()) + + +def test_emits_critical_service_check_when_service_is_down(dd_run_check, aggregator, instance, mock_http_response): + # Given + mock_http_response(status_code=404) + check = QuarkusCheck('quarkus', {}, [instance]) + # When + with pytest.raises(Exception, match="requests.exceptions.HTTPError"): + dd_run_check(check) + # Then + aggregator.assert_service_check('quarkus.openmetrics.health', QuarkusCheck.CRITICAL) diff --git a/slurm/CHANGELOG.md b/slurm/CHANGELOG.md index 93fbb1fcc2497..2d15ba2453de4 100644 --- a/slurm/CHANGELOG.md +++ b/slurm/CHANGELOG.md @@ -14,7 +14,7 @@ * Bump base package dependency to get fixed pyyaml. ([#19156](https://github.com/DataDog/integrations-core/pull/19156)) -## 1.0.1 / 2024-11-25 +## 1.0.1 / 2024-11-25 / Agent 7.60.0 ***Fixed***: diff --git a/slurm/assets/dashboards/slurm_overview.json b/slurm/assets/dashboards/slurm_overview.json index d68adff9ba7da..e394b8b32875e 100644 --- a/slurm/assets/dashboards/slurm_overview.json +++ b/slurm/assets/dashboards/slurm_overview.json @@ -324,7 +324,7 @@ { "id": 5935466024454224, "definition": { - "title": "Kyverno Monitor Summary", + "title": "Slurm Monitor Summary", "type": "manage_status", "display_format": "countsAndList", "color_preference": "text", diff --git a/slurm/manifest.json b/slurm/manifest.json index 932d85ef4b6ef..923af7a6b1b65 100644 --- a/slurm/manifest.json +++ b/slurm/manifest.json @@ -2,7 +2,7 @@ "manifest_version": "2.0.0", "app_uuid": "a1e88183-da10-4651-bac8-843bdb640af7", "app_id": "slurm", - "display_on_public_website": false, + "display_on_public_website": true, "tile": { "overview": "README.md#Overview", "configuration": 
"README.md#Setup", diff --git a/snmp/tests/test_e2e_core.py b/snmp/tests/test_e2e_core.py index bb38a87de4609..55bf20f1103aa 100644 --- a/snmp/tests/test_e2e_core.py +++ b/snmp/tests/test_e2e_core.py @@ -497,78 +497,6 @@ def test_e2e_meraki_cloud_controller(dd_agent_check): aggregator.assert_metrics_using_metadata(get_metadata_metrics()) -def test_e2e_core_detect_metrics_using_apc_ups_metrics(dd_agent_check): - config = common.generate_container_instance_config([]) - instance = config['instances'][0] - instance.update( - { - 'snmp_version': 1, - 'community_string': 'apc_ups_no_sysobjectid', - 'experimental_detect_metrics_enabled': True, - } - ) - config['init_config']['loader'] = 'core' - instance = config['instances'][0] - aggregator = common.dd_agent_check_wrapper(dd_agent_check, config, rate=True) - - global_metric_tags = [ - # metric_tags from apc_ups.yaml - 'model:APC Smart-UPS 600', - 'firmware_version:2.0.3-test', - 'serial_num:test_serial', - 'ups_name:testIdentName', - # metric_tags from _base.yaml - 'snmp_host:APC_UPS_NAME', - 'device_hostname:APC_UPS_NAME', - ] - device_ip = instance['ip_address'] - - tags = global_metric_tags + [ - 'device_namespace:default', - "snmp_device:{}".format(device_ip), - "device_ip:{}".format(device_ip), - "device_id:default:{}".format(device_ip), - ] - - common.assert_common_metrics(aggregator, tags, is_e2e=True, loader='core') - - for metric in metrics.APC_UPS_METRICS: - aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=2) - aggregator.assert_metric( - 'snmp.upsAdvBatteryFullCapacity_userMetric', metric_type=aggregator.GAUGE, tags=tags, count=2 - ) - for metric, value in metrics.APC_UPS_UPS_BASIC_STATE_OUTPUT_STATE_METRICS: - aggregator.assert_metric(metric, value=value, metric_type=aggregator.GAUGE, count=2, tags=tags) - - group_state_tags = tags + [ - 'outlet_group_name:test_outlet', - 'ups_outlet_group_status_group_state:ups_outlet_group_status_unknown', - ] - - 
aggregator.assert_metric( - 'snmp.upsOutletGroupStatusGroupState', - metric_type=aggregator.GAUGE, - tags=group_state_tags, - ) - - interface_tags = ['interface:mgmt', 'interface_alias:desc1', 'interface_index:32'] + tags - aggregator.assert_metric( - 'snmp.ifInErrors', - metric_type=aggregator.COUNT, - tags=interface_tags, - ) - aggregator.assert_metric( - 'snmp.ifInErrors.rate', - metric_type=aggregator.GAUGE, - tags=interface_tags, - ) - if_in_error_metrics = aggregator.metrics('snmp.ifInErrors.rate') - assert len(if_in_error_metrics) == 1 - assert if_in_error_metrics[0].value > 0 - - aggregator.assert_all_metrics_covered() - - def test_e2e_core_cisco_csr(dd_agent_check): config = common.generate_container_instance_config([]) instance = config['instances'][0] diff --git a/sophos_central_cloud/manifest.json b/sophos_central_cloud/manifest.json index b7c25e0f62b2c..c5563bf28c8ee 100644 --- a/sophos_central_cloud/manifest.json +++ b/sophos_central_cloud/manifest.json @@ -2,7 +2,7 @@ "manifest_version": "2.0.0", "app_uuid": "7293cd88-ceda-4094-94cd-09851f203f0e", "app_id": "sophos-central-cloud", - "display_on_public_website": false, + "display_on_public_website": true, "tile": { "overview": "README.md#Overview", "configuration": "README.md#Setup", diff --git a/spark/CHANGELOG.md b/spark/CHANGELOG.md index bb19a1f1f438e..1e857c299aa86 100644 --- a/spark/CHANGELOG.md +++ b/spark/CHANGELOG.md @@ -2,7 +2,7 @@ -## 6.1.0 / 2024-10-31 +## 6.1.0 / 2024-10-31 / Agent 7.60.0 ***Added***: diff --git a/sqlserver/CHANGELOG.md b/sqlserver/CHANGELOG.md index 58032c651ecc2..4cb8686237ec3 100644 --- a/sqlserver/CHANGELOG.md +++ b/sqlserver/CHANGELOG.md @@ -24,7 +24,7 @@ * Fix duplicate deadlock events ([#19139](https://github.com/DataDog/integrations-core/pull/19139)) * Fix poor query signature correlation for deadlocks. 
([#19142](https://github.com/DataDog/integrations-core/pull/19142)) -## 20.1.1 / 2024-11-25 +## 20.1.1 / 2024-11-25 / Agent 7.60.0 ***Fixed***: diff --git a/sqlserver/assets/configuration/spec.yaml b/sqlserver/assets/configuration/spec.yaml index 9497f4ef208be..f0250160041cd 100644 --- a/sqlserver/assets/configuration/spec.yaml +++ b/sqlserver/assets/configuration/spec.yaml @@ -137,7 +137,9 @@ files: description: | Configure collection of AlwaysOn availability group metrics. - When the `ao_metrics.enabled` is True, use `ao_metrics.availability_group` to specify the + Set `ao_metrics.enabled` to true to enable collection of AlwaysOn metrics. Defaults to false. + + When `ao_metrics.enabled` is True, use `ao_metrics.availability_group` to specify the resource group id of a specific availability group that you would like to monitor. If no availability group is specified, then we will collect AlwaysOn metrics for all availability groups on the current replica. @@ -163,6 +165,9 @@ files: - name: db_backup_metrics description: | Configure collection of database backup metrics. + + Set `db_backup_metrics.enabled` to true to enable collection of database backup metrics. Defaults to true. + Use `db_backup_metrics.collection_interval` to set the interval (in seconds) for the collection of database backup metrics. Defaults to 300 seconds (5 minutes). If you intend on updating this value, it is strongly recommended to use a consistent value throughout all SQL Server agent deployments. @@ -180,6 +185,8 @@ files: - name: db_files_metrics description: | Configure collection of database files metrics. + + Set `db_files_metrics.enabled` to true to enable collection of database files metrics. Defaults to true. hidden: true value: type: object @@ -190,6 +197,8 @@ files: - name: db_stats_metrics description: | Configure collection of database stats metrics + + Set `db_stats_metrics.enabled` to true to enable collection of database stats metrics. Defaults to true. 
hidden: true value: type: object @@ -203,6 +212,9 @@ files: Note these queries can be resource intensive on large datasets. Recommend to limit these via autodiscovery or specific database instances. + Set `db_fragmentation_metrics.enabled` to true to enable collection of + database index fragmentation statistics. Defaults to false. + Use `db_fragmentation_metrics.enabled_tempdb` to enable collection of database index fragmentation statistics in tempdb database from the `sys.dm_db_index_physical_stats` DMF. By default, we do not collect index fragmentation statistics in the tempdb database, as those queries @@ -219,7 +231,7 @@ files: - name: enabled type: boolean example: false - - name: enabled_tempdb + - name: enabled_tempdb type: boolean example: false - name: collection_interval @@ -228,8 +240,11 @@ files: display_default: 300 - name: fci_metrics description: | - Configure collection of failover Cluster Instance metrics. Note that these metrics + Configure collection of Failover Cluster Instance metrics. Note that these metrics requires a SQLServer set up with Failover Clustering enabled. + + Set `fci_metrics.enabled` to true to enable collection of Failover Cluster Instance metrics. + Defaults to false. value: type: object properties: @@ -239,6 +254,8 @@ files: - name: file_stats_metrics description: | Configure collection of file stats metrics. + + Set `file_stats_metrics.enabled` to true to enable collection of file stats metrics. Defaults to true. hidden: true value: type: object @@ -252,6 +269,9 @@ files: Because the `sys.dm_db_index_usage_stats` view is scoped to the current database, enable `database_autodiscovery` or set `database`. + Set `index_usage_metrics.enabled` to true to enable collection of user table index usage statistics. + Defaults to false. + Use `index_usage_metrics.enabled_tempdb` to enable collection of user table index usage statistics in tempdb database from the `sys.dm_db_index_usage_stats` DMV. 
By default, we do not collect index usage statistics in the tempdb database, as those queries @@ -279,6 +299,9 @@ files: description: | Configure collection of server-level instance metrics. When setting up multiple instances for different databases on the same host these metrics will be duplicated unless this option is turned off. + + Set `instance_metrics.enabled` to true to enable collection of server-level instance metrics. + Defaults to true. value: type: object properties: @@ -288,6 +311,9 @@ files: - name: master_files_metrics description: | Configure collection of database file size and state from `sys.master_files` + + Set `master_files_metrics.enabled` to true to enable collection of database file size and state metrics. + Defaults to false. value: type: object properties: @@ -299,6 +325,9 @@ files: Configure collection of metrics for a log shipping setup. Required to run against the primary instance in a transaction log shipping configuration. Note that the Datadog user needs to be present in msdb and must be added to the db_datareader role. + + Set `primary_log_shipping_metrics.enabled` to true to enable collection of primary log shipping metrics. + Defaults to false. value: type: object properties: @@ -310,6 +339,9 @@ files: Configure collection of metrics for a log shipping setup. Required to run against the secondary instance in a transaction log shipping configuration. Note that the Datadog user needs to be present in msdb and must be added to the db_datareader role. + + Set `secondary_log_shipping_metrics.enabled` to true to enable collection of secondary log shipping metrics. + Defaults to false. value: type: object properties: @@ -319,6 +351,8 @@ files: - name: server_state_metrics description: | Configure collection of server state metrics + + Set `server_state_metrics.enabled` to true to enable collection of server state metrics. Defaults to true. 
hidden: true value: type: object @@ -329,6 +363,9 @@ files: - name: task_scheduler_metrics description: | Configure collection of additional Task and Scheduler metrics. + + Set `task_scheduler_metrics.enabled` to true to enable collection of additional Task and Scheduler metrics. + Defaults to false. value: type: object properties: @@ -338,15 +375,20 @@ files: - name: tempdb_file_space_usage_metrics description: | Configure collection of tempdb file space usage metrics for how space is used in tempdb data files. + + Set `tempdb_file_space_usage_metrics.enabled` to true to enable collection of + tempdb file space usage metrics. Defaults to true. value: type: object properties: - name: enabled type: boolean - example: false + example: true - name: xe_metrics description: | Configure collection of extended events (XE) metrics. + + Set `xe_metrics.enabled` to true to enable collection of extended events metrics. Defaults to false. value: type: object properties: @@ -354,7 +396,7 @@ files: type: boolean example: false - name: agent_jobs - description: Configure collection of agent jobs events and metrics + description: Configure collection of SQL Server Agent jobs events and metrics options: - name: enabled description: | diff --git a/sqlserver/changelog.d/19266.added b/sqlserver/changelog.d/19266.added new file mode 100644 index 0000000000000..fdd9c88d9bf1f --- /dev/null +++ b/sqlserver/changelog.d/19266.added @@ -0,0 +1 @@ +Send schema name as part of index usage metrics diff --git a/sqlserver/changelog.d/19277.added b/sqlserver/changelog.d/19277.added new file mode 100644 index 0000000000000..527d2697cc0d2 --- /dev/null +++ b/sqlserver/changelog.d/19277.added @@ -0,0 +1 @@ +Add schema tag to db_fragmentation metrics for sqlserver diff --git a/sqlserver/datadog_checks/sqlserver/config_models/instance.py b/sqlserver/datadog_checks/sqlserver/config_models/instance.py index 1f83cbd0ccc1d..5c43871b087f7 100644 --- 
a/sqlserver/datadog_checks/sqlserver/config_models/instance.py +++ b/sqlserver/datadog_checks/sqlserver/config_models/instance.py @@ -194,7 +194,7 @@ class TempdbFileSpaceUsageMetrics(BaseModel): arbitrary_types_allowed=True, frozen=True, ) - enabled: Optional[bool] = Field(None, examples=[False]) + enabled: Optional[bool] = Field(None, examples=[True]) class XeMetrics(BaseModel): diff --git a/sqlserver/datadog_checks/sqlserver/data/conf.yaml.example b/sqlserver/datadog_checks/sqlserver/data/conf.yaml.example index 7ea10e54d507d..bcf481cdda875 100644 --- a/sqlserver/datadog_checks/sqlserver/data/conf.yaml.example +++ b/sqlserver/datadog_checks/sqlserver/data/conf.yaml.example @@ -134,7 +134,9 @@ instances: ## @param ao_metrics - mapping - optional ## Configure collection of AlwaysOn availability group metrics. ## - ## When the `ao_metrics.enabled` is True, use `ao_metrics.availability_group` to specify the + ## Set `ao_metrics.enabled` to true to enable collection of AlwaysOn metrics. Defaults to false. + ## + ## When `ao_metrics.enabled` is True, use `ao_metrics.availability_group` to specify the ## resource group id of a specific availability group that you would like to monitor. ## If no availability group is specified, then we will collect AlwaysOn metrics for all ## availability groups on the current replica. @@ -152,6 +154,9 @@ instances: ## Note these queries can be resource intensive on large datasets. Recommend to limit these via ## autodiscovery or specific database instances. ## + ## Set `db_fragmentation_metrics.enabled` to true to enable collection of + ## database index fragmentation statistics. Defaults to false. + ## ## Use `db_fragmentation_metrics.enabled_tempdb` to enable collection of database index fragmentation statistics ## in tempdb database from the `sys.dm_db_index_physical_stats` DMF. 
## By default, we do not collect index fragmentation statistics in the tempdb database, as those queries @@ -166,8 +171,11 @@ instances: # db_fragmentation_metrics: {} ## @param fci_metrics - mapping - optional - ## Configure collection of failover Cluster Instance metrics. Note that these metrics + ## Configure collection of Failover Cluster Instance metrics. Note that these metrics ## requires a SQLServer set up with Failover Clustering enabled. + ## + ## Set `fci_metrics.enabled` to true to enable collection of Failover Cluster Instance metrics. + ## Defaults to false. # # fci_metrics: {} @@ -176,6 +184,9 @@ instances: ## Because the `sys.dm_db_index_usage_stats` view is scoped to the current database, enable ## `database_autodiscovery` or set `database`. ## + ## Set `index_usage_metrics.enabled` to true to enable collection of user table index usage statistics. + ## Defaults to false. + ## ## Use `index_usage_metrics.enabled_tempdb` to enable collection of user table index usage statistics in tempdb ## database from the `sys.dm_db_index_usage_stats` DMV. ## By default, we do not collect index usage statistics in the tempdb database, as those queries @@ -192,11 +203,17 @@ instances: ## @param instance_metrics - mapping - optional ## Configure collection of server-level instance metrics. When setting up multiple instances for ## different databases on the same host these metrics will be duplicated unless this option is turned off. + ## + ## Set `instance_metrics.enabled` to true to enable collection of server-level instance metrics. + ## Defaults to true. # # instance_metrics: {} ## @param master_files_metrics - mapping - optional ## Configure collection of database file size and state from `sys.master_files` + ## + ## Set `master_files_metrics.enabled` to true to enable collection of database file size and state metrics. + ## Defaults to false. 
# # master_files_metrics: {} @@ -204,6 +221,9 @@ instances: ## Configure collection of metrics for a log shipping setup. Required to run against the ## primary instance in a transaction log shipping configuration. Note that ## the Datadog user needs to be present in msdb and must be added to the db_datareader role. + ## + ## Set `primary_log_shipping_metrics.enabled` to true to enable collection of primary log shipping metrics. + ## Defaults to false. # # primary_log_shipping_metrics: {} @@ -211,25 +231,36 @@ instances: ## Configure collection of metrics for a log shipping setup. Required to run against the ## secondary instance in a transaction log shipping configuration. Note that ## the Datadog user needs to be present in msdb and must be added to the db_datareader role. + ## + ## Set `secondary_log_shipping_metrics.enabled` to true to enable collection of secondary log shipping metrics. + ## Defaults to false. # # secondary_log_shipping_metrics: {} ## @param task_scheduler_metrics - mapping - optional ## Configure collection of additional Task and Scheduler metrics. + ## + ## Set `task_scheduler_metrics.enabled` to true to enable collection of additional Task and Scheduler metrics. + ## Defaults to false. # # task_scheduler_metrics: {} ## @param tempdb_file_space_usage_metrics - mapping - optional ## Configure collection of tempdb file space usage metrics for how space is used in tempdb data files. + ## + ## Set `tempdb_file_space_usage_metrics.enabled` to true to enable collection of + ## tempdb file space usage metrics. Defaults to true. # # tempdb_file_space_usage_metrics: {} ## @param xe_metrics - mapping - optional ## Configure collection of extended events (XE) metrics. + ## + ## Set `xe_metrics.enabled` to true to enable collection of extended events metrics. Defaults to false. 
# # xe_metrics: {} - ## Configure collection of agent jobs events and metrics + ## Configure collection of SQL Server Agent jobs events and metrics # # agent_jobs: diff --git a/sqlserver/datadog_checks/sqlserver/database_metrics/db_fragmentation_metrics.py b/sqlserver/datadog_checks/sqlserver/database_metrics/db_fragmentation_metrics.py index 1411449ac5e21..3e31f85d219b4 100644 --- a/sqlserver/datadog_checks/sqlserver/database_metrics/db_fragmentation_metrics.py +++ b/sqlserver/datadog_checks/sqlserver/database_metrics/db_fragmentation_metrics.py @@ -14,6 +14,7 @@ "query": """SELECT DB_NAME(DDIPS.database_id) as database_name, OBJECT_NAME(DDIPS.object_id, DDIPS.database_id) as object_name, + OBJECT_SCHEMA_NAME(DDIPS.object_id, DDIPS.database_id) as "schema", DDIPS.index_id as index_id, I.name as index_name, DDIPS.fragment_count as fragment_count, @@ -28,6 +29,7 @@ "columns": [ {"name": "database_name", "type": "tag"}, {"name": "object_name", "type": "tag"}, + {"name": "schema", "type": "tag"}, {"name": "index_id", "type": "tag"}, {"name": "index_name", "type": "tag"}, {"name": "database.fragment_count", "type": "gauge"}, diff --git a/sqlserver/datadog_checks/sqlserver/database_metrics/index_usage_metrics.py b/sqlserver/datadog_checks/sqlserver/database_metrics/index_usage_metrics.py index d91a935c19133..6fd5d9aac1a5d 100644 --- a/sqlserver/datadog_checks/sqlserver/database_metrics/index_usage_metrics.py +++ b/sqlserver/datadog_checks/sqlserver/database_metrics/index_usage_metrics.py @@ -17,6 +17,7 @@ WHEN ind.name IS NULL THEN 'HeapIndex_' + OBJECT_NAME(ind.object_id) ELSE ind.name END AS index_name, + OBJECT_SCHEMA_NAME(ind.object_id, ixus.database_id) as "schema", OBJECT_NAME(ind.object_id) as table_name, user_seeks, user_scans, @@ -26,11 +27,12 @@ INNER JOIN sys.dm_db_index_usage_stats ixus ON ixus.index_id = ind.index_id AND ixus.object_id = ind.object_id WHERE OBJECTPROPERTY(ind.object_id, 'IsUserTable') = 1 AND DB_NAME(ixus.database_id) = db_name() - GROUP BY 
ixus.database_id, OBJECT_NAME(ind.object_id), ind.name, user_seeks, user_scans, user_lookups, user_updates + GROUP BY ixus.database_id, ind.object_id, ind.name, user_seeks, user_scans, user_lookups, user_updates """, "columns": [ {"name": "db", "type": "tag"}, {"name": "index_name", "type": "tag"}, + {"name": "schema", "type": "tag"}, {"name": "table", "type": "tag"}, {"name": "index.user_seeks", "type": "monotonic_count"}, {"name": "index.user_scans", "type": "monotonic_count"}, diff --git a/sqlserver/hatch.toml b/sqlserver/hatch.toml index 2a9630b10e673..b16801ff760c7 100644 --- a/sqlserver/hatch.toml +++ b/sqlserver/hatch.toml @@ -46,10 +46,6 @@ PIP_EXTRA_INDEX_URL = "https://datadoghq.dev/ci-wheels/bin" env.GITHUB_ACTIONS.e2e-env = { value = false, if = ["true"], platform = ["windows"] } platform.windows.env-vars = [ "COMPOSE_FOLDER=compose-windows", - # we need SETUPTOOLS_USE_DISTUTILS=stdlib for setuptools versions 60+ in order for adodbapi to be able to install - # correctly for python3 on windows. If not set installation fails with the following error: - # in ImportError: cannot import name 'build_py_2to3' from 'distutils.command.build_py' - "SETUPTOOLS_USE_DISTUTILS=stdlib", ] matrix.os.platforms = [ { value = "windows", if = ["windows"] }, diff --git a/sqlserver/metadata.csv b/sqlserver/metadata.csv index 0106c608bcbeb..2a8c134ad37d6 100644 --- a/sqlserver/metadata.csv +++ b/sqlserver/metadata.csv @@ -36,15 +36,15 @@ sqlserver.buffer.page_writes,gauge,,page,,Indicates the number of physical datab sqlserver.cache.object_counts,gauge,,object,,Number of cache objects in the cache. (Perf. Counter: `Plan Cache - Cache Object Counts`),0,sql_server,cache obj counts,, sqlserver.cache.pages,gauge,,object,,Number of 8-kilobyte (KB) pages used by cache objects. (Perf. Counter: `Plan Cache - Cache Pages`),0,sql_server,cache pages,, sqlserver.database.active_transactions,gauge,,transaction,,Number of active transactions across all databases on the SQL Server instance. 
Tags: `db`. (Perf. Counter: `Databases - Active Transactions`).,0,sql_server,database trans active,, -sqlserver.database.avg_fragment_size_in_pages,gauge,,,,"The average number of pages in one fragment on the leaf level of an IN_ROW_DATA allocation unit. Tags: `db`, `object_name`, `index_id`, `index_name`",0,sql_server,avg fragment size in pages,, -sqlserver.database.avg_fragmentation_in_percent,gauge,,,,"Logical fragmentation for indexes, or extent fragmentation for heaps in the IN_ROW_DATA allocation unit. Tags: `db`, `object_name`, `index_id`, `index_name`",0,sql_server,avg fragmentation percent,, +sqlserver.database.avg_fragment_size_in_pages,gauge,,,,"The average number of pages in one fragment on the leaf level of an IN_ROW_DATA allocation unit. Tags: `db`, `object_name`, `schema`, `index_id`, `index_name`",0,sql_server,avg fragment size in pages,, +sqlserver.database.avg_fragmentation_in_percent,gauge,,,,"Logical fragmentation for indexes, or extent fragmentation for heaps in the IN_ROW_DATA allocation unit. Tags: `db`, `object_name`, `schema`, `index_id`, `index_name`",0,sql_server,avg fragmentation percent,, sqlserver.database.backup_count,gauge,,,,The total count of successful backups made for a database. **Note:** This metric is not emitted on Azure managed databases. Tags: `db`,0,sql_server,backup count,, sqlserver.database.backup_restore_throughput,gauge,,,,Read/write throughput for backup and restore operations of a database per second. Tags: `db`. (Perf. Counter: `Databases - Backup/Restore Throughput/sec`),0,sql_server,database restore throughput,, sqlserver.database.files.size,gauge,,kibibyte,,"Current size of the database file. Tags: `db`, `file_id`, `file_type`, `file_name`, `file_location`, `database_files_state_desc`",0,sql_server,database file size,, sqlserver.database.files.space_used,gauge,,kibibyte,,"Current used space of the database file. 
Tags: `db`, `file_id`, `file_type`, `file_name`, `file_location`, `database_files_state_desc`",0,sql_server,database file used size,, sqlserver.database.files.state,gauge,,,,"Database file state: 0 = Online, 1 = Restoring, 2 = Recovering, 3 = Recovery_Pending, 4 = Suspect, 5 = Unknown, 6 = Offline, 7 = Defunct. Tags: `db`, `file_id`, `file_type`, `file_name`, `file_location`, `database_files_state_desc`",0,sql_server,database file status,, -sqlserver.database.fragment_count,gauge,,,,"The number of fragments in the leaf level of an IN_ROW_DATA allocation unit. Tags: `db`, `object_name`, `index_id`, `index_name`",0,sql_server,fragment count,, -sqlserver.database.index_page_count,gauge,,,,"Total number of index or data pages. Tags: `db`, `object_name`, `index_id`, `index_name`",0,sql_server,page count per index,, +sqlserver.database.fragment_count,gauge,,,,"The number of fragments in the leaf level of an IN_ROW_DATA allocation unit. Tags: `db`, `object_name`, `schema`, `index_id`, `index_name`",0,sql_server,fragment count,, +sqlserver.database.index_page_count,gauge,,,,"Total number of index or data pages. Tags: `db`, `object_name`, `schema`, `index_id`, `index_name`",0,sql_server,page count per index,, sqlserver.database.is_in_standby,gauge,,,,"Whether or not the database is read-only for restore log. Tags: `db`, `database_state_desc`, `database_recovery_model_desc`",0,sql_server,is in standby,, sqlserver.database.is_read_only,gauge,,,,"Whether or not the database is marked as READ_ONLY. 0 = READ_WRITE, 1 = READ_ONLY. Tags: `db`, `database_state_desc`, `database_recovery_model_desc`",0,sql_server,is read-only,, sqlserver.database.is_sync_with_backup,gauge,,,,"Whether or not the database is marked for replication synchronization with backup. 0 = Not marked for replication sync, 1 = Marked for replication sync. 
Tags: `db`, `database_state_desc`, `database_recovery_model_desc`",0,sql_server,is sync with backup,, @@ -69,10 +69,10 @@ sqlserver.files.write_io_stall,count,,millisecond,,"Total time that users waited sqlserver.files.write_io_stall_queued,count,,millisecond,,"Total latency from IO governance pools for writes on the file. Tags: `logical_name`, `file_location`, `db`, `state`",-1,sql_server,write io queue time on database file,, sqlserver.files.writes,count,,write,second,"Number of writes issued on the file. Tags: `logical_name`, `file_location`, `db`, `state`",0,sql_server,writes on database file,, sqlserver.files.written_bytes,count,,byte,second,"Bytes written to the file. Tags: `logical_name`, `file_location`, `db`, `state`",0,sql_server,bytes written to the database file,, -sqlserver.index.user_lookups,count,,occurrence,,"Number of bookmark lookups by user queries. Tags: `db`, `table`, `index_name`",-1,sql_server,lookups by user queries,, -sqlserver.index.user_scans,count,,scan,,"Number of scans by user queries that did not use 'seek' predicate. Tags: `db`, `table`, `index_name`",-1,sql_server,scans by user queries,, -sqlserver.index.user_seeks,count,,occurrence,,"Number of seeks by user queries. Tags: `db`, `table`, `index_name`",-1,sql_server,seeks by user queries,, -sqlserver.index.user_updates,count,,update,,"Number of updates by user queries. This includes Insert, Delete, and Updates representing the number of operations done, not the actual rows affected. Tags: `db`, `table`, `index_name`",-1,sql_server,updates by user queries,, +sqlserver.index.user_lookups,count,,occurrence,,"Number of bookmark lookups by user queries. Tags: `db`, `table`, `index_name`, `schema`",-1,sql_server,lookups by user queries,, +sqlserver.index.user_scans,count,,scan,,"Number of scans by user queries that did not use 'seek' predicate. 
Tags: `db`, `table`, `index_name`, `schema`",-1,sql_server,scans by user queries,, +sqlserver.index.user_seeks,count,,occurrence,,"Number of seeks by user queries. Tags: `db`, `table`, `index_name`, `schema`",-1,sql_server,seeks by user queries,, +sqlserver.index.user_updates,count,,update,,"Number of updates by user queries. This includes Insert, Delete, and Updates representing the number of operations done, not the actual rows affected. Tags: `db`, `table`, `index_name`, `schema`",-1,sql_server,updates by user queries,, sqlserver.latches.latch_wait_time,gauge,,millisecond,,Average latch wait time (in milliseconds) for latch requests that had to wait. (Perf. Counter: `Locks - Average Latch Wait Time (ms)`),0,sql_server,avg latch wait time,, sqlserver.latches.latch_waits,gauge,,request,second,Number of latch requests that could not be granted immediately. (Perf. Counter: `Locks - Latch Waits/sec`),-1,sql_server,latch waits,, sqlserver.locks.deadlocks,gauge,,request,second,Number of lock requests per second that resulted in a deadlock. (Perf. 
Counter: `Locks - Number of Deadlocks/sec`),0,sql_server,deadlocks,, diff --git a/sqlserver/tests/test_database_metrics.py b/sqlserver/tests/test_database_metrics.py index 7203196159b4b..2bc74f953681d 100644 --- a/sqlserver/tests/test_database_metrics.py +++ b/sqlserver/tests/test_database_metrics.py @@ -826,19 +826,19 @@ def test_sqlserver_index_usage_metrics( mocked_results_non_tempdb = [ [ - ('master', 'PK__patch_ac__09EA1DC2BD2BC49C', 'patch_action_execution_state', 36, 0, 0, 0), - ('master', 'PK__rds_comp__2E7CCD4A9E2910C9', 'rds_component_version', 0, 5, 0, 0), + ('master', 'PK__patch_ac__09EA1DC2BD2BC49C', 'dbo', 'patch_action_execution_state', 36, 0, 0, 0), + ('master', 'PK__rds_comp__2E7CCD4A9E2910C9', 'dbo', 'rds_component_version', 0, 5, 0, 0), ], [ - ('msdb', 'PK__backupse__21F79AAB9439648C', 'backupset', 0, 1, 0, 0), + ('msdb', 'PK__backupse__21F79AAB9439648C', 'dbo', 'backupset', 0, 1, 0, 0), ], [ - ('datadog_test-1', 'idx_something', 'some_table', 10, 60, 12, 18), - ('datadog_test-1', 'idx_something_else', 'some_table', 20, 30, 40, 50), + ('datadog_test-1', 'idx_something', 'dbo', 'some_table', 10, 60, 12, 18), + ('datadog_test-1', 'idx_something_else', 'dbo', 'some_table', 20, 30, 40, 50), ], ] mocked_results_tempdb = [ - ('tempdb', 'PK__dmv_view__B5A34EE25D72CBFE', 'dmv_view_run_history', 1500, 0, 0, 49), + ('tempdb', 'PK__dmv_view__B5A34EE25D72CBFE', 'dbo', 'dmv_view_run_history', 1500, 0, 0, 49), ] mocked_results = mocked_results_non_tempdb if include_index_usage_metrics_tempdb: @@ -870,11 +870,12 @@ def test_sqlserver_index_usage_metrics( tags = sqlserver_check._config.tags for result in mocked_results: for row in result: - db, index_name, table, *metric_values = row + db, index_name, schema, table, *metric_values = row metrics = zip(index_usage_metrics.metric_names()[0], metric_values) expected_tags = [ f'db:{db}', f'index_name:{index_name}', + f'schema:{schema}', f'table:{table}', ] + tags for metric_name, metric_value in metrics: @@ -921,29 
+922,39 @@ def test_sqlserver_db_fragmentation_metrics( print(instance_docker_metrics) mocked_results = [ [ - ('master', 'spt_fallback_db', 0, None, 0, 0.0, 0, 0.0), - ('master', 'spt_fallback_dev', 0, None, 0, 0.0, 0, 0.0), - ('master', 'spt_fallback_usg', 0, None, 0, 0.0, 0, 0.0), - ('master', 'spt_monitor', 0, None, 1, 1.0, 1, 0.0), - ('master', 'MSreplication_options', 0, None, 1, 1.0, 1, 0.0), + ('master', 'spt_fallback_db', 'dbo', 0, None, 0, 0.0, 0, 0.0), + ('master', 'spt_fallback_dev', 'dbo', 0, None, 0, 0.0, 0, 0.0), + ('master', 'spt_fallback_usg', 'dbo', 0, None, 0, 0.0, 0, 0.0), + ('master', 'spt_monitor', 'dbo', 0, None, 1, 1.0, 1, 0.0), + ('master', 'MSreplication_options', 'dbo', 0, None, 1, 1.0, 1, 0.0), ], [ - ('msdb', 'syscachedcredentials', 1, 'PK__syscache__F6D56B562DA81DC6', 0, 0.0, 0, 0.0), - ('msdb', 'syscollector_blobs_internal', 1, 'PK_syscollector_blobs_internal_paremeter_name', 0, 0.0, 0, 0.0), + ('msdb', 'syscachedcredentials', 'dbo', 1, 'PK__syscache__F6D56B562DA81DC6', 0, 0.0, 0, 0.0), + ( + 'msdb', + 'syscollector_blobs_internal', + 'dbo', + 1, + 'PK_syscollector_blobs_internal_paremeter_name', + 0, + 0.0, + 0, + 0.0, + ), ], - [('datadog_test-1', 'Ï‘ings', 1, 'thingsindex', 1, 1.0, 1, 0.0)], + [('datadog_test-1', 'Ï‘ings', 'dbo', 1, 'thingsindex', 1, 1.0, 1, 0.0)], ] mocked_results_tempdb = [ - [('tempdb', '#TempExample__000000000008', 1, 'PK__#TempExa__3214EC278A26D67E', 1, 1.0, 1, 0.0)], + [('tempdb', '#TempExample__000000000008', 'dbo', 1, 'PK__#TempExa__3214EC278A26D67E', 1, 1.0, 1, 0.0)], ] if db_fragmentation_object_names: instance_docker_metrics['db_fragmentation_object_names'] = db_fragmentation_object_names mocked_results = [ [ - ('master', 'spt_fallback_db', 0, None, 0, 0.0, 0, 0.0), - ('master', 'spt_fallback_dev', 0, None, 0, 0.0, 0, 0.0), - ('master', 'spt_fallback_usg', 0, None, 0, 0.0, 0, 0.0), + ('master', 'spt_fallback_db', 'dbo', 0, None, 0, 0.0, 0, 0.0), + ('master', 'spt_fallback_dev', 'dbo', 0, None, 0, 0.0, 0, 
0.0), + ('master', 'spt_fallback_usg', 'dbo', 0, None, 0, 0.0, 0, 0.0), ], [], [], @@ -982,12 +993,13 @@ def test_sqlserver_db_fragmentation_metrics( tags = sqlserver_check._config.tags for result in mocked_results: for row in result: - database_name, object_name, index_id, index_name, *metric_values = row + database_name, object_name, schema, index_id, index_name, *metric_values = row metrics = zip(db_fragmentation_metrics.metric_names()[0], metric_values) expected_tags = [ f'db:{database_name}', f'database_name:{database_name}', f'object_name:{object_name}', + f'schema:{schema}', f'index_id:{index_id}', f'index_name:{index_name}', ] + tags diff --git a/sqlserver/tests/test_integration.py b/sqlserver/tests/test_integration.py index ad0e9e8d83c68..41bab4e799448 100644 --- a/sqlserver/tests/test_integration.py +++ b/sqlserver/tests/test_integration.py @@ -840,6 +840,7 @@ def execute_query(query, params): expected_tags = check._config.tags + [ 'db:datadog_test-1', 'table:Ï‘ings', + 'schema:dbo', 'index_name:thingsindex', ] for m in DATABASE_INDEX_METRICS: diff --git a/sqlserver/tests/test_metrics.py b/sqlserver/tests/test_metrics.py index c6e655eff51e8..0513e96660b43 100644 --- a/sqlserver/tests/test_metrics.py +++ b/sqlserver/tests/test_metrics.py @@ -255,6 +255,7 @@ def test_check_index_usage_metrics( 'db:{}'.format(instance_docker_metrics['database']), 'index_name:thingsindex', 'table:Ï‘ings', + 'schema:dbo', ] aggregator.assert_metric(metric_name, tags=expected_tags, hostname=sqlserver_check.resolved_hostname, count=1) @@ -352,7 +353,7 @@ def test_check_db_fragmentation_metrics( for metric_name, _, _ in DATABASE_FRAGMENTATION_METRICS: for tag in db_tags: aggregator.assert_metric_has_tag(metric_name, tag=tag) - for tag_prefix in ('index_id', 'index_name', 'object_name'): + for tag_prefix in ('index_id', 'index_name', 'object_name', 'schema'): aggregator.assert_metric_has_tag_prefix(metric_name, tag_prefix=tag_prefix) diff --git a/teamcity/README.md 
b/teamcity/README.md index 0a4b3fba01c82..4bfa5eb867af7 100644 --- a/teamcity/README.md +++ b/teamcity/README.md @@ -59,23 +59,14 @@ The TeamCity check offers two methods of data collection. To optimally monitor y init_config: instances: - - server: http://teamcity..com - - ## @param projects - mapping - optional - ## Mapping of TeamCity projects and build configurations to - ## collect events and metrics from the TeamCity REST API. - # - projects: - : - include: - - - - - exclude: - - - : - include: - - - : {} + - use_openmetrics: true + + ## @param server - string - required + ## Specify the server name of your TeamCity instance. + ## Enable Guest Authentication on your instance or specify `username` and `password` to + ## enable basic HTTP authentication. + # + server: http://teamcity..com ``` To collect [OpenMetrics-compliant][16] histogram and summary metrics (available starting in TeamCity Server 2022.10+), add the internal property, `teamcity.metrics.followOpenMetricsSpec=true`. See, [TeamCity Internal Properties][25]. 
diff --git a/teleport/hatch.toml b/teleport/hatch.toml index c85c5f07a7df2..010f0f41339aa 100644 --- a/teleport/hatch.toml +++ b/teleport/hatch.toml @@ -2,3 +2,18 @@ [[envs.default.matrix]] python = ["3.12"] + +[[envs.default.matrix]] +python = ["3.12"] +setup = ["caddy"] + +[envs.default.overrides] +name."^py3.12$".e2e-env = { value = true } +name."^py3.12-caddy$".e2e-env = { value = true } +matrix.setup.e2e-env = { value = true, if = ["caddy"] } +matrix.setup.env-vars = [ + { key = "USE_TELEPORT_CADDY", value = "true", if = ["caddy"] }, +] + +[envs.default] +e2e-env = false diff --git a/teleport/tests/common.py b/teleport/tests/common.py index 878e212723c11..0bfa003dad44c 100644 --- a/teleport/tests/common.py +++ b/teleport/tests/common.py @@ -2,6 +2,9 @@ # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) +import os + +USE_TELEPORT_CADDY = os.environ.get("USE_TELEPORT_CADDY", False) INSTANCE = {"teleport_url": "http://127.0.0.1", "diag_port": "3000"} diff --git a/teleport/tests/conftest.py b/teleport/tests/conftest.py index 7fa3ab5f8b076..2744bb56acb75 100644 --- a/teleport/tests/conftest.py +++ b/teleport/tests/conftest.py @@ -5,16 +5,36 @@ import pytest -from datadog_checks.dev import docker_run, get_here +from datadog_checks.dev import docker_run, get_docker_hostname, get_here +from datadog_checks.dev.conditions import CheckDockerLogs, CheckEndpoints -from .common import INSTANCE +from .common import INSTANCE, USE_TELEPORT_CADDY +HOST = get_docker_hostname() -@pytest.fixture(scope='session') +URL = "http://{}".format(HOST) + + +@pytest.fixture(scope="session") def dd_environment(): - compose_file = os.path.join(get_here(), 'docker', 'docker-compose.yaml') - with docker_run(compose_file, sleep=5): - yield INSTANCE + if USE_TELEPORT_CADDY: + compose_file = os.path.join(get_here(), "docker", "caddy", "docker-compose.yaml") + conditions = [ + CheckEndpoints(URL + ":3000/healthz", attempts=120), + ] + with docker_run(compose_file, 
conditions=conditions, sleep=5): + yield INSTANCE + else: + compose_file = os.path.join(get_here(), "docker", "teleport", "docker-compose.yaml") + with docker_run( + compose_file, + sleep=5, + conditions=[ + CheckDockerLogs(identifier="teleport-service", patterns=["Starting Teleport"]), + CheckEndpoints(URL + ":3000/healthz", attempts=120), + ], + ): + yield {"teleport_url": URL, "diag_port": "3000"} @pytest.fixture diff --git a/teleport/tests/docker/caddy/docker-compose.yaml b/teleport/tests/docker/caddy/docker-compose.yaml new file mode 100644 index 0000000000000..5ae718b36c157 --- /dev/null +++ b/teleport/tests/docker/caddy/docker-compose.yaml @@ -0,0 +1,12 @@ +version: "3" + +services: + teleport-caddy: + image: caddy:2.6.2-alpine + build: . + container_name: teleport-caddy + volumes: + - ./fixtures:/usr/share/caddy + - ./etc/caddy/teleport-service:/etc/caddy/ + ports: + - "3000:80" diff --git a/teleport/tests/docker/caddy/etc/caddy/teleport-service/Caddyfile b/teleport/tests/docker/caddy/etc/caddy/teleport-service/Caddyfile new file mode 100644 index 0000000000000..27f6a79c3e8a1 --- /dev/null +++ b/teleport/tests/docker/caddy/etc/caddy/teleport-service/Caddyfile @@ -0,0 +1,35 @@ +{ + debug + admin :2019 +} +:80 { + root * /usr/share/caddy/ + @metrics { + method GET + path /metrics + } + route @metrics { + rewrite * /{http.request.uri.path}/get.txt + file_server + } + + @healthz { + method GET + path /healthz + } + route @healthz { + rewrite * /{http.request.uri.path}/get.json + file_server + } + + @readyz { + method GET + path /readyz + } + route @readyz { + rewrite * /{http.request.uri.path}/get.json + file_server + } + + file_server browse +} diff --git a/teleport/tests/docker/caddy/fixtures/healthz/get.json b/teleport/tests/docker/caddy/fixtures/healthz/get.json new file mode 100644 index 0000000000000..51c30934a439d --- /dev/null +++ b/teleport/tests/docker/caddy/fixtures/healthz/get.json @@ -0,0 +1 @@ +{ "status": "ok" } diff --git 
a/teleport/tests/docker/caddy/fixtures/metrics/get.txt b/teleport/tests/docker/caddy/fixtures/metrics/get.txt new file mode 100644 index 0000000000000..da75918c66dc4 --- /dev/null +++ b/teleport/tests/docker/caddy/fixtures/metrics/get.txt @@ -0,0 +1,1574 @@ +# HELP certificate_mismatch_total Number of times there was a certificate mismatch +# TYPE certificate_mismatch_total counter +certificate_mismatch_total 0 +# HELP audit_failed_disk_monitoring Number of times disk monitoring failed. +# TYPE audit_failed_disk_monitoring counter +audit_failed_disk_monitoring 0 +# HELP audit_failed_emit_events Number of times emitting audit event failed. +# TYPE audit_failed_emit_events counter +audit_failed_emit_events 0 +# HELP audit_percentage_disk_space_used Percentage disk space used. +# TYPE audit_percentage_disk_space_used gauge +audit_percentage_disk_space_used 0 +# HELP audit_server_open_files Number of open audit files +# TYPE audit_server_open_files gauge +audit_server_open_files 0 +# HELP auth_generate_requests Number of current generate requests for server keys +# TYPE auth_generate_requests gauge +auth_generate_requests 0 +# HELP auth_generate_requests_throttled_total Number of throttled requests to generate new server keys +# TYPE auth_generate_requests_throttled_total counter +auth_generate_requests_throttled_total 0 +# HELP auth_generate_requests_total Number of requests to generate new server keys +# TYPE auth_generate_requests_total counter +auth_generate_requests_total 4 +# HELP auth_generate_seconds Latency for generate requests for server keys +# TYPE auth_generate_seconds histogram +auth_generate_seconds_bucket{le="0.001"} 0 +auth_generate_seconds_bucket{le="0.002"} 0 +auth_generate_seconds_bucket{le="0.004"} 1 +auth_generate_seconds_bucket{le="0.008"} 4 +auth_generate_seconds_bucket{le="0.016"} 4 +auth_generate_seconds_bucket{le="0.032"} 4 +auth_generate_seconds_bucket{le="0.064"} 4 +auth_generate_seconds_bucket{le="0.128"} 4 
+auth_generate_seconds_bucket{le="0.256"} 4 +auth_generate_seconds_bucket{le="0.512"} 4 +auth_generate_seconds_bucket{le="1.024"} 4 +auth_generate_seconds_bucket{le="2.048"} 4 +auth_generate_seconds_bucket{le="4.096"} 4 +auth_generate_seconds_bucket{le="8.192"} 4 +auth_generate_seconds_bucket{le="16.384"} 4 +auth_generate_seconds_bucket{le="32.768"} 4 +auth_generate_seconds_bucket{le="+Inf"} 4 +auth_generate_seconds_sum 0.016478917 +auth_generate_seconds_count 4 +# HELP backend_batch_read_requests_total Number of read requests to the backend +# TYPE backend_batch_read_requests_total counter +backend_batch_read_requests_total{component="backend"} 56 +backend_batch_read_requests_total{component="cache"} 73 +# HELP backend_batch_read_seconds Latency for batch read operations +# TYPE backend_batch_read_seconds histogram +backend_batch_read_seconds_bucket{component="backend",le="0.001"} 51 +backend_batch_read_seconds_bucket{component="backend",le="0.002"} 55 +backend_batch_read_seconds_bucket{component="backend",le="0.004"} 56 +backend_batch_read_seconds_bucket{component="backend",le="0.008"} 56 +backend_batch_read_seconds_bucket{component="backend",le="0.016"} 56 +backend_batch_read_seconds_bucket{component="backend",le="0.032"} 56 +backend_batch_read_seconds_bucket{component="backend",le="0.064"} 56 +backend_batch_read_seconds_bucket{component="backend",le="0.128"} 56 +backend_batch_read_seconds_bucket{component="backend",le="0.256"} 56 +backend_batch_read_seconds_bucket{component="backend",le="0.512"} 56 +backend_batch_read_seconds_bucket{component="backend",le="1.024"} 56 +backend_batch_read_seconds_bucket{component="backend",le="2.048"} 56 +backend_batch_read_seconds_bucket{component="backend",le="4.096"} 56 +backend_batch_read_seconds_bucket{component="backend",le="8.192"} 56 +backend_batch_read_seconds_bucket{component="backend",le="16.384"} 56 +backend_batch_read_seconds_bucket{component="backend",le="32.768"} 56 
+backend_batch_read_seconds_bucket{component="backend",le="+Inf"} 56 +backend_batch_read_seconds_sum{component="backend"} 0.015345246000000003 +backend_batch_read_seconds_count{component="backend"} 56 +backend_batch_read_seconds_bucket{component="cache",le="0.001"} 73 +backend_batch_read_seconds_bucket{component="cache",le="0.002"} 73 +backend_batch_read_seconds_bucket{component="cache",le="0.004"} 73 +backend_batch_read_seconds_bucket{component="cache",le="0.008"} 73 +backend_batch_read_seconds_bucket{component="cache",le="0.016"} 73 +backend_batch_read_seconds_bucket{component="cache",le="0.032"} 73 +backend_batch_read_seconds_bucket{component="cache",le="0.064"} 73 +backend_batch_read_seconds_bucket{component="cache",le="0.128"} 73 +backend_batch_read_seconds_bucket{component="cache",le="0.256"} 73 +backend_batch_read_seconds_bucket{component="cache",le="0.512"} 73 +backend_batch_read_seconds_bucket{component="cache",le="1.024"} 73 +backend_batch_read_seconds_bucket{component="cache",le="2.048"} 73 +backend_batch_read_seconds_bucket{component="cache",le="4.096"} 73 +backend_batch_read_seconds_bucket{component="cache",le="8.192"} 73 +backend_batch_read_seconds_bucket{component="cache",le="16.384"} 73 +backend_batch_read_seconds_bucket{component="cache",le="32.768"} 73 +backend_batch_read_seconds_bucket{component="cache",le="+Inf"} 73 +backend_batch_read_seconds_sum{component="cache"} 0.000294537 +backend_batch_read_seconds_count{component="cache"} 73 +# HELP backend_batch_write_requests_total Number of batch write requests to the backend +# TYPE backend_batch_write_requests_total counter +backend_batch_write_requests_total{component="cache"} 93 +# HELP backend_batch_write_seconds Latency for backend batch write operations +# TYPE backend_batch_write_seconds histogram +backend_batch_write_seconds_bucket{component="cache",le="0.001"} 93 +backend_batch_write_seconds_bucket{component="cache",le="0.002"} 93 
+backend_batch_write_seconds_bucket{component="cache",le="0.004"} 93 +backend_batch_write_seconds_bucket{component="cache",le="0.008"} 93 +backend_batch_write_seconds_bucket{component="cache",le="0.016"} 93 +backend_batch_write_seconds_bucket{component="cache",le="0.032"} 93 +backend_batch_write_seconds_bucket{component="cache",le="0.064"} 93 +backend_batch_write_seconds_bucket{component="cache",le="0.128"} 93 +backend_batch_write_seconds_bucket{component="cache",le="0.256"} 93 +backend_batch_write_seconds_bucket{component="cache",le="0.512"} 93 +backend_batch_write_seconds_bucket{component="cache",le="1.024"} 93 +backend_batch_write_seconds_bucket{component="cache",le="2.048"} 93 +backend_batch_write_seconds_bucket{component="cache",le="4.096"} 93 +backend_batch_write_seconds_bucket{component="cache",le="8.192"} 93 +backend_batch_write_seconds_bucket{component="cache",le="16.384"} 93 +backend_batch_write_seconds_bucket{component="cache",le="32.768"} 93 +backend_batch_write_seconds_bucket{component="cache",le="+Inf"} 93 +backend_batch_write_seconds_sum{component="cache"} 2.996099999999998e-05 +backend_batch_write_seconds_count{component="cache"} 93 +# HELP backend_read_requests_total Number of read requests to the backend +# TYPE backend_read_requests_total counter +backend_read_requests_total{component="backend"} 43 +backend_read_requests_total{component="cache"} 237 +# HELP backend_read_seconds Latency for read operations +# TYPE backend_read_seconds histogram +backend_read_seconds_bucket{component="backend",le="0.001"} 35 +backend_read_seconds_bucket{component="backend",le="0.002"} 41 +backend_read_seconds_bucket{component="backend",le="0.004"} 43 +backend_read_seconds_bucket{component="backend",le="0.008"} 43 +backend_read_seconds_bucket{component="backend",le="0.016"} 43 +backend_read_seconds_bucket{component="backend",le="0.032"} 43 +backend_read_seconds_bucket{component="backend",le="0.064"} 43 +backend_read_seconds_bucket{component="backend",le="0.128"} 43 
+backend_read_seconds_bucket{component="backend",le="0.256"} 43 +backend_read_seconds_bucket{component="backend",le="0.512"} 43 +backend_read_seconds_bucket{component="backend",le="1.024"} 43 +backend_read_seconds_bucket{component="backend",le="2.048"} 43 +backend_read_seconds_bucket{component="backend",le="4.096"} 43 +backend_read_seconds_bucket{component="backend",le="8.192"} 43 +backend_read_seconds_bucket{component="backend",le="16.384"} 43 +backend_read_seconds_bucket{component="backend",le="32.768"} 43 +backend_read_seconds_bucket{component="backend",le="+Inf"} 43 +backend_read_seconds_sum{component="backend"} 0.023900751 +backend_read_seconds_count{component="backend"} 43 +backend_read_seconds_bucket{component="cache",le="0.001"} 237 +backend_read_seconds_bucket{component="cache",le="0.002"} 237 +backend_read_seconds_bucket{component="cache",le="0.004"} 237 +backend_read_seconds_bucket{component="cache",le="0.008"} 237 +backend_read_seconds_bucket{component="cache",le="0.016"} 237 +backend_read_seconds_bucket{component="cache",le="0.032"} 237 +backend_read_seconds_bucket{component="cache",le="0.064"} 237 +backend_read_seconds_bucket{component="cache",le="0.128"} 237 +backend_read_seconds_bucket{component="cache",le="0.256"} 237 +backend_read_seconds_bucket{component="cache",le="0.512"} 237 +backend_read_seconds_bucket{component="cache",le="1.024"} 237 +backend_read_seconds_bucket{component="cache",le="2.048"} 237 +backend_read_seconds_bucket{component="cache",le="4.096"} 237 +backend_read_seconds_bucket{component="cache",le="8.192"} 237 +backend_read_seconds_bucket{component="cache",le="16.384"} 237 +backend_read_seconds_bucket{component="cache",le="32.768"} 237 +backend_read_seconds_bucket{component="cache",le="+Inf"} 237 +backend_read_seconds_sum{component="cache"} 0.001213918999999999 +backend_read_seconds_count{component="cache"} 237 +# HELP backend_requests Number of requests to the backend (reads, writes, and keepalives) +# TYPE backend_requests 
counter +backend_requests{component="backend",range="false",req=".locks"} 4 +backend_requests{component="backend",range="false",req="/authentication/preference"} 4 +backend_requests{component="backend",range="false",req="/authorities/db"} 4 +backend_requests{component="backend",range="false",req="/authorities/host"} 10 +backend_requests{component="backend",range="false",req="/authorities/jwt"} 3 +backend_requests{component="backend",range="false",req="/authorities/oidc_idp"} 3 +backend_requests{component="backend",range="false",req="/authorities/openssh"} 3 +backend_requests{component="backend",range="false",req="/authorities/saml_idp"} 3 +backend_requests{component="backend",range="false",req="/authorities/user"} 3 +backend_requests{component="backend",range="false",req="/authservers"} 1 +backend_requests{component="backend",range="false",req="/cluster-alerts"} 3 +backend_requests{component="backend",range="false",req="/cluster_configuration/audit"} 2 +backend_requests{component="backend",range="false",req="/cluster_configuration/name"} 5 +backend_requests{component="backend",range="false",req="/cluster_configuration/networking"} 3 +backend_requests{component="backend",range="false",req="/cluster_configuration/session_recording"} 3 +backend_requests{component="backend",range="false",req="/cluster_configuration/static_tokens"} 2 +backend_requests{component="backend",range="false",req="/cluster_configuration/ui"} 1 +backend_requests{component="backend",range="false",req="/external_audit_storage"} 1 +backend_requests{component="backend",range="false",req="/migrations"} 3 +backend_requests{component="backend",range="false",req="/namespaces/default"} 1 +backend_requests{component="backend",range="false",req="/nodes/default"} 2 +backend_requests{component="backend",range="false",req="/proxies"} 1 +backend_requests{component="backend",range="false",req="/restrictions"} 1 +backend_requests{component="backend",range="false",req="/roles/access"} 1 
+backend_requests{component="backend",range="false",req="/roles/auditor"} 1 +backend_requests{component="backend",range="false",req="/roles/editor"} 1 +backend_requests{component="backend",range="true",req="/access_list"} 1 +backend_requests{component="backend",range="true",req="/access_list_member"} 1 +backend_requests{component="backend",range="true",req="/access_list_review"} 1 +backend_requests{component="backend",range="true",req="/access_requests"} 1 +backend_requests{component="backend",range="true",req="/appServers/default"} 1 +backend_requests{component="backend",range="true",req="/applications"} 1 +backend_requests{component="backend",range="true",req="/apps/sessions"} 2 +backend_requests{component="backend",range="true",req="/authorities/db"} 1 +backend_requests{component="backend",range="true",req="/authorities/host"} 2 +backend_requests{component="backend",range="true",req="/authorities/jwt"} 1 +backend_requests{component="backend",range="true",req="/authorities/oidc_idp"} 1 +backend_requests{component="backend",range="true",req="/authorities/openssh"} 1 +backend_requests{component="backend",range="true",req="/authorities/saml_idp"} 1 +backend_requests{component="backend",range="true",req="/authorities/user"} 1 +backend_requests{component="backend",range="true",req="/authservers"} 1 +backend_requests{component="backend",range="true",req="/cluster_configuration/scripts"} 1 +backend_requests{component="backend",range="true",req="/databaseServers/default"} 1 +backend_requests{component="backend",range="true",req="/databaseService"} 1 +backend_requests{component="backend",range="true",req="/db"} 1 +backend_requests{component="backend",range="true",req="/discovery_config"} 1 +backend_requests{component="backend",range="true",req="/headless_authentication"} 1 +backend_requests{component="backend",range="true",req="/integrations"} 1 +backend_requests{component="backend",range="true",req="/kubeServers"} 1 
+backend_requests{component="backend",range="true",req="/kubernetes"} 1 +backend_requests{component="backend",range="true",req="/locks"} 2 +backend_requests{component="backend",range="true",req="/namespaces"} 1 +backend_requests{component="backend",range="true",req="/nodes/default"} 1 +backend_requests{component="backend",range="true",req="/okta_assignment"} 1 +backend_requests{component="backend",range="true",req="/okta_import_rule"} 1 +backend_requests{component="backend",range="true",req="/proxies"} 1 +backend_requests{component="backend",range="true",req="/remoteClusters"} 2 +backend_requests{component="backend",range="true",req="/reverseTunnels"} 1 +backend_requests{component="backend",range="true",req="/roles"} 1 +backend_requests{component="backend",range="true",req="/saml_idp/sessions"} 2 +backend_requests{component="backend",range="true",req="/saml_idp_service_provider"} 1 +backend_requests{component="backend",range="true",req="/snowflake/sessions"} 2 +backend_requests{component="backend",range="true",req="/tokens"} 1 +backend_requests{component="backend",range="true",req="/trustedclusters"} 1 +backend_requests{component="backend",range="true",req="/tunnelConnections"} 1 +backend_requests{component="backend",range="true",req="/user_group"} 1 +backend_requests{component="backend",range="true",req="/user_login_state"} 1 +backend_requests{component="backend",range="true",req="/web/sessions"} 2 +backend_requests{component="backend",range="true",req="/web/tokens"} 2 +backend_requests{component="backend",range="true",req="/web/users"} 3 +backend_requests{component="backend",range="true",req="/windowsDesktop"} 1 +backend_requests{component="backend",range="true",req="/windowsDesktopServices"} 1 +backend_requests{component="cache",range="false",req="/authentication/preference"} 86 +backend_requests{component="cache",range="false",req="/authorities/db"} 6 +backend_requests{component="cache",range="false",req="/authorities/host"} 19 
+backend_requests{component="cache",range="false",req="/authorities/jwt"} 6 +backend_requests{component="cache",range="false",req="/authorities/oidc_idp"} 6 +backend_requests{component="cache",range="false",req="/authorities/openssh"} 6 +backend_requests{component="cache",range="false",req="/authorities/saml_idp"} 6 +backend_requests{component="cache",range="false",req="/authorities/user"} 14 +backend_requests{component="cache",range="false",req="/authservers"} 2 +backend_requests{component="cache",range="false",req="/cluster_configuration/audit"} 8 +backend_requests{component="cache",range="false",req="/cluster_configuration/name"} 27 +backend_requests{component="cache",range="false",req="/cluster_configuration/networking"} 11 +backend_requests{component="cache",range="false",req="/cluster_configuration/scripts"} 2 +backend_requests{component="cache",range="false",req="/cluster_configuration/session_recording"} 85 +backend_requests{component="cache",range="false",req="/cluster_configuration/static_tokens"} 2 +backend_requests{component="cache",range="false",req="/cluster_configuration/ui"} 3 +backend_requests{component="cache",range="false",req="/namespaces/default"} 7 +backend_requests{component="cache",range="false",req="/nodes/default"} 4 +backend_requests{component="cache",range="false",req="/proxies"} 2 +backend_requests{component="cache",range="false",req="/restrictions"} 3 +backend_requests{component="cache",range="false",req="/roles/access"} 6 +backend_requests{component="cache",range="false",req="/roles/auditor"} 6 +backend_requests{component="cache",range="false",req="/roles/editor"} 6 +backend_requests{component="cache",range="true",req="/access_list"} 1 +backend_requests{component="cache",range="true",req="/access_list_member"} 1 +backend_requests{component="cache",range="true",req="/access_list_review"} 1 +backend_requests{component="cache",range="true",req="/access_requests"} 1 
+backend_requests{component="cache",range="true",req="/appServers/default"} 4 +backend_requests{component="cache",range="true",req="/applications"} 3 +backend_requests{component="cache",range="true",req="/apps/sessions"} 2 +backend_requests{component="cache",range="true",req="/authorities/db"} 6 +backend_requests{component="cache",range="true",req="/authorities/host"} 6 +backend_requests{component="cache",range="true",req="/authorities/jwt"} 5 +backend_requests{component="cache",range="true",req="/authorities/oidc_idp"} 5 +backend_requests{component="cache",range="true",req="/authorities/openssh"} 6 +backend_requests{component="cache",range="true",req="/authorities/saml_idp"} 5 +backend_requests{component="cache",range="true",req="/authorities/user"} 6 +backend_requests{component="cache",range="true",req="/authservers"} 3 +backend_requests{component="cache",range="true",req="/cluster_configuration/scripts"} 3 +backend_requests{component="cache",range="true",req="/databaseServers/default"} 4 +backend_requests{component="cache",range="true",req="/databaseService"} 3 +backend_requests{component="cache",range="true",req="/db"} 3 +backend_requests{component="cache",range="true",req="/discovery_config"} 1 +backend_requests{component="cache",range="true",req="/integrations"} 3 +backend_requests{component="cache",range="true",req="/kubeServers"} 5 +backend_requests{component="cache",range="true",req="/kubernetes"} 3 +backend_requests{component="cache",range="true",req="/locks"} 3 +backend_requests{component="cache",range="true",req="/namespaces"} 5 +backend_requests{component="cache",range="true",req="/nodes/default"} 6 +backend_requests{component="cache",range="true",req="/okta_assignment"} 1 +backend_requests{component="cache",range="true",req="/okta_import_rule"} 1 +backend_requests{component="cache",range="true",req="/proxies"} 8 +backend_requests{component="cache",range="true",req="/remoteClusters"} 4 
+backend_requests{component="cache",range="true",req="/reverseTunnels"} 6 +backend_requests{component="cache",range="true",req="/roles"} 6 +backend_requests{component="cache",range="true",req="/saml_idp/sessions"} 2 +backend_requests{component="cache",range="true",req="/saml_idp_service_provider"} 4 +backend_requests{component="cache",range="true",req="/security_report/audit_query"} 2 +backend_requests{component="cache",range="true",req="/security_report/report"} 6 +backend_requests{component="cache",range="true",req="/security_report/state"} 4 +backend_requests{component="cache",range="true",req="/snowflake/sessions"} 2 +backend_requests{component="cache",range="true",req="/tokens"} 1 +backend_requests{component="cache",range="true",req="/tunnelConnections"} 6 +backend_requests{component="cache",range="true",req="/user_group"} 3 +backend_requests{component="cache",range="true",req="/user_login_state"} 1 +backend_requests{component="cache",range="true",req="/web/sessions"} 2 +backend_requests{component="cache",range="true",req="/web/tokens"} 2 +backend_requests{component="cache",range="true",req="/web/users"} 3 +backend_requests{component="cache",range="true",req="/windowsDesktop"} 5 +backend_requests{component="cache",range="true",req="/windowsDesktopServices"} 3 +# HELP backend_watcher_queues_total Watcher queue sizes +# TYPE backend_watcher_queues_total gauge +backend_watcher_queues_total{component="auth:cache"} 0 +# HELP backend_watchers_total Number of active backend watchers +# TYPE backend_watchers_total gauge +backend_watchers_total{component="backend"} 4 +# HELP backend_write_requests_total Number of write requests to the backend +# TYPE backend_write_requests_total counter +backend_write_requests_total{component="backend"} 26 +backend_write_requests_total{component="cache"} 86 +# HELP backend_write_seconds Latency for backend write operations +# TYPE backend_write_seconds histogram +backend_write_seconds_bucket{component="backend",le="0.001"} 1 
+backend_write_seconds_bucket{component="backend",le="0.002"} 10 +backend_write_seconds_bucket{component="backend",le="0.004"} 16 +backend_write_seconds_bucket{component="backend",le="0.008"} 22 +backend_write_seconds_bucket{component="backend",le="0.016"} 26 +backend_write_seconds_bucket{component="backend",le="0.032"} 26 +backend_write_seconds_bucket{component="backend",le="0.064"} 26 +backend_write_seconds_bucket{component="backend",le="0.128"} 26 +backend_write_seconds_bucket{component="backend",le="0.256"} 26 +backend_write_seconds_bucket{component="backend",le="0.512"} 26 +backend_write_seconds_bucket{component="backend",le="1.024"} 26 +backend_write_seconds_bucket{component="backend",le="2.048"} 26 +backend_write_seconds_bucket{component="backend",le="4.096"} 26 +backend_write_seconds_bucket{component="backend",le="8.192"} 26 +backend_write_seconds_bucket{component="backend",le="16.384"} 26 +backend_write_seconds_bucket{component="backend",le="32.768"} 26 +backend_write_seconds_bucket{component="backend",le="+Inf"} 26 +backend_write_seconds_sum{component="backend"} 0.098229207 +backend_write_seconds_count{component="backend"} 26 +backend_write_seconds_bucket{component="cache",le="0.001"} 86 +backend_write_seconds_bucket{component="cache",le="0.002"} 86 +backend_write_seconds_bucket{component="cache",le="0.004"} 86 +backend_write_seconds_bucket{component="cache",le="0.008"} 86 +backend_write_seconds_bucket{component="cache",le="0.016"} 86 +backend_write_seconds_bucket{component="cache",le="0.032"} 86 +backend_write_seconds_bucket{component="cache",le="0.064"} 86 +backend_write_seconds_bucket{component="cache",le="0.128"} 86 +backend_write_seconds_bucket{component="cache",le="0.256"} 86 +backend_write_seconds_bucket{component="cache",le="0.512"} 86 +backend_write_seconds_bucket{component="cache",le="1.024"} 86 +backend_write_seconds_bucket{component="cache",le="2.048"} 86 +backend_write_seconds_bucket{component="cache",le="4.096"} 86 
+backend_write_seconds_bucket{component="cache",le="8.192"} 86 +backend_write_seconds_bucket{component="cache",le="16.384"} 86 +backend_write_seconds_bucket{component="cache",le="32.768"} 86 +backend_write_seconds_bucket{component="cache",le="+Inf"} 86 +backend_write_seconds_sum{component="cache"} 0.00015979300000000006 +backend_write_seconds_count{component="cache"} 86 +# HELP bpf_lost_restricted_events Number of lost restricted events. +# TYPE bpf_lost_restricted_events counter +bpf_lost_restricted_events 0 +# HELP cluster_name_not_found_total Number of times a cluster name was not found +# TYPE cluster_name_not_found_total counter +cluster_name_not_found_total 0 +# HELP failed_connect_to_node_attempts_total Number of failed SSH connection attempts to a node. Use with `teleport_connect_to_node_attempts_total` to get the failure rate. +# TYPE failed_connect_to_node_attempts_total counter +failed_connect_to_node_attempts_total 0 +# HELP failed_login_attempts_total Number of times there was a failed login +# TYPE failed_login_attempts_total counter +failed_login_attempts_total 0 +# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles. +# TYPE go_gc_duration_seconds summary +go_gc_duration_seconds{quantile="0"} 1.6583e-05 +go_gc_duration_seconds{quantile="0.25"} 9.3209e-05 +go_gc_duration_seconds{quantile="0.5"} 0.000145834 +go_gc_duration_seconds{quantile="0.75"} 0.000190625 +go_gc_duration_seconds{quantile="1"} 0.000245708 +go_gc_duration_seconds_sum 0.001096917 +go_gc_duration_seconds_count 8 +# HELP go_goroutines Number of goroutines that currently exist. +# TYPE go_goroutines gauge +go_goroutines 143 +# HELP go_info Information about the Go environment. +# TYPE go_info gauge +go_info{version="go1.21.6"} 1 +# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. 
+# TYPE go_memstats_alloc_bytes gauge +go_memstats_alloc_bytes 4.7696184e+07 +# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. +# TYPE go_memstats_alloc_bytes_total counter +go_memstats_alloc_bytes_total 1.6756536e+08 +# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. +# TYPE go_memstats_buck_hash_sys_bytes gauge +go_memstats_buck_hash_sys_bytes 1.489942e+06 +# HELP go_memstats_frees_total Total number of frees. +# TYPE go_memstats_frees_total counter +go_memstats_frees_total 507162 +# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. +# TYPE go_memstats_gc_sys_bytes gauge +go_memstats_gc_sys_bytes 6.377168e+06 +# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. +# TYPE go_memstats_heap_alloc_bytes gauge +go_memstats_heap_alloc_bytes 4.7696184e+07 +# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. +# TYPE go_memstats_heap_idle_bytes gauge +go_memstats_heap_idle_bytes 2.1594112e+07 +# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. +# TYPE go_memstats_heap_inuse_bytes gauge +go_memstats_heap_inuse_bytes 5.6262656e+07 +# HELP go_memstats_heap_objects Number of allocated objects. +# TYPE go_memstats_heap_objects gauge +go_memstats_heap_objects 121624 +# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. +# TYPE go_memstats_heap_released_bytes gauge +go_memstats_heap_released_bytes 2.842624e+06 +# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. +# TYPE go_memstats_heap_sys_bytes gauge +go_memstats_heap_sys_bytes 7.7856768e+07 +# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. +# TYPE go_memstats_last_gc_time_seconds gauge +go_memstats_last_gc_time_seconds 1.709038395328938e+09 +# HELP go_memstats_lookups_total Total number of pointer lookups. 
+# TYPE go_memstats_lookups_total counter +go_memstats_lookups_total 0 +# HELP go_memstats_mallocs_total Total number of mallocs. +# TYPE go_memstats_mallocs_total counter +go_memstats_mallocs_total 628786 +# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. +# TYPE go_memstats_mcache_inuse_bytes gauge +go_memstats_mcache_inuse_bytes 6000 +# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. +# TYPE go_memstats_mcache_sys_bytes gauge +go_memstats_mcache_sys_bytes 15600 +# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. +# TYPE go_memstats_mspan_inuse_bytes gauge +go_memstats_mspan_inuse_bytes 666792 +# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. +# TYPE go_memstats_mspan_sys_bytes gauge +go_memstats_mspan_sys_bytes 977760 +# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. +# TYPE go_memstats_next_gc_bytes gauge +go_memstats_next_gc_bytes 6.6114568e+07 +# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. +# TYPE go_memstats_other_sys_bytes gauge +go_memstats_other_sys_bytes 1.486306e+06 +# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. +# TYPE go_memstats_stack_inuse_bytes gauge +go_memstats_stack_inuse_bytes 1.835008e+06 +# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. +# TYPE go_memstats_stack_sys_bytes gauge +go_memstats_stack_sys_bytes 1.835008e+06 +# HELP go_memstats_sys_bytes Number of bytes obtained from system. +# TYPE go_memstats_sys_bytes gauge +go_memstats_sys_bytes 9.0038552e+07 +# HELP go_threads Number of OS threads created. +# TYPE go_threads gauge +go_threads 12 +# HELP grpc_client_started_total Total number of RPCs started on the client. 
+# TYPE grpc_client_started_total counter +grpc_client_started_total 1 +# HELP grpc_client_handled_total Total number of RPCs completed on the client, regardless of success or failure. +# TYPE grpc_client_handled_total counter +grpc_client_handled_total 1 +# HELP grpc_client_msg_received_total Total number of RPC stream messages received on the client. +# TYPE grpc_client_msg_received_total counter +grpc_client_msg_received_total 1 +# HELP grpc_client_msg_sent_total Total number of gRPC stream messages sent by the client. +# TYPE grpc_client_msg_sent_total counter +grpc_client_msg_sent_total 1 +# HELP grpc_server_handled_total Total number of RPCs completed on the server, regardless of success or failure. +# TYPE grpc_server_handled_total counter +grpc_server_handled_total{grpc_code="NotFound",grpc_method="GetNetworkRestrictions",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_handled_total{grpc_code="NotFound",grpc_method="GetUIConfig",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_handled_total{grpc_code="OK",grpc_method="GetApps",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_handled_total{grpc_code="OK",grpc_method="GetAuthPreference",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 2 +grpc_server_handled_total{grpc_code="OK",grpc_method="GetCertAuthorities",grpc_service="teleport.trust.v1.TrustService",grpc_type="unary",server="teleport-auth"} 14 +grpc_server_handled_total{grpc_code="OK",grpc_method="GetCertAuthority",grpc_service="teleport.trust.v1.TrustService",grpc_type="unary",server="teleport-auth"} 3 +grpc_server_handled_total{grpc_code="OK",grpc_method="GetClusterAuditConfig",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 2 
+grpc_server_handled_total{grpc_code="OK",grpc_method="GetClusterNetworkingConfig",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 2 +grpc_server_handled_total{grpc_code="OK",grpc_method="GetDatabases",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_handled_total{grpc_code="OK",grpc_method="GetDomainName",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_handled_total{grpc_code="OK",grpc_method="GetInstallers",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_handled_total{grpc_code="OK",grpc_method="GetKubernetesClusters",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_handled_total{grpc_code="OK",grpc_method="GetLocks",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 2 +grpc_server_handled_total{grpc_code="OK",grpc_method="GetRoles",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 2 +grpc_server_handled_total{grpc_code="OK",grpc_method="GetSessionRecordingConfig",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 2 +grpc_server_handled_total{grpc_code="OK",grpc_method="GetSnowflakeSessions",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_handled_total{grpc_code="OK",grpc_method="GetUsers",grpc_service="proto.AuthService",grpc_type="server_stream",server="teleport-auth"} 1 +grpc_server_handled_total{grpc_code="OK",grpc_method="GetWebSessions",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_handled_total{grpc_code="OK",grpc_method="GetWebTokens",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_handled_total{grpc_code="OK",grpc_method="GetWindowsDesktopServices",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 
+grpc_server_handled_total{grpc_code="OK",grpc_method="GetWindowsDesktops",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_handled_total{grpc_code="OK",grpc_method="ListAppSessions",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_handled_total{grpc_code="OK",grpc_method="ListIntegrations",grpc_service="teleport.integration.v1.IntegrationService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_handled_total{grpc_code="OK",grpc_method="ListResources",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 5 +grpc_server_handled_total{grpc_code="OK",grpc_method="ListSAMLIdPServiceProviders",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_handled_total{grpc_code="OK",grpc_method="ListSAMLIdPSessions",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_handled_total{grpc_code="OK",grpc_method="ListUserGroups",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_handled_total{grpc_code="OK",grpc_method="Ping",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 4 +# HELP grpc_server_msg_received_total Total number of RPC stream messages received on the server. 
+# TYPE grpc_server_msg_received_total counter +grpc_server_msg_received_total{grpc_method="GetApps",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_msg_received_total{grpc_method="GetAuthPreference",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 2 +grpc_server_msg_received_total{grpc_method="GetCertAuthorities",grpc_service="teleport.trust.v1.TrustService",grpc_type="unary",server="teleport-auth"} 14 +grpc_server_msg_received_total{grpc_method="GetCertAuthority",grpc_service="teleport.trust.v1.TrustService",grpc_type="unary",server="teleport-auth"} 3 +grpc_server_msg_received_total{grpc_method="GetClusterAuditConfig",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 2 +grpc_server_msg_received_total{grpc_method="GetClusterNetworkingConfig",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 2 +grpc_server_msg_received_total{grpc_method="GetDatabases",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_msg_received_total{grpc_method="GetDomainName",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_msg_received_total{grpc_method="GetInstallers",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_msg_received_total{grpc_method="GetKubernetesClusters",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_msg_received_total{grpc_method="GetLocks",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 2 +grpc_server_msg_received_total{grpc_method="GetNetworkRestrictions",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_msg_received_total{grpc_method="GetRoles",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 2 
+grpc_server_msg_received_total{grpc_method="GetSessionRecordingConfig",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 2 +grpc_server_msg_received_total{grpc_method="GetSnowflakeSessions",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_msg_received_total{grpc_method="GetUIConfig",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_msg_received_total{grpc_method="GetUsers",grpc_service="proto.AuthService",grpc_type="server_stream",server="teleport-auth"} 1 +grpc_server_msg_received_total{grpc_method="GetWebSessions",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_msg_received_total{grpc_method="GetWebTokens",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_msg_received_total{grpc_method="GetWindowsDesktopServices",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_msg_received_total{grpc_method="GetWindowsDesktops",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_msg_received_total{grpc_method="ListAppSessions",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_msg_received_total{grpc_method="ListIntegrations",grpc_service="teleport.integration.v1.IntegrationService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_msg_received_total{grpc_method="ListResources",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 5 +grpc_server_msg_received_total{grpc_method="ListSAMLIdPServiceProviders",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_msg_received_total{grpc_method="ListSAMLIdPSessions",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 
+grpc_server_msg_received_total{grpc_method="ListUserGroups",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_msg_received_total{grpc_method="Ping",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 4 +grpc_server_msg_received_total{grpc_method="WatchEvents",grpc_service="proto.AuthService",grpc_type="server_stream",server="teleport-auth"} 4 +# HELP grpc_server_msg_sent_total Total number of gRPC stream messages sent by the server. +# TYPE grpc_server_msg_sent_total counter +grpc_server_msg_sent_total{grpc_method="GetApps",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_msg_sent_total{grpc_method="GetAuthPreference",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 2 +grpc_server_msg_sent_total{grpc_method="GetCertAuthorities",grpc_service="teleport.trust.v1.TrustService",grpc_type="unary",server="teleport-auth"} 14 +grpc_server_msg_sent_total{grpc_method="GetCertAuthority",grpc_service="teleport.trust.v1.TrustService",grpc_type="unary",server="teleport-auth"} 3 +grpc_server_msg_sent_total{grpc_method="GetClusterAuditConfig",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 2 +grpc_server_msg_sent_total{grpc_method="GetClusterNetworkingConfig",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 2 +grpc_server_msg_sent_total{grpc_method="GetDatabases",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_msg_sent_total{grpc_method="GetDomainName",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_msg_sent_total{grpc_method="GetInstallers",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_msg_sent_total{grpc_method="GetKubernetesClusters",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 
+grpc_server_msg_sent_total{grpc_method="GetLocks",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 2 +grpc_server_msg_sent_total{grpc_method="GetNetworkRestrictions",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_msg_sent_total{grpc_method="GetRoles",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 2 +grpc_server_msg_sent_total{grpc_method="GetSessionRecordingConfig",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 2 +grpc_server_msg_sent_total{grpc_method="GetSnowflakeSessions",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_msg_sent_total{grpc_method="GetUIConfig",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_msg_sent_total{grpc_method="GetWebSessions",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_msg_sent_total{grpc_method="GetWebTokens",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_msg_sent_total{grpc_method="GetWindowsDesktopServices",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_msg_sent_total{grpc_method="GetWindowsDesktops",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_msg_sent_total{grpc_method="ListAppSessions",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_msg_sent_total{grpc_method="ListIntegrations",grpc_service="teleport.integration.v1.IntegrationService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_msg_sent_total{grpc_method="ListResources",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 5 +grpc_server_msg_sent_total{grpc_method="ListSAMLIdPServiceProviders",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 
+grpc_server_msg_sent_total{grpc_method="ListSAMLIdPSessions",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_msg_sent_total{grpc_method="ListUserGroups",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_msg_sent_total{grpc_method="Ping",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 4 +grpc_server_msg_sent_total{grpc_method="WatchEvents",grpc_service="proto.AuthService",grpc_type="server_stream",server="teleport-auth"} 35 +# HELP grpc_server_started_total Total number of RPCs started on the server. +# TYPE grpc_server_started_total counter +grpc_server_started_total{grpc_method="GetApps",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_started_total{grpc_method="GetAuthPreference",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 2 +grpc_server_started_total{grpc_method="GetCertAuthorities",grpc_service="teleport.trust.v1.TrustService",grpc_type="unary",server="teleport-auth"} 14 +grpc_server_started_total{grpc_method="GetCertAuthority",grpc_service="teleport.trust.v1.TrustService",grpc_type="unary",server="teleport-auth"} 3 +grpc_server_started_total{grpc_method="GetClusterAuditConfig",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 2 +grpc_server_started_total{grpc_method="GetClusterNetworkingConfig",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 2 +grpc_server_started_total{grpc_method="GetDatabases",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_started_total{grpc_method="GetDomainName",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_started_total{grpc_method="GetInstallers",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 
+grpc_server_started_total{grpc_method="GetKubernetesClusters",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_started_total{grpc_method="GetLocks",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 2 +grpc_server_started_total{grpc_method="GetNetworkRestrictions",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_started_total{grpc_method="GetRoles",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 2 +grpc_server_started_total{grpc_method="GetSessionRecordingConfig",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 2 +grpc_server_started_total{grpc_method="GetSnowflakeSessions",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_started_total{grpc_method="GetUIConfig",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_started_total{grpc_method="GetUsers",grpc_service="proto.AuthService",grpc_type="server_stream",server="teleport-auth"} 1 +grpc_server_started_total{grpc_method="GetWebSessions",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_started_total{grpc_method="GetWebTokens",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_started_total{grpc_method="GetWindowsDesktopServices",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_started_total{grpc_method="GetWindowsDesktops",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_started_total{grpc_method="ListAppSessions",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_started_total{grpc_method="ListIntegrations",grpc_service="teleport.integration.v1.IntegrationService",grpc_type="unary",server="teleport-auth"} 1 
+grpc_server_started_total{grpc_method="ListResources",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 5 +grpc_server_started_total{grpc_method="ListSAMLIdPServiceProviders",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_started_total{grpc_method="ListSAMLIdPSessions",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_started_total{grpc_method="ListUserGroups",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 1 +grpc_server_started_total{grpc_method="Ping",grpc_service="proto.AuthService",grpc_type="unary",server="teleport-auth"} 4 +grpc_server_started_total{grpc_method="WatchEvents",grpc_service="proto.AuthService",grpc_type="server_stream",server="teleport-auth"} 4 +# HELP heartbeat_connections_received_total Number of times auth received a heartbeat connection +# TYPE heartbeat_connections_received_total counter +heartbeat_connections_received_total 0 +# HELP heartbeats_missed_total Number of heartbeats missed by auth server +# TYPE heartbeats_missed_total gauge +heartbeats_missed_total 0 +# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. +# TYPE process_cpu_seconds_total counter +process_cpu_seconds_total 4.8 +# HELP process_max_fds Maximum number of open file descriptors. +# TYPE process_max_fds gauge +process_max_fds 1.048576e+06 +# HELP process_open_fds Number of open file descriptors. +# TYPE process_open_fds gauge +process_open_fds 20 +# HELP process_resident_memory_bytes Resident memory size in bytes. +# TYPE process_resident_memory_bytes gauge +process_resident_memory_bytes 1.893376e+08 +# HELP process_start_time_seconds Start time of the process since unix epoch in seconds. 
+# TYPE process_start_time_seconds gauge +process_start_time_seconds 1.70903839127e+09 +# HELP process_state State of the teleport process: 0 - ok, 1 - recovering, 2 - degraded, 3 - starting +# TYPE process_state gauge +process_state 0 +# HELP process_virtual_memory_bytes Virtual memory size in bytes. +# TYPE process_virtual_memory_bytes gauge +process_virtual_memory_bytes 2.325594112e+09 +# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes. +# TYPE process_virtual_memory_max_bytes gauge +process_virtual_memory_max_bytes 1.8446744073709552e+19 +# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served. +# TYPE promhttp_metric_handler_requests_in_flight gauge +promhttp_metric_handler_requests_in_flight 1 +# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code. +# TYPE promhttp_metric_handler_requests_total counter +promhttp_metric_handler_requests_total{code="200"} 3 +promhttp_metric_handler_requests_total{code="500"} 0 +promhttp_metric_handler_requests_total{code="503"} 0 +# HELP proxy_connection_limit_exceeded_total Number of times the proxy connection limit was exceeded +# TYPE proxy_connection_limit_exceeded_total counter +proxy_connection_limit_exceeded_total 0 +# HELP proxy_peer_client_dial_error_total Total number of errors encountered dialling peer Proxy Service instances. +# TYPE proxy_peer_client_dial_error_total counter +proxy_peer_client_dial_error_total 5 +# HELP proxy_peer_server_connections Number of currently opened connection to proxy Proxy Service instances. +# TYPE proxy_peer_server_connections gauge +proxy_peer_server_connections 10 +# HELP proxy_peer_client_rpc Number of current client RPC requests. +# TYPE proxy_peer_client_rpc gauge +proxy_peer_client_rpc 3 +# HELP proxy_peer_client_rpc_total Total number of client RPC requests. 
+# TYPE proxy_peer_client_rpc_total counter +proxy_peer_client_rpc_total 20 +# HELP proxy_peer_client_rpc_duration_seconds Duration in seconds of RPCs sent by the client. +# TYPE proxy_peer_client_rpc_duration_seconds histogram +proxy_peer_client_rpc_duration_seconds_bucket{le="0.005"} 30 +proxy_peer_client_rpc_duration_seconds_bucket{le="0.01"} 40 +proxy_peer_client_rpc_duration_seconds_bucket{le="0.025"} 50 +proxy_peer_client_rpc_duration_seconds_bucket{le="0.05"} 60 +proxy_peer_client_rpc_duration_seconds_bucket{le="0.1"} 70 +proxy_peer_client_rpc_duration_seconds_bucket{le="0.25"} 80 +proxy_peer_client_rpc_duration_seconds_bucket{le="0.5"} 90 +proxy_peer_client_rpc_duration_seconds_bucket{le="1"} 100 +proxy_peer_client_rpc_duration_seconds_count 450 +proxy_peer_client_rpc_duration_seconds_sum 150.5 +# HELP proxy_peer_client_message_sent_size Size of messages sent by the client. +# TYPE proxy_peer_client_message_sent_size histogram +proxy_peer_client_message_sent_size_bucket{le="100"} 30 +proxy_peer_client_message_sent_size_bucket{le="500"} 40 +proxy_peer_client_message_sent_size_bucket{le="1000"} 50 +proxy_peer_client_message_sent_size_bucket{le="5000"} 60 +proxy_peer_client_message_sent_size_bucket{le="10000"} 70 +proxy_peer_client_message_sent_size_bucket{le="50000"} 80 +proxy_peer_client_message_sent_size_bucket{le="100000"} 90 +proxy_peer_client_message_sent_size_bucket{le="500000"} 100 +proxy_peer_client_message_sent_size_count 450 +proxy_peer_client_message_sent_size_sum 150.5 +# HELP proxy_peer_client_message_received_size Size of messages received by the client. 
+# TYPE proxy_peer_client_message_received_size histogram +proxy_peer_client_message_received_size_bucket{le="100"} 20 +proxy_peer_client_message_received_size_bucket{le="500"} 30 +proxy_peer_client_message_received_size_bucket{le="1000"} 40 +proxy_peer_client_message_received_size_bucket{le="5000"} 50 +proxy_peer_client_message_received_size_bucket{le="10000"} 60 +proxy_peer_client_message_received_size_bucket{le="50000"} 70 +proxy_peer_client_message_received_size_bucket{le="100000"} 80 +proxy_peer_client_message_received_size_bucket{le="500000"} 90 +proxy_peer_client_message_received_size_bucket{le="1000000"} 100 +proxy_peer_client_message_received_size_count 300 +proxy_peer_client_message_received_size_sum 100.5 + +# HELP proxy_peer_server_connections Number of currently opened connection to peer Proxy Service clients. +# TYPE proxy_peer_server_connections gauge +proxy_peer_server_connections 12 +# HELP proxy_peer_server_rpc Number of current server RPC requests. +# TYPE proxy_peer_server_rpc gauge +proxy_peer_server_rpc 4 +# HELP proxy_peer_server_rpc_total Total number of server RPC requests. +# TYPE proxy_peer_server_rpc_total counter +proxy_peer_server_rpc_total 30 +# HELP proxy_peer_server_rpc_duration_seconds Duration in seconds of RPCs sent by the server. 
+# TYPE proxy_peer_server_rpc_duration_seconds histogram +proxy_peer_server_rpc_duration_seconds_bucket{le="0.005"} 25 +proxy_peer_server_rpc_duration_seconds_bucket{le="0.01"} 35 +proxy_peer_server_rpc_duration_seconds_bucket{le="0.025"} 45 +proxy_peer_server_rpc_duration_seconds_bucket{le="0.05"} 55 +proxy_peer_server_rpc_duration_seconds_bucket{le="0.1"} 65 +proxy_peer_server_rpc_duration_seconds_bucket{le="0.25"} 75 +proxy_peer_server_rpc_duration_seconds_bucket{le="0.5"} 85 +proxy_peer_server_rpc_duration_seconds_bucket{le="1"} 95 +proxy_peer_server_rpc_duration_seconds_count 400 +proxy_peer_server_rpc_duration_seconds_sum 130.2 + +# HELP proxy_peer_server_message_sent_size Size of messages sent by the server. +# TYPE proxy_peer_server_message_sent_size histogram +proxy_peer_server_message_sent_size_bucket{le="100"} 25 +proxy_peer_server_message_sent_size_bucket{le="500"} 35 +proxy_peer_server_message_sent_size_bucket{le="1000"} 45 +proxy_peer_server_message_sent_size_bucket{le="5000"} 55 +proxy_peer_server_message_sent_size_bucket{le="10000"} 65 +proxy_peer_server_message_sent_size_bucket{le="50000"} 75 +proxy_peer_server_message_sent_size_bucket{le="100000"} 85 +proxy_peer_server_message_sent_size_bucket{le="500000"} 95 +proxy_peer_server_message_sent_size_bucket{le="1000000"} 105 +proxy_peer_server_message_sent_size_count 400 +proxy_peer_server_message_sent_size_sum 130.2 +# HELP proxy_peer_server_message_received_size Size of messages received by the server. 
+# TYPE proxy_peer_server_message_received_size histogram +proxy_peer_server_message_received_size_bucket{le="100"} 15 +proxy_peer_server_message_received_size_bucket{le="500"} 25 +proxy_peer_server_message_received_size_bucket{le="1000"} 35 +proxy_peer_server_message_received_size_bucket{le="5000"} 45 +proxy_peer_server_message_received_size_bucket{le="10000"} 55 +proxy_peer_server_message_received_size_bucket{le="50000"} 65 +proxy_peer_server_message_received_size_bucket{le="100000"} 75 +proxy_peer_server_message_received_size_bucket{le="500000"} 85 +proxy_peer_server_message_received_size_bucket{le="1000000"} 95 +proxy_peer_server_message_received_size_count 350 +proxy_peer_server_message_received_size_sum 120.6 +# HELP proxy_missing_ssh_tunnels Number of missing SSH tunnels +# TYPE proxy_missing_ssh_tunnels gauge +proxy_missing_ssh_tunnels 10 +# HELP remote_clusters Number of inbound connections from leaf clusters. +# TYPE remote_clusters gauge +remote_clusters 10 +# HELP proxy_ssh_sessions_total Number of active sessions through this proxy +# TYPE proxy_ssh_sessions_total gauge +proxy_ssh_sessions_total 0 +# HELP rx Number of bytes received. 
+# TYPE rx counter +rx 0 +# HELP server_interactive_sessions_total Number of active sessions to this host +# TYPE server_interactive_sessions_total gauge +server_interactive_sessions_total 0 +# HELP teleport_audit_emit_events Number of audit events emitted +# TYPE teleport_audit_emit_events counter +teleport_audit_emit_events 0 +# HELP teleport_audit_emitted_event_sizes Size of single events emitted +# TYPE teleport_audit_emitted_event_sizes histogram +teleport_audit_emitted_event_sizes_bucket{le="64"} 0 +teleport_audit_emitted_event_sizes_bucket{le="203.18733465192952"} 0 +teleport_audit_emitted_event_sizes_bucket{le="645.079577546175"} 0 +teleport_audit_emitted_event_sizes_bucket{le="2047.9999999999995"} 0 +teleport_audit_emitted_event_sizes_bucket{le="6501.994708861743"} 0 +teleport_audit_emitted_event_sizes_bucket{le="20642.54648147759"} 0 +teleport_audit_emitted_event_sizes_bucket{le="65535.99999999996"} 0 +teleport_audit_emitted_event_sizes_bucket{le="208063.8306835757"} 0 +teleport_audit_emitted_event_sizes_bucket{le="660561.4874072828"} 0 +teleport_audit_emitted_event_sizes_bucket{le="2.0971519999999984e+06"} 0 +teleport_audit_emitted_event_sizes_bucket{le="6.658042581874422e+06"} 0 +teleport_audit_emitted_event_sizes_bucket{le="2.1137967597033046e+07"} 0 +teleport_audit_emitted_event_sizes_bucket{le="6.710886399999993e+07"} 0 +teleport_audit_emitted_event_sizes_bucket{le="2.130573626199814e+08"} 0 +teleport_audit_emitted_event_sizes_bucket{le="6.764149631050572e+08"} 0 +teleport_audit_emitted_event_sizes_bucket{le="2.1474836479999971e+09"} 0 +teleport_audit_emitted_event_sizes_bucket{le="+Inf"} 0 +teleport_audit_emitted_event_sizes_sum 0 +teleport_audit_emitted_event_sizes_count 0 +# HELP teleport_audit_queried_trimmed_events Number of events that were trimmed before being returned from a query +# TYPE teleport_audit_queried_trimmed_events counter +teleport_audit_queried_trimmed_events 0 +# HELP teleport_audit_stored_trimmed_events Number of events that 
were trimmed before being stored +# TYPE teleport_audit_stored_trimmed_events counter +teleport_audit_stored_trimmed_events 0 +# HELP teleport_build_info Provides build information of Teleport including gitref (git describe --long --tags), Go version, and Teleport version. The value of this gauge will always be 1. +# TYPE teleport_build_info gauge +teleport_build_info{gitref="v14.3.3-0-g542fbb0",goversion="go1.21.6",version="14.3.3"} 1 +# HELP teleport_cache_events Number of events received by a Teleport service cache. Teleport's Auth Service, Proxy Service, and other services cache incoming events related to their service. +# TYPE teleport_cache_events counter +teleport_cache_events{cache_component="auth"} 21 +teleport_cache_events{cache_component="node"} 11 +teleport_cache_events{cache_component="proxy"} 20 +# HELP teleport_cache_stale_events Number of stale events received by a Teleport service cache. A high percentage of stale events can indicate a degraded backend. +# TYPE teleport_cache_stale_events counter +teleport_cache_stale_events{cache_component="auth"} 1 +teleport_cache_stale_events{cache_component="node"} 2 +teleport_cache_stale_events{cache_component="proxy"} 3 + +# HELP teleport_connect_to_node_attempts_total Number of SSH connection attempts to a node. Use with `failed_connect_to_node_attempts_total` to get the failure rate. +# TYPE teleport_connect_to_node_attempts_total counter +teleport_connect_to_node_attempts_total 0 +# HELP teleport_reverse_tunnels_connected Number of reverse SSH tunnels connected to the Teleport Proxy Service by Teleport instances. +# TYPE teleport_reverse_tunnels_connected gauge +teleport_reverse_tunnels_connected 10 + +# HELP trusted_clusters Number of outbound connections to leaf clusters. +# TYPE trusted_clusters gauge +trusted_clusters 5 +# HELP teleport_proxy_db_connection_setup_time_seconds Time to establish connection to DB service from Proxy service. 
+# TYPE teleport_proxy_db_connection_setup_time_seconds histogram +teleport_proxy_db_connection_setup_time_seconds_bucket{le="0.005"} 15 +teleport_proxy_db_connection_setup_time_seconds_bucket{le="0.01"} 25 +teleport_proxy_db_connection_setup_time_seconds_bucket{le="0.025"} 35 +teleport_proxy_db_connection_setup_time_seconds_bucket{le="0.05"} 45 +teleport_proxy_db_connection_setup_time_seconds_bucket{le="0.1"} 55 +teleport_proxy_db_connection_setup_time_seconds_bucket{le="0.25"} 65 +teleport_proxy_db_connection_setup_time_seconds_bucket{le="0.5"} 75 +teleport_proxy_db_connection_setup_time_seconds_bucket{le="1"} 85 +teleport_proxy_db_connection_setup_time_seconds_count 300 +teleport_proxy_db_connection_setup_time_seconds_sum 100.2 + +# HELP teleport_proxy_db_connection_dial_attempts_total Number of dial attempts from Proxy to DB service made. +# TYPE teleport_proxy_db_connection_dial_attempts_total counter +teleport_proxy_db_connection_dial_attempts_total 50 + +# HELP teleport_proxy_db_connection_dial_failures_total Number of failed dial attempts from Proxy to DB service made. +# TYPE teleport_proxy_db_connection_dial_failures_total counter +teleport_proxy_db_connection_dial_failures_total 10 + +# HELP teleport_proxy_db_attempted_servers_total Number of servers processed during connection attempt to the DB service from Proxy service. 
+# TYPE teleport_proxy_db_attempted_servers_total histogram +teleport_proxy_db_attempted_servers_total_bucket{le="2"} 20 +teleport_proxy_db_attempted_servers_total_bucket{le="5"} 30 +teleport_proxy_db_attempted_servers_total_bucket{le="10"} 40 +teleport_proxy_db_attempted_servers_total_bucket{le="20"} 50 +teleport_proxy_db_attempted_servers_total_bucket{le="50"} 60 +teleport_proxy_db_attempted_servers_total_bucket{le="100"} 70 +teleport_proxy_db_attempted_servers_total_bucket{le="200"} 80 +teleport_proxy_db_attempted_servers_total_bucket{le="500"} 90 +teleport_proxy_db_attempted_servers_total_count 280 +teleport_proxy_db_attempted_servers_total_sum 90.6 + +# HELP teleport_proxy_db_connection_tls_config_time_seconds Time to fetch TLS configuration for the connection to DB service from Proxy service. +# TYPE teleport_proxy_db_connection_tls_config_time_seconds histogram +teleport_proxy_db_connection_tls_config_time_seconds_bucket{le="0.1"} 10 +teleport_proxy_db_connection_tls_config_time_seconds_bucket{le="0.25"} 20 +teleport_proxy_db_connection_tls_config_time_seconds_bucket{le="0.5"} 30 +teleport_proxy_db_connection_tls_config_time_seconds_bucket{le="1"} 40 +teleport_proxy_db_connection_tls_config_time_seconds_bucket{le="2.5"} 50 +teleport_proxy_db_connection_tls_config_time_seconds_bucket{le="5"} 60 +teleport_proxy_db_connection_tls_config_time_seconds_bucket{le="10"} 70 +teleport_proxy_db_connection_tls_config_time_seconds_bucket{le="25"} 80 +teleport_proxy_db_connection_tls_config_time_seconds_bucket{le="50"} 90 +teleport_proxy_db_connection_tls_config_time_seconds_bucket{le="100"} 100 +teleport_proxy_db_connection_tls_config_time_seconds_count 350 +teleport_proxy_db_connection_tls_config_time_seconds_sum 120.8 + +# HELP teleport_proxy_db_active_connections_total Number of currently active connections to DB service from Proxy service. 
+# TYPE teleport_proxy_db_active_connections_total gauge +teleport_proxy_db_active_connections_total 25 + + + +# HELP teleport_enrolled_in_upgrades Number of instances enrolled in automatic upgrades +# TYPE teleport_enrolled_in_upgrades gauge +teleport_enrolled_in_upgrades 0 +# HELP teleport_incomplete_session_uploads_total Number of sessions not yet uploaded to auth +# TYPE teleport_incomplete_session_uploads_total gauge +teleport_incomplete_session_uploads_total 0 +# HELP teleport_kubernetes_server_in_flight_requests In-flight requests currently handled by the server. +# TYPE teleport_kubernetes_server_in_flight_requests gauge +teleport_kubernetes_server_in_flight_requests{component="kube_proxy"} 0 +# HELP teleport_migrations Migrations tracks for each migration if it is active (1) or not (0). +# TYPE teleport_migrations gauge +teleport_migrations{migration="remote_clusters"} 0 +# HELP teleport_services Teleport services currently enabled and running +# TYPE teleport_services gauge +teleport_services{service_name="auth_service"} 1 +teleport_services{service_name="proxy_service"} 1 +teleport_services{service_name="ssh_service"} 1 +# HELP teleport_total_instances Total teleport instances +# TYPE teleport_total_instances gauge +teleport_total_instances 0 +# HELP tx Number of bytes transmitted. 
+# TYPE tx counter +tx 0 +# HELP user_login_total Number of times there was a user login +# TYPE user_login_total counter +user_login_total 0 +# HELP user_max_concurrent_sessions_hit_total Number of times a user exceeded their max concurrent ssh connections +# TYPE user_max_concurrent_sessions_hit_total counter +user_max_concurrent_sessions_hit_total 0 +# HELP watcher_event_sizes Overall size of events emitted +# TYPE watcher_event_sizes histogram +watcher_event_sizes_bucket{le="0"} 0 +watcher_event_sizes_bucket{le="100"} 6 +watcher_event_sizes_bucket{le="200"} 14 +watcher_event_sizes_bucket{le="300"} 19 +watcher_event_sizes_bucket{le="400"} 19 +watcher_event_sizes_bucket{le="500"} 20 +watcher_event_sizes_bucket{le="600"} 24 +watcher_event_sizes_bucket{le="700"} 24 +watcher_event_sizes_bucket{le="800"} 27 +watcher_event_sizes_bucket{le="900"} 27 +watcher_event_sizes_bucket{le="1000"} 27 +watcher_event_sizes_bucket{le="1100"} 27 +watcher_event_sizes_bucket{le="1200"} 27 +watcher_event_sizes_bucket{le="1300"} 27 +watcher_event_sizes_bucket{le="1400"} 27 +watcher_event_sizes_bucket{le="1500"} 29 +watcher_event_sizes_bucket{le="1600"} 29 +watcher_event_sizes_bucket{le="1700"} 29 +watcher_event_sizes_bucket{le="1800"} 29 +watcher_event_sizes_bucket{le="1900"} 33 +watcher_event_sizes_bucket{le="+Inf"} 35 +watcher_event_sizes_sum 22723 +watcher_event_sizes_count 35 +# HELP watcher_events Per resources size of events emitted +# TYPE watcher_events histogram +watcher_events_bucket{resource="/auth_server",le="0"} 0 +watcher_events_bucket{resource="/auth_server",le="200"} 0 +watcher_events_bucket{resource="/auth_server",le="400"} 1 +watcher_events_bucket{resource="/auth_server",le="600"} 1 +watcher_events_bucket{resource="/auth_server",le="800"} 1 +watcher_events_bucket{resource="/auth_server",le="+Inf"} 1 +watcher_events_sum{resource="/auth_server"} 212 +watcher_events_count{resource="/auth_server"} 1 +watcher_events_bucket{resource="/cert_authority/db",le="0"} 0 
+watcher_events_bucket{resource="/cert_authority/db",le="200"} 0 +watcher_events_bucket{resource="/cert_authority/db",le="400"} 0 +watcher_events_bucket{resource="/cert_authority/db",le="600"} 0 +watcher_events_bucket{resource="/cert_authority/db",le="800"} 0 +watcher_events_bucket{resource="/cert_authority/db",le="+Inf"} 1 +watcher_events_sum{resource="/cert_authority/db"} 1420 +watcher_events_count{resource="/cert_authority/db"} 1 +watcher_events_bucket{resource="/cert_authority/host",le="0"} 0 +watcher_events_bucket{resource="/cert_authority/host",le="200"} 0 +watcher_events_bucket{resource="/cert_authority/host",le="400"} 0 +watcher_events_bucket{resource="/cert_authority/host",le="600"} 0 +watcher_events_bucket{resource="/cert_authority/host",le="800"} 0 +watcher_events_bucket{resource="/cert_authority/host",le="+Inf"} 2 +watcher_events_sum{resource="/cert_authority/host"} 3630 +watcher_events_count{resource="/cert_authority/host"} 2 +watcher_events_bucket{resource="/cert_authority/jwt",le="0"} 0 +watcher_events_bucket{resource="/cert_authority/jwt",le="200"} 0 +watcher_events_bucket{resource="/cert_authority/jwt",le="400"} 0 +watcher_events_bucket{resource="/cert_authority/jwt",le="600"} 1 +watcher_events_bucket{resource="/cert_authority/jwt",le="800"} 1 +watcher_events_bucket{resource="/cert_authority/jwt",le="+Inf"} 1 +watcher_events_sum{resource="/cert_authority/jwt"} 534 +watcher_events_count{resource="/cert_authority/jwt"} 1 +watcher_events_bucket{resource="/cert_authority/oidc_idp",le="0"} 0 +watcher_events_bucket{resource="/cert_authority/oidc_idp",le="200"} 0 +watcher_events_bucket{resource="/cert_authority/oidc_idp",le="400"} 0 +watcher_events_bucket{resource="/cert_authority/oidc_idp",le="600"} 1 +watcher_events_bucket{resource="/cert_authority/oidc_idp",le="800"} 1 +watcher_events_bucket{resource="/cert_authority/oidc_idp",le="+Inf"} 1 +watcher_events_sum{resource="/cert_authority/oidc_idp"} 544 
+watcher_events_count{resource="/cert_authority/oidc_idp"} 1 +watcher_events_bucket{resource="/cert_authority/openssh",le="0"} 0 +watcher_events_bucket{resource="/cert_authority/openssh",le="200"} 0 +watcher_events_bucket{resource="/cert_authority/openssh",le="400"} 0 +watcher_events_bucket{resource="/cert_authority/openssh",le="600"} 1 +watcher_events_bucket{resource="/cert_authority/openssh",le="800"} 1 +watcher_events_bucket{resource="/cert_authority/openssh",le="+Inf"} 1 +watcher_events_sum{resource="/cert_authority/openssh"} 497 +watcher_events_count{resource="/cert_authority/openssh"} 1 +watcher_events_bucket{resource="/cert_authority/saml_idp",le="0"} 0 +watcher_events_bucket{resource="/cert_authority/saml_idp",le="200"} 0 +watcher_events_bucket{resource="/cert_authority/saml_idp",le="400"} 0 +watcher_events_bucket{resource="/cert_authority/saml_idp",le="600"} 0 +watcher_events_bucket{resource="/cert_authority/saml_idp",le="800"} 0 +watcher_events_bucket{resource="/cert_authority/saml_idp",le="+Inf"} 1 +watcher_events_sum{resource="/cert_authority/saml_idp"} 1436 +watcher_events_count{resource="/cert_authority/saml_idp"} 1 +watcher_events_bucket{resource="/cert_authority/user",le="0"} 0 +watcher_events_bucket{resource="/cert_authority/user",le="200"} 0 +watcher_events_bucket{resource="/cert_authority/user",le="400"} 0 +watcher_events_bucket{resource="/cert_authority/user",le="600"} 0 +watcher_events_bucket{resource="/cert_authority/user",le="800"} 0 +watcher_events_bucket{resource="/cert_authority/user",le="+Inf"} 2 +watcher_events_sum{resource="/cert_authority/user"} 3622 +watcher_events_count{resource="/cert_authority/user"} 2 +watcher_events_bucket{resource="/cluster_audit_config",le="0"} 0 +watcher_events_bucket{resource="/cluster_audit_config",le="200"} 2 +watcher_events_bucket{resource="/cluster_audit_config",le="400"} 2 +watcher_events_bucket{resource="/cluster_audit_config",le="600"} 2 +watcher_events_bucket{resource="/cluster_audit_config",le="800"} 
2 +watcher_events_bucket{resource="/cluster_audit_config",le="+Inf"} 2 +watcher_events_sum{resource="/cluster_audit_config"} 156 +watcher_events_count{resource="/cluster_audit_config"} 2 +watcher_events_bucket{resource="/cluster_auth_preference",le="0"} 0 +watcher_events_bucket{resource="/cluster_auth_preference",le="200"} 2 +watcher_events_bucket{resource="/cluster_auth_preference",le="400"} 2 +watcher_events_bucket{resource="/cluster_auth_preference",le="600"} 2 +watcher_events_bucket{resource="/cluster_auth_preference",le="800"} 2 +watcher_events_bucket{resource="/cluster_auth_preference",le="+Inf"} 2 +watcher_events_sum{resource="/cluster_auth_preference"} 342 +watcher_events_count{resource="/cluster_auth_preference"} 2 +watcher_events_bucket{resource="/cluster_name",le="0"} 0 +watcher_events_bucket{resource="/cluster_name",le="200"} 2 +watcher_events_bucket{resource="/cluster_name",le="400"} 2 +watcher_events_bucket{resource="/cluster_name",le="600"} 2 +watcher_events_bucket{resource="/cluster_name",le="800"} 2 +watcher_events_bucket{resource="/cluster_name",le="+Inf"} 2 +watcher_events_sum{resource="/cluster_name"} 232 +watcher_events_count{resource="/cluster_name"} 2 +watcher_events_bucket{resource="/cluster_networking_config",le="0"} 0 +watcher_events_bucket{resource="/cluster_networking_config",le="200"} 2 +watcher_events_bucket{resource="/cluster_networking_config",le="400"} 2 +watcher_events_bucket{resource="/cluster_networking_config",le="600"} 2 +watcher_events_bucket{resource="/cluster_networking_config",le="800"} 2 +watcher_events_bucket{resource="/cluster_networking_config",le="+Inf"} 2 +watcher_events_sum{resource="/cluster_networking_config"} 280 +watcher_events_count{resource="/cluster_networking_config"} 2 +watcher_events_bucket{resource="/namespace",le="0"} 0 +watcher_events_bucket{resource="/namespace",le="200"} 2 +watcher_events_bucket{resource="/namespace",le="400"} 2 +watcher_events_bucket{resource="/namespace",le="600"} 2 
+watcher_events_bucket{resource="/namespace",le="800"} 2 +watcher_events_bucket{resource="/namespace",le="+Inf"} 2 +watcher_events_sum{resource="/namespace"} 102 +watcher_events_count{resource="/namespace"} 2 +watcher_events_bucket{resource="/node/teleport",le="0"} 0 +watcher_events_bucket{resource="/node/teleport",le="200"} 0 +watcher_events_bucket{resource="/node/teleport",le="400"} 2 +watcher_events_bucket{resource="/node/teleport",le="600"} 2 +watcher_events_bucket{resource="/node/teleport",le="800"} 2 +watcher_events_bucket{resource="/node/teleport",le="+Inf"} 2 +watcher_events_sum{resource="/node/teleport"} 481 +watcher_events_count{resource="/node/teleport"} 2 +watcher_events_bucket{resource="/proxy",le="0"} 0 +watcher_events_bucket{resource="/proxy",le="200"} 0 +watcher_events_bucket{resource="/proxy",le="400"} 1 +watcher_events_bucket{resource="/proxy",le="600"} 1 +watcher_events_bucket{resource="/proxy",le="800"} 1 +watcher_events_bucket{resource="/proxy",le="+Inf"} 1 +watcher_events_sum{resource="/proxy"} 202 +watcher_events_count{resource="/proxy"} 1 +watcher_events_bucket{resource="/role",le="0"} 0 +watcher_events_bucket{resource="/role",le="200"} 0 +watcher_events_bucket{resource="/role",le="400"} 0 +watcher_events_bucket{resource="/role",le="600"} 2 +watcher_events_bucket{resource="/role",le="800"} 4 +watcher_events_bucket{resource="/role",le="+Inf"} 6 +watcher_events_sum{resource="/role"} 7586 +watcher_events_count{resource="/role"} 6 +watcher_events_bucket{resource="/session_recording_config",le="0"} 0 +watcher_events_bucket{resource="/session_recording_config",le="200"} 2 +watcher_events_bucket{resource="/session_recording_config",le="400"} 2 +watcher_events_bucket{resource="/session_recording_config",le="600"} 2 +watcher_events_bucket{resource="/session_recording_config",le="800"} 2 +watcher_events_bucket{resource="/session_recording_config",le="+Inf"} 2 +watcher_events_sum{resource="/session_recording_config"} 254 
+watcher_events_count{resource="/session_recording_config"} 2 +watcher_events_bucket{resource="/watch_status",le="0"} 0 +watcher_events_bucket{resource="/watch_status",le="200"} 2 +watcher_events_bucket{resource="/watch_status",le="400"} 3 +watcher_events_bucket{resource="/watch_status",le="600"} 3 +watcher_events_bucket{resource="/watch_status",le="800"} 4 +watcher_events_bucket{resource="/watch_status",le="+Inf"} 4 +watcher_events_sum{resource="/watch_status"} 1193 +watcher_events_count{resource="/watch_status"} 4 + +# HELP audit_failed_disk_monitoring Number of times disk monitoring failed. +# TYPE audit_failed_disk_monitoring counter +audit_failed_disk_monitoring 14 + +# HELP audit_failed_emit_events Number of times emitting audit events failed. +# TYPE audit_failed_emit_events counter +audit_failed_emit_events 7 + +# HELP audit_percentage_disk_space_used Percentage of disk space used. +# TYPE audit_percentage_disk_space_used gauge +audit_percentage_disk_space_used 0.35 + +# HELP audit_server_open_files Number of open audit files. +# TYPE audit_server_open_files gauge +audit_server_open_files 30 + +# HELP auth_generate_requests_throttled_total Number of throttled requests to generate new server keys. +# TYPE auth_generate_requests_throttled_total counter +auth_generate_requests_throttled_total 20 + +# HELP auth_generate_requests_total Number of requests to generate new server keys. +# TYPE auth_generate_requests_total counter +auth_generate_requests_total 90 + +# HELP auth_generate_requests Number of current generate requests. +# TYPE auth_generate_requests gauge +auth_generate_requests 15 + +# HELP auth_generate_seconds Latency for generate requests. 
+# TYPE auth_generate_seconds histogram +auth_generate_seconds_bucket{le="0.005"} 0 +auth_generate_seconds_bucket{le="0.01"} 1 +auth_generate_seconds_bucket{le="0.025"} 3 +auth_generate_seconds_bucket{le="0.05"} 5 +auth_generate_seconds_bucket{le="0.075"} 7 +auth_generate_seconds_bucket{le="0.1"} 8 +auth_generate_seconds_bucket{le="0.25"} 10 +auth_generate_seconds_bucket{le="0.5"} 14 +auth_generate_seconds_bucket{le="0.75"} 20 +auth_generate_seconds_bucket{le="1"} 25 +auth_generate_seconds_bucket{le="+Inf"} 30 +auth_generate_seconds_sum 33.0 +auth_generate_seconds_count 30 + +# TYPE teleport_audit_emit_events counter +teleport_audit_emit_events_total 0 + +# TYPE teleport_audit_parquetlog_batch_processing_seconds histogram +teleport_audit_parquetlog_batch_processing_seconds_bucket{le="0.005"} 0 +teleport_audit_parquetlog_batch_processing_seconds_bucket{le="0.01"} 0 +teleport_audit_parquetlog_batch_processing_seconds_bucket{le="0.025"} 0 +teleport_audit_parquetlog_batch_processing_seconds_bucket{le="0.05"} 0 +teleport_audit_parquetlog_batch_processing_seconds_bucket{le="0.1"} 0 +teleport_audit_parquetlog_batch_processing_seconds_bucket{le="0.25"} 0 +teleport_audit_parquetlog_batch_processing_seconds_bucket{le="0.5"} 0 +teleport_audit_parquetlog_batch_processing_seconds_bucket{le="1"} 0 +teleport_audit_parquetlog_batch_processing_seconds_bucket{le="2.5"} 0 +teleport_audit_parquetlog_batch_processing_seconds_bucket{le="5"} 0 +teleport_audit_parquetlog_batch_processing_seconds_bucket{le="10"} 0 +teleport_audit_parquetlog_batch_processing_seconds_bucket{le="+Inf"} 0 +teleport_audit_parquetlog_batch_processing_seconds_sum 0 +teleport_audit_parquetlog_batch_processing_seconds_count 0 + +# TYPE teleport_audit_parquetlog_s3_flush_seconds histogram +teleport_audit_parquetlog_s3_flush_seconds_bucket{le="0.005"} 0 +teleport_audit_parquetlog_s3_flush_seconds_bucket{le="0.01"} 0 +teleport_audit_parquetlog_s3_flush_seconds_bucket{le="0.025"} 0 
+teleport_audit_parquetlog_s3_flush_seconds_bucket{le="0.05"} 0 +teleport_audit_parquetlog_s3_flush_seconds_bucket{le="0.1"} 0 +teleport_audit_parquetlog_s3_flush_seconds_bucket{le="0.25"} 0 +teleport_audit_parquetlog_s3_flush_seconds_bucket{le="0.5"} 0 +teleport_audit_parquetlog_s3_flush_seconds_bucket{le="1"} 0 +teleport_audit_parquetlog_s3_flush_seconds_bucket{le="2.5"} 0 +teleport_audit_parquetlog_s3_flush_seconds_bucket{le="5"} 0 +teleport_audit_parquetlog_s3_flush_seconds_bucket{le="10"} 0 +teleport_audit_parquetlog_s3_flush_seconds_bucket{le="+Inf"} 0 +teleport_audit_parquetlog_s3_flush_seconds_sum 0 +teleport_audit_parquetlog_s3_flush_seconds_count 0 + +# TYPE teleport_audit_parquetlog_delete_events_seconds histogram +teleport_audit_parquetlog_delete_events_seconds_bucket{le="0.005"} 0 +teleport_audit_parquetlog_delete_events_seconds_bucket{le="0.01"} 0 +teleport_audit_parquetlog_delete_events_seconds_bucket{le="0.025"} 0 +teleport_audit_parquetlog_delete_events_seconds_bucket{le="0.05"} 0 +teleport_audit_parquetlog_delete_events_seconds_bucket{le="0.1"} 0 +teleport_audit_parquetlog_delete_events_seconds_bucket{le="0.25"} 0 +teleport_audit_parquetlog_delete_events_seconds_bucket{le="0.5"} 0 +teleport_audit_parquetlog_delete_events_seconds_bucket{le="1"} 0 +teleport_audit_parquetlog_delete_events_seconds_bucket{le="2.5"} 0 +teleport_audit_parquetlog_delete_events_seconds_bucket{le="5"} 0 +teleport_audit_parquetlog_delete_events_seconds_bucket{le="10"} 0 +teleport_audit_parquetlog_delete_events_seconds_bucket{le="+Inf"} 0 +teleport_audit_parquetlog_delete_events_seconds_sum 0 +teleport_audit_parquetlog_delete_events_seconds_count 0 + +# TYPE teleport_audit_parquetlog_batch_size histogram +teleport_audit_parquetlog_batch_size_bucket{le="500"} 0 +teleport_audit_parquetlog_batch_size_bucket{le="1000"} 0 +teleport_audit_parquetlog_batch_size_bucket{le="1500"} 0 +teleport_audit_parquetlog_batch_size_bucket{le="2000"} 0 
+teleport_audit_parquetlog_batch_size_bucket{le="2500"} 0 +teleport_audit_parquetlog_batch_size_bucket{le="3000"} 0 +teleport_audit_parquetlog_batch_size_bucket{le="3500"} 0 +teleport_audit_parquetlog_batch_size_bucket{le="4000"} 0 +teleport_audit_parquetlog_batch_size_bucket{le="4500"} 0 +teleport_audit_parquetlog_batch_size_bucket{le="5000"} 0 +teleport_audit_parquetlog_batch_size_bucket{le="+Inf"} 0 +teleport_audit_parquetlog_batch_size_sum 0 +teleport_audit_parquetlog_batch_size_count 0 + +# TYPE teleport_audit_parquetlog_batch_count counter +teleport_audit_parquetlog_batch_count 0 + +# TYPE teleport_audit_parquetlog_last_processed_timestamp gauge +teleport_audit_parquetlog_last_processed_timestamp 0 + +# TYPE teleport_audit_parquetlog_age_oldest_processed_message gauge +teleport_audit_parquetlog_age_oldest_processed_message 0 + +# TYPE teleport_audit_parquetlog_errors_from_collect_count counter +teleport_audit_parquetlog_errors_from_collect_count 0 + +# TYPE teleport_connected_resources gauge +teleport_connected_resources 0 + +# TYPE teleport_registered_servers gauge +teleport_registered_servers 0 + +# TYPE teleport_registered_servers_by_install_methods gauge +teleport_registered_servers_by_install_methods 0 + +# TYPE user_login_total counter +user_login_total 0 + +# TYPE teleport_migrations gauge +teleport_migrations 0 + +# HELP s3_requests_total Total number of requests to the S3 API. +# TYPE s3_requests_total counter +s3_requests_total 4000 + +# HELP s3_requests Total number of requests to the S3 API grouped by result. 
+# TYPE s3_requests counter +s3_requests{result="success"} 3200 +s3_requests{result="failure"} 800 + +# TYPE s3_requests_seconds histogram +s3_requests_seconds_bucket{le="0.005"} 0 +s3_requests_seconds_bucket{le="0.01"} 0 +s3_requests_seconds_bucket{le="0.025"} 0 +s3_requests_seconds_bucket{le="0.05"} 0 +s3_requests_seconds_bucket{le="0.1"} 0 +s3_requests_seconds_bucket{le="0.25"} 0 +s3_requests_seconds_bucket{le="0.5"} 0 +s3_requests_seconds_bucket{le="1"} 0 +s3_requests_seconds_bucket{le="2.5"} 0 +s3_requests_seconds_bucket{le="5"} 0 +s3_requests_seconds_bucket{le="10"} 0 +s3_requests_seconds_bucket{le="+Inf"} 0 +s3_requests_seconds_sum 0 +s3_requests_seconds_count 0 + + +# HELP dynamo_requests_total Total number of requests to the DYNAMO API. +# TYPE dynamo_requests_total counter +dynamo_requests_total 4000 + +# HELP dynamo_requests Total number of requests to the DYNAMO API grouped by result. +# TYPE dynamo_requests counter +dynamo_requests{result="success"} 3200 +dynamo_requests{result="failure"} 800 + +# TYPE dynamo_requests_seconds histogram +dynamo_requests_seconds_bucket{le="0.005"} 0 +dynamo_requests_seconds_bucket{le="0.01"} 0 +dynamo_requests_seconds_bucket{le="0.025"} 0 +dynamo_requests_seconds_bucket{le="0.05"} 0 +dynamo_requests_seconds_bucket{le="0.1"} 0 +dynamo_requests_seconds_bucket{le="0.25"} 0 +dynamo_requests_seconds_bucket{le="0.5"} 0 +dynamo_requests_seconds_bucket{le="1"} 0 +dynamo_requests_seconds_bucket{le="2.5"} 0 +dynamo_requests_seconds_bucket{le="5"} 0 +dynamo_requests_seconds_bucket{le="10"} 0 +dynamo_requests_seconds_bucket{le="+Inf"} 0 +dynamo_requests_seconds_sum 0 +dynamo_requests_seconds_count 0 + + +# HELP firestore_events_backend_batch_read_requests Number of batch read requests to Cloud Firestore events. +# TYPE firestore_events_backend_batch_read_requests counter +firestore_events_backend_batch_read_requests 150 + +# HELP firestore_events_backend_batch_read_seconds Latency for Cloud Firestore events batch read operations. 
+# TYPE firestore_events_backend_batch_read_seconds histogram +firestore_events_backend_batch_read_seconds_bucket{le="0.005"} 250 +firestore_events_backend_batch_read_seconds_bucket{le="0.01"} 500 +firestore_events_backend_batch_read_seconds_bucket{le="0.025"} 750 +firestore_events_backend_batch_read_seconds_bucket{le="0.05"} 1000 +firestore_events_backend_batch_read_seconds_bucket{le="0.075"} 1200 +firestore_events_backend_batch_read_seconds_bucket{le="0.1"} 1500 +firestore_events_backend_batch_read_seconds_bucket{le="0.25"} 2000 +firestore_events_backend_batch_read_seconds_bucket{le="0.5"} 2500 +firestore_events_backend_batch_read_seconds_bucket{le="0.75"} 2750 +firestore_events_backend_batch_read_seconds_bucket{le="1"} 3000 +firestore_events_backend_batch_read_seconds_bucket{le="+Inf"} 3200 +firestore_events_backend_batch_read_seconds_count 3200 +firestore_events_backend_batch_read_seconds_sum 800 + +# HELP firestore_events_backend_batch_write_requests Number of batch write requests to Cloud Firestore events. +# TYPE firestore_events_backend_batch_write_requests counter +firestore_events_backend_batch_write_requests 4800 + +# HELP firestore_events_backend_batch_write_seconds Latency for Cloud Firestore events batch write operations. 
+# TYPE firestore_events_backend_batch_write_seconds histogram +firestore_events_backend_batch_write_seconds_bucket{le="0.005"} 1000 +firestore_events_backend_batch_write_seconds_bucket{le="0.01"} 2000 +firestore_events_backend_batch_write_seconds_bucket{le="0.025"} 3000 +firestore_events_backend_batch_write_seconds_bucket{le="0.05"} 4000 +firestore_events_backend_batch_write_seconds_bucket{le="0.075"} 5000 +firestore_events_backend_batch_write_seconds_bucket{le="0.1"} 6000 +firestore_events_backend_batch_write_seconds_bucket{le="0.25"} 7000 +firestore_events_backend_batch_write_seconds_bucket{le="0.5"} 8000 +firestore_events_backend_batch_write_seconds_bucket{le="0.75"} 9000 +firestore_events_backend_batch_write_seconds_bucket{le="1"} 10000 +firestore_events_backend_batch_write_seconds_bucket{le="+Inf"} 11000 +firestore_events_backend_batch_write_seconds_count 11000 +firestore_events_backend_batch_write_seconds_sum 3500 + +# HELP firestore_events_backend_write_requests Number of write requests to Cloud Firestore events. +# TYPE firestore_events_backend_write_requests counter +firestore_events_backend_write_requests 7500 + +# HELP firestore_events_backend_write_seconds Latency for Cloud Firestore events write operations. 
+# TYPE firestore_events_backend_write_seconds histogram +firestore_events_backend_write_seconds_bucket{le="0.005"} 1500 +firestore_events_backend_write_seconds_bucket{le="0.01"} 3000 +firestore_events_backend_write_seconds_bucket{le="0.025"} 4500 +firestore_events_backend_write_seconds_bucket{le="0.05"} 6000 +firestore_events_backend_write_seconds_bucket{le="0.075"} 7500 +firestore_events_backend_write_seconds_bucket{le="0.1"} 9000 +firestore_events_backend_write_seconds_bucket{le="0.25"} 10500 +firestore_events_backend_write_seconds_bucket{le="0.5"} 12000 +firestore_events_backend_write_seconds_bucket{le="0.75"} 13500 +firestore_events_backend_write_seconds_bucket{le="1"} 15000 +firestore_events_backend_write_seconds_bucket{le="+Inf"} 16500 +firestore_events_backend_write_seconds_count 16500 +firestore_events_backend_write_seconds_sum 5250 + + +# HELP gcs_event_storage_downloads_seconds Latency for GCS download operations. +# TYPE gcs_event_storage_downloads_seconds histogram +gcs_event_storage_downloads_seconds_bucket{le="0.005"} 100 +gcs_event_storage_downloads_seconds_bucket{le="0.01"} 300 +gcs_event_storage_downloads_seconds_bucket{le="0.025"} 500 +gcs_event_storage_downloads_seconds_bucket{le="0.05"} 700 +gcs_event_storage_downloads_seconds_bucket{le="0.075"} 900 +gcs_event_storage_downloads_seconds_bucket{le="0.1"} 1100 +gcs_event_storage_downloads_seconds_bucket{le="0.25"} 1300 +gcs_event_storage_downloads_seconds_bucket{le="0.5"} 1500 +gcs_event_storage_downloads_seconds_bucket{le="0.75"} 1700 +gcs_event_storage_downloads_seconds_bucket{le="1"} 1900 +gcs_event_storage_downloads_seconds_bucket{le="+Inf"} 2000 +gcs_event_storage_downloads_seconds_count 2000 +gcs_event_storage_downloads_seconds_sum 600 + +# HELP gcs_event_storage_downloads Number of downloads from the GCS backend. +# TYPE gcs_event_storage_downloads counter +gcs_event_storage_downloads 4500 + +# HELP gcs_event_storage_uploads_seconds Latency for GCS upload operations. 
+# TYPE gcs_event_storage_uploads_seconds histogram +gcs_event_storage_uploads_seconds_bucket{le="0.005"} 200 +gcs_event_storage_uploads_seconds_bucket{le="0.01"} 400 +gcs_event_storage_uploads_seconds_bucket{le="0.025"} 600 +gcs_event_storage_uploads_seconds_bucket{le="0.05"} 800 +gcs_event_storage_uploads_seconds_bucket{le="0.075"} 1000 +gcs_event_storage_uploads_seconds_bucket{le="0.1"} 1200 +gcs_event_storage_uploads_seconds_bucket{le="0.25"} 1400 +gcs_event_storage_uploads_seconds_bucket{le="0.5"} 1600 +gcs_event_storage_uploads_seconds_bucket{le="0.75"} 1800 +gcs_event_storage_uploads_seconds_bucket{le="1"} 2000 +gcs_event_storage_uploads_seconds_bucket{le="+Inf"} 2200 +gcs_event_storage_uploads_seconds_count 2200 +gcs_event_storage_uploads_seconds_sum 700 + +# HELP gcs_event_storage_uploads Number of uploads to the GCS backend. +# TYPE gcs_event_storage_uploads counter +gcs_event_storage_uploads 5000 + + +# HELP etcd_backend_batch_read_requests Number of read requests to the etcd database. +# TYPE etcd_backend_batch_read_requests counter +etcd_backend_batch_read_requests 200 + +# HELP etcd_backend_batch_read_seconds Latency for etcd read operations. +# TYPE etcd_backend_batch_read_seconds histogram +etcd_backend_batch_read_seconds_bucket{le="0.005"} 20 +etcd_backend_batch_read_seconds_bucket{le="0.01"} 40 +etcd_backend_batch_read_seconds_bucket{le="0.025"} 80 +etcd_backend_batch_read_seconds_bucket{le="0.05"} 120 +etcd_backend_batch_read_seconds_bucket{le="0.075"} 150 +etcd_backend_batch_read_seconds_bucket{le="0.1"} 160 +etcd_backend_batch_read_seconds_bucket{le="0.25"} 180 +etcd_backend_batch_read_seconds_bucket{le="0.5"} 190 +etcd_backend_batch_read_seconds_bucket{le="0.75"} 195 +etcd_backend_batch_read_seconds_bucket{le="1"} 200 +etcd_backend_batch_read_seconds_bucket{le="+Inf"} 200 +etcd_backend_batch_read_seconds_sum 400.0 +etcd_backend_batch_read_seconds_count 200 + +# HELP etcd_backend_read_requests Number of read requests to the etcd database. 
+# TYPE etcd_backend_read_requests counter +etcd_backend_read_requests 1000 + +# HELP etcd_backend_read_seconds Latency for etcd read operations. +# TYPE etcd_backend_read_seconds histogram +etcd_backend_read_seconds_bucket{le="0.005"} 100 +etcd_backend_read_seconds_bucket{le="0.01"} 200 +etcd_backend_read_seconds_bucket{le="0.025"} 400 +etcd_backend_read_seconds_bucket{le="0.05"} 600 +etcd_backend_read_seconds_bucket{le="0.075"} 800 +etcd_backend_read_seconds_bucket{le="0.1"} 900 +etcd_backend_read_seconds_bucket{le="0.25"} 950 +etcd_backend_read_seconds_bucket{le="0.5"} 975 +etcd_backend_read_seconds_bucket{le="0.75"} 990 +etcd_backend_read_seconds_bucket{le="1"} 1000 +etcd_backend_read_seconds_bucket{le="+Inf"} 1000 +etcd_backend_read_seconds_sum 2000.0 +etcd_backend_read_seconds_count 1000 + +# HELP etcd_backend_tx_requests Number of transaction requests to the database. +# TYPE etcd_backend_tx_requests counter +etcd_backend_tx_requests 500 + +# HELP etcd_backend_tx_seconds Latency for etcd transaction operations. +# TYPE etcd_backend_tx_seconds histogram +etcd_backend_tx_seconds_bucket{le="0.005"} 50 +etcd_backend_tx_seconds_bucket{le="0.01"} 100 +etcd_backend_tx_seconds_bucket{le="0.025"} 200 +etcd_backend_tx_seconds_bucket{le="0.05"} 300 +etcd_backend_tx_seconds_bucket{le="0.075"} 400 +etcd_backend_tx_seconds_bucket{le="0.1"} 450 +etcd_backend_tx_seconds_bucket{le="0.25"} 475 +etcd_backend_tx_seconds_bucket{le="0.5"} 490 +etcd_backend_tx_seconds_bucket{le="0.75"} 498 +etcd_backend_tx_seconds_bucket{le="1"} 500 +etcd_backend_tx_seconds_bucket{le="+Inf"} 500 +etcd_backend_tx_seconds_sum 1000.0 +etcd_backend_tx_seconds_count 500 + +# HELP etcd_backend_write_requests Number of write requests to the database. +# TYPE etcd_backend_write_requests counter +etcd_backend_write_requests 1200 + +# HELP etcd_backend_write_seconds Latency for etcd write operations. 
+# TYPE etcd_backend_write_seconds histogram +etcd_backend_write_seconds_bucket{le="0.005"} 120 +etcd_backend_write_seconds_bucket{le="0.01"} 240 +etcd_backend_write_seconds_bucket{le="0.025"} 480 +etcd_backend_write_seconds_bucket{le="0.05"} 720 +etcd_backend_write_seconds_bucket{le="0.075"} 960 +etcd_backend_write_seconds_bucket{le="0.1"} 1080 +etcd_backend_write_seconds_bucket{le="0.25"} 1140 +etcd_backend_write_seconds_bucket{le="0.5"} 1176 +etcd_backend_write_seconds_bucket{le="0.75"} 1194 +etcd_backend_write_seconds_bucket{le="1"} 1200 +etcd_backend_write_seconds_bucket{le="+Inf"} 1200 +etcd_backend_write_seconds_sum 2400.0 +etcd_backend_write_seconds_count 1200 + +# HELP teleport_etcd_events Total number of etcd events processed. +# TYPE teleport_etcd_events counter +teleport_etcd_events 2000 + +# HELP teleport_etcd_event_backpressure Total number of times event processing encountered backpressure. +# TYPE teleport_etcd_event_backpressure counter +teleport_etcd_event_backpressure 3 + +# TYPE teleport_kubernetes_client_in_flight_requests gauge +# HELP teleport_kubernetes_client_in_flight_requests Teleport Kubernetes Service In-flight requests waiting for the upstream response. +teleport_kubernetes_client_in_flight_requests 0 + +# TYPE teleport_kubernetes_client_requests_total counter +# HELP teleport_kubernetes_client_requests_total Teleport Kubernetes Service Total number of requests sent to the upstream teleport proxy, kube_service or Kubernetes Cluster servers. +teleport_kubernetes_client_requests_total 0 + +# TYPE teleport_kubernetes_client_tls_duration_seconds histogram +# HELP teleport_kubernetes_client_tls_duration_seconds Teleport Kubernetes Service Latency distribution of TLS handshakes. 
+teleport_kubernetes_client_tls_duration_seconds_bucket{le="0.005"} 0 +teleport_kubernetes_client_tls_duration_seconds_bucket{le="0.01"} 0 +teleport_kubernetes_client_tls_duration_seconds_bucket{le="+Inf"} 0 +teleport_kubernetes_client_tls_duration_seconds_sum 0 +teleport_kubernetes_client_tls_duration_seconds_count 0 + +# TYPE teleport_kubernetes_client_got_conn_duration_seconds histogram +# HELP teleport_kubernetes_client_got_conn_duration_seconds Teleport Kubernetes Service Latency distribution of time to dial to the upstream server - using reversetunnel or direct dialer. +teleport_kubernetes_client_got_conn_duration_seconds_bucket{le="0.005"} 0 +teleport_kubernetes_client_got_conn_duration_seconds_bucket{le="0.01"} 0 +teleport_kubernetes_client_got_conn_duration_seconds_bucket{le="+Inf"} 0 +teleport_kubernetes_client_got_conn_duration_seconds_sum 0 +teleport_kubernetes_client_got_conn_duration_seconds_count 0 + +# TYPE teleport_kubernetes_client_first_byte_response_duration_seconds histogram +# HELP teleport_kubernetes_client_first_byte_response_duration_seconds Teleport Kubernetes Service Latency distribution of time to receive the first response byte from the upstream server. +teleport_kubernetes_client_first_byte_response_duration_seconds_bucket{le="0.005"} 0 +teleport_kubernetes_client_first_byte_response_duration_seconds_bucket{le="0.01"} 0 +teleport_kubernetes_client_first_byte_response_duration_seconds_bucket{le="+Inf"} 0 +teleport_kubernetes_client_first_byte_response_duration_seconds_sum 0 +teleport_kubernetes_client_first_byte_response_duration_seconds_count 0 + +# TYPE teleport_kubernetes_client_request_duration_seconds histogram +# HELP teleport_kubernetes_client_request_duration_seconds Teleport Kubernetes Service Latency distribution of the upstream request time. 
+teleport_kubernetes_client_request_duration_seconds_bucket{le="0.005"} 0 +teleport_kubernetes_client_request_duration_seconds_bucket{le="0.01"} 0 +teleport_kubernetes_client_request_duration_seconds_bucket{le="+Inf"} 0 +teleport_kubernetes_client_request_duration_seconds_sum 0 +teleport_kubernetes_client_request_duration_seconds_count 0 + +# TYPE teleport_kubernetes_server_in_flight_requests gauge +teleport_kubernetes_server_in_flight_requests{} 5 + +# TYPE teleport_kubernetes_server_api_requests_total counter +teleport_kubernetes_server_api_requests_total{} 142 + +# TYPE teleport_kubernetes_server_request_duration_seconds histogram +teleport_kubernetes_server_request_duration_seconds_bucket{le="0.005"} 0 +teleport_kubernetes_server_request_duration_seconds_bucket{le="0.01"} 1 +teleport_kubernetes_server_request_duration_seconds_bucket{le="0.025"} 3 +teleport_kubernetes_server_request_duration_seconds_bucket{le="0.05"} 8 +teleport_kubernetes_server_request_duration_seconds_bucket{le="0.1"} 15 +teleport_kubernetes_server_request_duration_seconds_bucket{le="0.25"} 24 +teleport_kubernetes_server_request_duration_seconds_bucket{le="0.5"} 30 +teleport_kubernetes_server_request_duration_seconds_bucket{le="1"} 40 +teleport_kubernetes_server_request_duration_seconds_bucket{le="+Inf"} 50 +teleport_kubernetes_server_request_duration_seconds_sum{} 123.45 +teleport_kubernetes_server_request_duration_seconds_count{} 50 + +# TYPE teleport_kubernetes_server_response_size_bytes histogram +teleport_kubernetes_server_response_size_bytes_bucket{le="100"} 5 +teleport_kubernetes_server_response_size_bytes_bucket{le="500"} 15 +teleport_kubernetes_server_response_size_bytes_bucket{le="1000"} 20 +teleport_kubernetes_server_response_size_bytes_bucket{le="5000"} 30 +teleport_kubernetes_server_response_size_bytes_bucket{le="10000"} 40 +teleport_kubernetes_server_response_size_bytes_bucket{le="+Inf"} 50 +teleport_kubernetes_server_response_size_bytes_sum{} 1234500 
+teleport_kubernetes_server_response_size_bytes_count{} 50 + +# TYPE teleport_kubernetes_server_exec_in_flight_sessions gauge +teleport_kubernetes_server_exec_in_flight_sessions{} 4 + +# TYPE teleport_kubernetes_server_exec_sessions_total counter +teleport_kubernetes_server_exec_sessions_total{} 89 + +# TYPE teleport_kubernetes_server_portforward_in_flight_sessions gauge +teleport_kubernetes_server_portforward_in_flight_sessions{} 3 + +# TYPE teleport_kubernetes_server_portforward_sessions_total counter +teleport_kubernetes_server_portforward_sessions_total{} 107 + +# TYPE teleport_kubernetes_server_join_in_flight_sessions gauge +teleport_kubernetes_server_join_in_flight_sessions{} 2 + +# TYPE teleport_kubernetes_server_join_sessions_total counter +teleport_kubernetes_server_join_sessions_total{} 76 + +# TYPE teleport_db_messages_from_client_total counter +# HELP teleport_db_messages_from_client_total Number of messages (packets) received from the DB client +teleport_db_messages_from_client_total 10 + +# TYPE teleport_db_messages_from_server_total counter +# HELP teleport_db_messages_from_server_total Number of messages (packets) received from the DB server +teleport_db_messages_from_server_total 5 + +# TYPE teleport_db_method_call_count_total counter +# HELP teleport_db_method_call_count_total Number of times a DB method was called +teleport_db_method_call_count_total 20 + +# TYPE teleport_db_method_call_latency_seconds histogram +# HELP teleport_db_method_call_latency_seconds Call latency for a DB method calls +teleport_db_method_call_latency_seconds_bucket{le="0.005"} 5 +teleport_db_method_call_latency_seconds_bucket{le="0.01"} 10 +teleport_db_method_call_latency_seconds_bucket{le="0.025"} 15 +teleport_db_method_call_latency_seconds_bucket{le="0.05"} 18 +teleport_db_method_call_latency_seconds_bucket{le="0.1"} 20 +teleport_db_method_call_latency_seconds_count 20 +teleport_db_method_call_latency_seconds_sum 0.2 + +# TYPE teleport_db_initialized_connections_total 
counter +# HELP teleport_db_initialized_connections_total Number of initialized DB connections +teleport_db_initialized_connections_total 3 + +# TYPE teleport_db_active_connections_total gauge +# HELP teleport_db_active_connections_total Number of active DB connections +teleport_db_active_connections_total 2 + +# TYPE teleport_db_connection_durations_seconds histogram +# HELP teleport_db_connection_durations_seconds Duration of DB connection +teleport_db_connection_durations_seconds_bucket{le="1"} 3 +teleport_db_connection_durations_seconds_bucket{le="5"} 6 +teleport_db_connection_durations_seconds_bucket{le="10"} 8 +teleport_db_connection_durations_seconds_bucket{le="+Inf"} 8 +teleport_db_connection_durations_seconds_count 8 +teleport_db_connection_durations_seconds_sum 43 + +# TYPE teleport_db_connection_setup_time_seconds histogram +# HELP teleport_db_connection_setup_time_seconds Initial time to setup DB connection, before any requests are handled +teleport_db_connection_setup_time_seconds_bucket{le="0.01"} 2 +teleport_db_connection_setup_time_seconds_bucket{le="0.1"} 4 +teleport_db_connection_setup_time_seconds_bucket{le="1"} 6 +teleport_db_connection_setup_time_seconds_bucket{le="+Inf"} 6 +teleport_db_connection_setup_time_seconds_count 6 +teleport_db_connection_setup_time_seconds_sum 2.5 + +# TYPE teleport_db_errors_total counter +# HELP teleport_db_errors_total Number of synthetic DB errors sent to the client +teleport_db_errors_total 2 + +# TYPE bpf_lost_command_events counter +bpf_lost_command_events{} 320 + +# TYPE bpf_lost_disk_events counter +bpf_lost_disk_events{} 214 + +# TYPE bpf_lost_network_events counter +bpf_lost_network_events{} 587 diff --git a/teleport/tests/docker/caddy/fixtures/readyz/get.json b/teleport/tests/docker/caddy/fixtures/readyz/get.json new file mode 100644 index 0000000000000..51c30934a439d --- /dev/null +++ b/teleport/tests/docker/caddy/fixtures/readyz/get.json @@ -0,0 +1 @@ +{ "status": "ok" } diff --git 
a/teleport/tests/docker/caddy/metrics_mock_randomizer.py b/teleport/tests/docker/caddy/metrics_mock_randomizer.py new file mode 100644 index 0000000000000..28ab1494cedbb --- /dev/null +++ b/teleport/tests/docker/caddy/metrics_mock_randomizer.py @@ -0,0 +1,83 @@ +import random +import sys +import time + + +def main(): + if len(sys.argv) < 2: + sys.exit(0) + + metrics_file = sys.argv[1] + + while True: + read_and_get_new_metrics(metrics_file) + sleep_time = random.randint(1, 30) + time.sleep(sleep_time) + + +def read_and_get_new_metrics(metrics_file): + new_content = [] + + with open(metrics_file, "r") as metrics: + for line in metrics.readlines(): + line = line.rstrip('\n') + if line == "": + new_content.append(line) + continue + if line.startswith("#"): + new_content.append(line) + continue + + fields = line.split(" ") + + metric_name = fields[0] + value_str = fields[-1] + + is_percentage = "percentage" in metric_name + + value = None + is_float = False + + if "." in value_str: + is_float = True + value = float(value_str) + else: + value = int(value_str) + + new_value = modify_value(value, is_percentage, is_float) + new_line = fields[:-1] + if metric_name == "process_state": + new_value = random.choice([0, 1, 2, 3]) + + if "bytes" in metric_name: + new_value *= random.choice([10, 1000, 10000]) + new_value = new_value % 686047984 + new_line.append(str(new_value)) + new_content.append(" ".join(new_line)) + + with open(metrics_file, "w") as metrics: + new_file_content = "\n".join(new_content) + metrics.write(new_file_content) + + +def modify_value(value, is_percentage, is_float): + if is_float and value == 0.0: + value = random.uniform(0.0, 0.2) + if not is_float and value == 0: + value = random.randint(0, 20) + change_percent = random.uniform(0.05, 0.1) + change_direction = random.choice([1, -1]) + change = value * (change_percent * change_direction) + new_value = value + change + + if is_percentage: + new_value = min(new_value, 1.0) + new_value = max(new_value, 0.0) 
+ + if not is_float: + return int(new_value) + return new_value + + +if __name__ == "__main__": + main() diff --git a/teleport/tests/docker/docker-compose.yaml b/teleport/tests/docker/teleport/docker-compose.yaml similarity index 80% rename from teleport/tests/docker/docker-compose.yaml rename to teleport/tests/docker/teleport/docker-compose.yaml index 6956aa04df3e5..a621c33f9a542 100644 --- a/teleport/tests/docker/docker-compose.yaml +++ b/teleport/tests/docker/teleport/docker-compose.yaml @@ -1,5 +1,5 @@ services: - teleport: + teleport-service: image: public.ecr.aws/gravitational/teleport:14.3 ports: - 3000:3000 @@ -9,3 +9,4 @@ services: volumes: - ./etc/teleport:/etc/teleport command: --diag-addr=0.0.0.0:3000 + container_name: teleport-service diff --git a/teleport/tests/docker/etc/teleport/teleport.yaml b/teleport/tests/docker/teleport/etc/teleport/teleport.yaml similarity index 100% rename from teleport/tests/docker/etc/teleport/teleport.yaml rename to teleport/tests/docker/teleport/etc/teleport/teleport.yaml diff --git a/teleport/tests/test_e2e.py b/teleport/tests/test_e2e.py index e515410bea863..6810cfb96afca 100644 --- a/teleport/tests/test_e2e.py +++ b/teleport/tests/test_e2e.py @@ -4,17 +4,21 @@ import pytest -from .common import COMMON_METRICS, INSTANCE +from .common import COMMON_METRICS, INSTANCE, USE_TELEPORT_CADDY + +pytestmark = [ + pytest.mark.e2e, + pytest.mark.skipif(not USE_TELEPORT_CADDY, reason="Only run e2e tests on caddy environment"), +] -pytestmark = pytest.mark.e2e CONFIG = { - 'init_config': {}, - 'instances': [INSTANCE], + "init_config": {}, + "instances": [INSTANCE], } def test_teleport_e2e(dd_agent_check): - aggregator = dd_agent_check(CONFIG) + aggregator = dd_agent_check() aggregator.assert_metric("teleport.health.up", value=1, count=1, tags=["teleport_status:ok"]) aggregator.assert_metric(f"teleport.{COMMON_METRICS[0]}") diff --git a/teleport/tests/test_integration.py b/teleport/tests/test_integration.py index 
76e5ac2b0cda3..36a414fee2e3e 100644 --- a/teleport/tests/test_integration.py +++ b/teleport/tests/test_integration.py @@ -6,9 +6,13 @@ from datadog_checks.teleport import TeleportCheck -from .common import COMMON_METRICS +from .common import COMMON_METRICS, USE_TELEPORT_CADDY -pytestmark = [pytest.mark.integration, pytest.mark.usefixtures("dd_environment")] +pytestmark = [ + pytest.mark.integration, + pytest.mark.usefixtures("dd_environment"), + pytest.mark.skipif(not USE_TELEPORT_CADDY, reason="Only run integration tests on non-caddy environment"), +] def test_connect_ok(aggregator, instance, dd_run_check): diff --git a/temporal_cloud/CHANGELOG.md b/temporal_cloud/CHANGELOG.md index a64023ed3d344..b457162511a56 100644 --- a/temporal_cloud/CHANGELOG.md +++ b/temporal_cloud/CHANGELOG.md @@ -1,4 +1,4 @@ -# CHANGELOG - Temporal_Cloud +# CHANGELOG - Temporal Cloud ## 1.0.0 / 2024-11-26 diff --git a/temporal_cloud/README.md b/temporal_cloud/README.md index c780d1f397562..13dc7db2355a0 100644 --- a/temporal_cloud/README.md +++ b/temporal_cloud/README.md @@ -1,41 +1,56 @@ ## Overview -This check monitors [Temporal_Cloud][1]. +[Temporal Cloud][1] is a scalable platform for orchestrating complex workflows, with built-in reliability, resilience, and timing controls. Temporal Cloud enables developers to focus on application logic without worrying about fault tolerance and consistency. + + +This integration gathers Temporal Cloud metrics into Datadog, offering insights into system health, workflow efficiency, task execution, and performance bottlenecks. ## Setup -### Installation +### Generate a Metrics endpoint URL in Temporal Cloud + +1. To generate a CA certificate and an end-entity certificate, see [certificate management][2]. + - **Note**: An expired root CA certificate invalidates all downstream certificates. To avoid disruptions to your systems, use certificates with long validity periods. +2. Log in to [Temporal Cloud][3] with an account owner or global admin role. +3. 
Go to **Settings**, and select the **Observability** tab. +4. Under the **Certificates** section, add your root CA certificate (`.pem` file content) and save it. + - **Note**: If an observability endpoint is already set up, you can append your root CA certificate. +5. Click **Save** to generate the endpoint URL under the **Endpoint** section. The URL should look like: `https://.tmprl.cloud/prometheus`. -The Temporal_Cloud check is included in the [Datadog Agent][2] package. -No additional installation is needed on your server. -### Configuration +### Connect your Temporal Cloud account to Datadog -!!! Add list of steps to set up this integration !!! +1. Add your Account ID, End-entity Certificate file content, and End-entity Certificate key file content + |Parameters|Description| + |--------------------|--------------------| + |Account ID|Temporal Cloud account ID to be used as part of the metrics endpoint URL: `https://.tmprl.cloud/prometheus`.| + |End-entity certificate file content|Contents of the end-entity certificate for secure access and communication with the Metrics endpoint.| + |End-entity certificate key file content|Content of the end-entity certificate key for secure access and communication with the Metrics endpoint.| -### Validation +2. Click the **Save** button to save your settings. -!!! Add steps to validate integration is functioning as expected !!! ## Data Collected ### Metrics -Temporal_Cloud does not include any metrics. +See [metadata.csv][4] for a list of metrics provided by this integration. + ### Service Checks -Temporal_Cloud does not include any service checks. +The Temporal Cloud integration does not include any service checks. ### Events -Temporal_Cloud does not include any events. - -## Troubleshooting +The Temporal Cloud integration does not include any events. -Need help? Contact [Datadog support][3]. 
+## Support -[1]: **LINK_TO_INTEGRATION_SITE** -[2]: https://app.datadoghq.com/account/settings/agent/latest -[3]: https://docs.datadoghq.com/help/ +Need help? Contact [Datadog support][5]. +[1]: https://temporal.io/cloud/ +[2]: https://docs.temporal.io/cloud/certificates#use-certstrap/ +[3]: https://cloud.temporal.io/ +[4]: https://github.com/DataDog/integrations-core/blob/master/temporal_cloud/metadata.csv +[5]: https://docs.datadoghq.com/help/ diff --git a/temporal_cloud/assets/dashboards/temporal_cloud_overview.json b/temporal_cloud/assets/dashboards/temporal_cloud_overview.json new file mode 100644 index 0000000000000..1c84d8b87d085 --- /dev/null +++ b/temporal_cloud/assets/dashboards/temporal_cloud_overview.json @@ -0,0 +1,3771 @@ +{ + "title": "Temporal Cloud - Overview", + "description": "This dashboard provides insights into system health, performance and workflow efficiency for your Temporal Cloud instance.", + "widgets": [ + { + "id": 8740298734186812, + "definition": { + "type": "image", + "url": "https://images.ctfassets.net/0uuz8ydxyd9p/6lHpuU1sKtTBbWj6VS1Llh/7d48148041d51d513c5820cb1a0e7d5d/Temporal_LogoLockup_Horizontal_dark_1_2x.png", + "url_dark_theme": "https://images.ctfassets.net/0uuz8ydxyd9p/2ctnUPEhKA75tYnrl2Kzvj/90563965bc4ea2af9442b6eb4ba43180/Temporal_LogoLockup_Horizontal_light_1_2x.png", + "sizing": "contain", + "margin": "sm", + "has_background": true, + "has_border": false, + "vertical_align": "center", + "horizontal_align": "center" + }, + "layout": { + "x": 0, + "y": 0, + "width": 6, + "height": 2 + } + }, + { + "id": 5349449283720096, + "definition": { + "title": "Monitors Summary", + "background_color": "vivid_blue", + "show_title": true, + "type": "group", + "layout_type": "ordered", + "widgets": [ + { + "id": 8407084925998778, + "definition": { + "title": "Monitors Summary", + "type": "manage_status", + "display_format": "countsAndList", + "color_preference": "text", + "hide_zero_counts": true, + "show_status": true, + 
"last_triggered_format": "relative", + "query": "tag:(integration:temporal-cloud)", + "sort": "status,asc", + "count": 50, + "start": 0, + "summary_type": "monitors", + "show_priority": false, + "show_last_triggered": false + }, + "layout": { + "x": 0, + "y": 0, + "width": 6, + "height": 4 + } + } + ] + }, + "layout": { + "x": 6, + "y": 0, + "width": 6, + "height": 5 + } + }, + { + "id": 323399805713430, + "definition": { + "type": "note", + "content": "**[Temporal Cloud](https://temporal.io/cloud)** streamlines scalable application development by orchestrating workflows, retries, and state management.\n\nThis dashboard provides insights into system health, performance and workflow efficiency for your Temporal Cloud instance.\n\nFor more information, see the [Temporal Cloud Integration Documentation](https://docs.datadoghq.com/integrations/temporal_cloud/).\n\n**Tip**:\n- Clone this dashboard to rearrange, modify and add widgets and visualizations.", + "background_color": "gray", + "font_size": "14", + "text_align": "left", + "vertical_align": "top", + "show_tick": true, + "tick_pos": "50%", + "tick_edge": "top", + "has_padding": true + }, + "layout": { + "x": 0, + "y": 2, + "width": 6, + "height": 3 + } + }, + { + "id": 6745713151482410, + "definition": { + "title": "Service Latency Metrics", + "background_color": "vivid_blue", + "show_title": true, + "type": "group", + "layout_type": "ordered", + "widgets": [ + { + "id": 6596360524579660, + "definition": { + "title": "Avg StartWorkflowExecution Service Latency (P50)", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": [ + { + "response_format": "scalar", + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "avg:temporal_cloud.cloud_metrics.v0_service_latency_p50{operation:startworkflowexecution,$Namespace}", + "aggregator": "avg" + } + ], + "formulas": [ + { + "number_format": { + "unit": { + "type": "canonical_unit", + "unit_name": "second" + } + }, + 
"formula": "query1" + } + ], + "conditional_formats": [ + { + "comparator": "<=", + "value": 0.1, + "palette": "black_on_light_green" + }, + { + "comparator": ">", + "value": 0.1, + "palette": "black_on_light_yellow" + }, + { + "comparator": ">", + "value": 0.2, + "palette": "black_on_light_red" + } + ] + } + ], + "autoscale": false, + "precision": 2, + "timeseries_background": { + "yaxis": { + "include_zero": true + }, + "type": "area" + } + }, + "layout": { + "x": 0, + "y": 0, + "width": 4, + "height": 3 + } + }, + { + "id": 8775051559517178, + "definition": { + "title": "Avg SignalWorkflowExecution Service Latency (P50)", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": [ + { + "response_format": "scalar", + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "avg:temporal_cloud.cloud_metrics.v0_service_latency_p50{operation:signalworkflowexecution,$Namespace}", + "aggregator": "avg" + } + ], + "formulas": [ + { + "number_format": { + "unit": { + "type": "canonical_unit", + "unit_name": "second" + } + }, + "formula": "query1" + } + ], + "conditional_formats": [ + { + "comparator": "<=", + "value": 0.1, + "palette": "black_on_light_green", + "custom_bg_color": "#f82a2a" + }, + { + "comparator": ">", + "value": 0.1, + "palette": "black_on_light_yellow", + "custom_bg_color": "#3cec7f" + }, + { + "comparator": ">", + "value": 0.2, + "palette": "black_on_light_red" + } + ] + } + ], + "autoscale": false, + "precision": 2, + "timeseries_background": { + "type": "area" + } + }, + "layout": { + "x": 4, + "y": 0, + "width": 4, + "height": 3 + } + }, + { + "id": 2585371837746560, + "definition": { + "title": "Avg SignalWithStartWorkflowExecution Service Latency (P50)", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": [ + { + "response_format": "scalar", + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": 
"avg:temporal_cloud.cloud_metrics.v0_service_latency_p50{operation:signalwithstartworkflowexecution,$Namespace}", + "aggregator": "avg" + } + ], + "formulas": [ + { + "number_format": { + "unit": { + "type": "canonical_unit", + "unit_name": "second" + } + }, + "formula": "query1" + } + ], + "conditional_formats": [ + { + "comparator": "<=", + "value": 0.1, + "palette": "black_on_light_green", + "custom_bg_color": "#9f1e1e" + }, + { + "comparator": ">", + "value": 0.1, + "palette": "black_on_light_yellow", + "custom_bg_color": "#73e28f" + }, + { + "comparator": ">", + "value": 0.2, + "palette": "black_on_light_red" + } + ] + } + ], + "autoscale": false, + "precision": 2, + "timeseries_background": { + "yaxis": { + "include_zero": true + }, + "type": "area" + } + }, + "layout": { + "x": 8, + "y": 0, + "width": 4, + "height": 3 + } + }, + { + "id": 8602979691001942, + "definition": { + "title": "Service Latency (P50) over Time", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "auto", + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "type": "timeseries", + "requests": [ + { + "formulas": [ + { + "formula": "query1" + } + ], + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "avg:temporal_cloud.cloud_metrics.v0_service_latency_p50{$Namespace} by {operation}" + } + ], + "response_format": "timeseries", + "style": { + "palette": "dog_classic", + "order_by": "values", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "line" + } + ] + }, + "layout": { + "x": 0, + "y": 3, + "width": 12, + "height": 4 + } + }, + { + "id": 2720217781741226, + "definition": { + "title": "Avg StartWorkflowExecution Service Latency (P90)", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": [ + { + "response_format": "scalar", + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": 
"avg:temporal_cloud.cloud_metrics.v0_service_latency_p90{operation:startworkflowexecution,$Namespace}", + "aggregator": "avg" + } + ], + "formulas": [ + { + "number_format": { + "unit": { + "type": "canonical_unit", + "unit_name": "second" + } + }, + "formula": "query1" + } + ], + "conditional_formats": [ + { + "comparator": "<=", + "value": 0.1, + "palette": "black_on_light_green" + }, + { + "comparator": ">", + "value": 0.1, + "palette": "black_on_light_yellow" + }, + { + "comparator": ">", + "value": 0.2, + "palette": "black_on_light_red" + } + ] + } + ], + "autoscale": false, + "precision": 2, + "timeseries_background": { + "yaxis": { + "include_zero": true + }, + "type": "area" + } + }, + "layout": { + "x": 0, + "y": 7, + "width": 4, + "height": 3 + } + }, + { + "id": 3521776471353254, + "definition": { + "title": "Avg SignalWorkflowExecution Service Latency (P90)", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": [ + { + "response_format": "scalar", + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "avg:temporal_cloud.cloud_metrics.v0_service_latency_p90{operation:signalworkflowexecution,$Namespace}", + "aggregator": "avg" + } + ], + "formulas": [ + { + "number_format": { + "unit": { + "type": "canonical_unit", + "unit_name": "second" + } + }, + "formula": "query1" + } + ], + "conditional_formats": [ + { + "comparator": "<=", + "value": 0.1, + "palette": "black_on_light_green", + "custom_bg_color": "#f82a2a" + }, + { + "comparator": ">", + "value": 0.1, + "palette": "black_on_light_yellow", + "custom_bg_color": "#3cec7f" + }, + { + "comparator": ">", + "value": 0.2, + "palette": "black_on_light_red" + } + ] + } + ], + "autoscale": false, + "precision": 2, + "timeseries_background": { + "type": "area" + } + }, + "layout": { + "x": 4, + "y": 7, + "width": 4, + "height": 3 + } + }, + { + "id": 6556065257189900, + "definition": { + "title": "Avg SignalWithStartWorkflowExecution Service Latency 
(P90)", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": [ + { + "response_format": "scalar", + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "avg:temporal_cloud.cloud_metrics.v0_service_latency_p90{operation:signalwithstartworkflowexecution,$Namespace}", + "aggregator": "avg" + } + ], + "formulas": [ + { + "number_format": { + "unit": { + "type": "canonical_unit", + "unit_name": "second" + } + }, + "formula": "query1" + } + ], + "conditional_formats": [ + { + "comparator": "<=", + "value": 0.1, + "palette": "black_on_light_green", + "custom_bg_color": "#9f1e1e" + }, + { + "comparator": ">", + "value": 0.1, + "palette": "black_on_light_yellow", + "custom_bg_color": "#73e28f" + }, + { + "comparator": ">", + "value": 0.2, + "palette": "black_on_light_red" + } + ] + } + ], + "autoscale": false, + "precision": 2, + "timeseries_background": { + "yaxis": { + "include_zero": true + }, + "type": "area" + } + }, + "layout": { + "x": 8, + "y": 7, + "width": 4, + "height": 3 + } + }, + { + "id": 7831482080727952, + "definition": { + "title": "Service Latency (P90) over Time", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "auto", + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "type": "timeseries", + "requests": [ + { + "formulas": [ + { + "formula": "query1" + } + ], + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "avg:temporal_cloud.cloud_metrics.v0_service_latency_p90{$Namespace} by {operation}" + } + ], + "response_format": "timeseries", + "style": { + "palette": "dog_classic", + "order_by": "values", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "line" + } + ] + }, + "layout": { + "x": 0, + "y": 10, + "width": 12, + "height": 4 + } + }, + { + "id": 3811540902858216, + "definition": { + "title": "Avg StartWorkflowExecution Service Latency (P95)", + "title_size": "16", + 
"title_align": "left", + "type": "query_value", + "requests": [ + { + "response_format": "scalar", + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "avg:temporal_cloud.cloud_metrics.v0_service_latency_p95{operation:startworkflowexecution,$Namespace}", + "aggregator": "avg" + } + ], + "formulas": [ + { + "number_format": { + "unit": { + "type": "canonical_unit", + "unit_name": "second" + } + }, + "formula": "query1" + } + ], + "conditional_formats": [ + { + "comparator": "<=", + "value": 0.1, + "palette": "black_on_light_green" + }, + { + "comparator": ">", + "value": 0.1, + "palette": "black_on_light_yellow" + }, + { + "comparator": ">", + "value": 0.2, + "palette": "black_on_light_red" + } + ] + } + ], + "autoscale": false, + "precision": 2, + "timeseries_background": { + "yaxis": { + "include_zero": true + }, + "type": "area" + } + }, + "layout": { + "x": 0, + "y": 14, + "width": 4, + "height": 3 + } + }, + { + "id": 6386629657872384, + "definition": { + "title": "Avg SignalWorkflowExecution Service Latency (P95)", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": [ + { + "response_format": "scalar", + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "avg:temporal_cloud.cloud_metrics.v0_service_latency_p95{operation:signalworkflowexecution,$Namespace}", + "aggregator": "avg" + } + ], + "formulas": [ + { + "number_format": { + "unit": { + "type": "canonical_unit", + "unit_name": "second" + } + }, + "formula": "query1" + } + ], + "conditional_formats": [ + { + "comparator": "<=", + "value": 0.1, + "palette": "black_on_light_green", + "custom_bg_color": "#f82a2a" + }, + { + "comparator": ">", + "value": 0.1, + "palette": "black_on_light_yellow", + "custom_bg_color": "#3cec7f" + }, + { + "comparator": ">", + "value": 0.2, + "palette": "black_on_light_red" + } + ] + } + ], + "autoscale": false, + "precision": 2, + "timeseries_background": { + "type": "area" + } + }, + "layout": 
{ + "x": 4, + "y": 14, + "width": 4, + "height": 3 + } + }, + { + "id": 5711438364606902, + "definition": { + "title": "Avg SignalWithStartWorkflowExecution Service Latency (P95)", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": [ + { + "response_format": "scalar", + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "avg:temporal_cloud.cloud_metrics.v0_service_latency_p95{operation:signalwithstartworkflowexecution,$Namespace}", + "aggregator": "avg" + } + ], + "formulas": [ + { + "number_format": { + "unit": { + "type": "canonical_unit", + "unit_name": "second" + } + }, + "formula": "query1" + } + ], + "conditional_formats": [ + { + "comparator": "<=", + "value": 0.1, + "palette": "black_on_light_green", + "custom_bg_color": "#9f1e1e" + }, + { + "comparator": ">", + "value": 0.1, + "palette": "black_on_light_yellow", + "custom_bg_color": "#73e28f" + }, + { + "comparator": ">", + "value": 0.2, + "palette": "black_on_light_red" + } + ] + } + ], + "autoscale": false, + "precision": 2, + "timeseries_background": { + "yaxis": { + "include_zero": true + }, + "type": "area" + } + }, + "layout": { + "x": 8, + "y": 14, + "width": 4, + "height": 3 + } + }, + { + "id": 5217501760119796, + "definition": { + "title": "Service Latency (P95) over Time", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "auto", + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "type": "timeseries", + "requests": [ + { + "formulas": [ + { + "formula": "query1" + } + ], + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "avg:temporal_cloud.cloud_metrics.v0_service_latency_p95{$Namespace} by {operation}" + } + ], + "response_format": "timeseries", + "style": { + "palette": "dog_classic", + "order_by": "values", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "line" + } + ] + }, + "layout": { + "x": 0, + "y": 17, + "width": 
12, + "height": 4 + } + }, + { + "id": 7206975012378310, + "definition": { + "title": "Avg StartWorkflowExecution Service Latency (P99)", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": [ + { + "response_format": "scalar", + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "avg:temporal_cloud.cloud_metrics.v0_service_latency_p99{operation:startworkflowexecution,$Namespace}", + "aggregator": "avg" + } + ], + "formulas": [ + { + "number_format": { + "unit": { + "type": "canonical_unit", + "unit_name": "second" + } + }, + "formula": "query1" + } + ], + "conditional_formats": [ + { + "comparator": "<=", + "value": 0.1, + "palette": "black_on_light_green" + }, + { + "comparator": ">", + "value": 0.1, + "palette": "black_on_light_yellow" + }, + { + "comparator": ">", + "value": 0.2, + "palette": "black_on_light_red" + } + ] + } + ], + "autoscale": false, + "precision": 2, + "timeseries_background": { + "yaxis": { + "include_zero": true + }, + "type": "area" + } + }, + "layout": { + "x": 0, + "y": 21, + "width": 4, + "height": 3 + } + }, + { + "id": 9005631171257400, + "definition": { + "title": "Avg SignalWorkflowExecution Service Latency (P99)", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": [ + { + "response_format": "scalar", + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "avg:temporal_cloud.cloud_metrics.v0_service_latency_p99{operation:signalworkflowexecution,$Namespace}", + "aggregator": "avg" + } + ], + "formulas": [ + { + "number_format": { + "unit": { + "type": "canonical_unit", + "unit_name": "second" + } + }, + "formula": "query1" + } + ], + "conditional_formats": [ + { + "comparator": "<=", + "value": 0.1, + "palette": "black_on_light_green", + "custom_bg_color": "#f82a2a" + }, + { + "comparator": ">", + "value": 0.1, + "palette": "black_on_light_yellow", + "custom_bg_color": "#3cec7f" + }, + { + "comparator": ">", + "value": 
0.2, + "palette": "black_on_light_red" + } + ] + } + ], + "autoscale": false, + "precision": 2, + "timeseries_background": { + "type": "area" + } + }, + "layout": { + "x": 4, + "y": 21, + "width": 4, + "height": 3 + } + }, + { + "id": 8404232948383498, + "definition": { + "title": "Avg SignalWithStartWorkflowExecution Service Latency (P99)", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": [ + { + "response_format": "scalar", + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "avg:temporal_cloud.cloud_metrics.v0_service_latency_p99{operation:signalwithstartworkflowexecution,$Namespace}", + "aggregator": "avg" + } + ], + "formulas": [ + { + "number_format": { + "unit": { + "type": "canonical_unit", + "unit_name": "second" + } + }, + "formula": "query1" + } + ], + "conditional_formats": [ + { + "comparator": "<=", + "value": 0.1, + "palette": "black_on_light_green", + "custom_bg_color": "#9f1e1e" + }, + { + "comparator": ">", + "value": 0.1, + "palette": "black_on_light_yellow", + "custom_bg_color": "#73e28f" + }, + { + "comparator": ">", + "value": 0.2, + "palette": "black_on_light_red" + } + ] + } + ], + "autoscale": false, + "precision": 2, + "timeseries_background": { + "yaxis": { + "include_zero": true + }, + "type": "area" + } + }, + "layout": { + "x": 8, + "y": 21, + "width": 4, + "height": 3 + } + }, + { + "id": 7206412369508920, + "definition": { + "title": "Service Latency (P99) over Time", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "auto", + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "type": "timeseries", + "requests": [ + { + "formulas": [ + { + "formula": "query1" + } + ], + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "avg:temporal_cloud.cloud_metrics.v0_service_latency_p99{$Namespace} by {operation}" + } + ], + "response_format": "timeseries", + "style": { + "palette": "dog_classic", 
+ "order_by": "values", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "line" + } + ] + }, + "layout": { + "x": 0, + "y": 24, + "width": 12, + "height": 4 + } + } + ] + }, + "layout": { + "x": 0, + "y": 5, + "width": 12, + "height": 1 + } + }, + { + "id": 8893308504271682, + "definition": { + "title": "Frontend Service Overview", + "background_color": "vivid_blue", + "show_title": true, + "type": "group", + "layout_type": "ordered", + "widgets": [ + { + "id": 298345148190314, + "definition": { + "title": "Avg gRPC Error Rate", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": [ + { + "response_format": "scalar", + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_frontend_service_error_increase1m{$Namespace}.as_rate()", + "aggregator": "avg" + } + ], + "formulas": [ + { + "formula": "query1" + } + ], + "conditional_formats": [ + { + "comparator": ">", + "value": 0, + "palette": "black_on_light_red" + } + ] + } + ], + "autoscale": false, + "precision": 2, + "timeseries_background": { + "yaxis": { + "include_zero": true + }, + "type": "area" + } + }, + "layout": { + "x": 0, + "y": 0, + "width": 3, + "height": 3 + } + }, + { + "id": 4801477252304832, + "definition": { + "title": "gRPC Error Rate over Time", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "auto", + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "type": "timeseries", + "requests": [ + { + "formulas": [ + { + "formula": "query1" + } + ], + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_frontend_service_error_increase1m{$Namespace} by {operation}.as_rate()" + } + ], + "response_format": "timeseries", + "style": { + "palette": "dog_classic", + "order_by": "values", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "line" + } + ] + 
}, + "layout": { + "x": 3, + "y": 0, + "width": 9, + "height": 3 + } + }, + { + "id": 7948259305138766, + "definition": { + "title": "Avg gRPC Request Rate", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": [ + { + "response_format": "scalar", + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_frontend_service_request_increase1m{$Namespace}.as_rate()", + "aggregator": "avg" + } + ], + "formulas": [ + { + "formula": "query1" + } + ], + "conditional_formats": [ + { + "comparator": ">", + "value": 0, + "palette": "black_on_light_green" + } + ] + } + ], + "autoscale": false, + "precision": 2, + "timeseries_background": { + "yaxis": { + "include_zero": true + }, + "type": "area" + } + }, + "layout": { + "x": 0, + "y": 3, + "width": 3, + "height": 3 + } + }, + { + "id": 2182787751685328, + "definition": { + "title": "gRPC Request Rate over Time", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "auto", + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "type": "timeseries", + "requests": [ + { + "formulas": [ + { + "formula": "query1" + } + ], + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_frontend_service_request_increase1m{$Namespace} by {operation}.as_rate()" + } + ], + "response_format": "timeseries", + "style": { + "palette": "dog_classic", + "order_by": "values", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "line" + } + ] + }, + "layout": { + "x": 3, + "y": 3, + "width": 9, + "height": 3 + } + }, + { + "id": 2367630247389044, + "definition": { + "title": "Avg Rate-Limited Requests Rate", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": [ + { + "response_format": "scalar", + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": 
"sum:temporal_cloud.cloud_metrics.v0_resource_exhausted_error_increase1m{$Namespace}.as_rate()", + "aggregator": "avg" + } + ], + "formulas": [ + { + "formula": "query1" + } + ], + "conditional_formats": [ + { + "comparator": ">", + "value": 0, + "palette": "black_on_light_red", + "custom_bg_color": "#699263", + "custom_fg_color": "#729e6b" + } + ] + } + ], + "autoscale": false, + "precision": 2, + "timeseries_background": { + "yaxis": { + "include_zero": true + }, + "type": "area" + } + }, + "layout": { + "x": 0, + "y": 6, + "width": 3, + "height": 3 + } + }, + { + "id": 5987314578723806, + "definition": { + "title": "Rate-Limited Request Rate over Time", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "auto", + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "type": "timeseries", + "requests": [ + { + "formulas": [ + { + "formula": "query1" + } + ], + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_resource_exhausted_error_increase1m{$Namespace} by {resource_exhausted_cause}.as_rate()" + } + ], + "response_format": "timeseries", + "style": { + "palette": "dog_classic", + "order_by": "values", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "line" + } + ] + }, + "layout": { + "x": 3, + "y": 6, + "width": 9, + "height": 3 + } + }, + { + "id": 3903383398206538, + "definition": { + "title": "Avg State Transition Rate", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": [ + { + "response_format": "scalar", + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_state_transition_increase1m{$Namespace}.as_rate()", + "aggregator": "avg" + } + ], + "formulas": [ + { + "formula": "query1" + } + ], + "conditional_formats": [ + { + "comparator": ">", + "value": 0, + "palette": "black_on_light_green", + "custom_fg_color": "#b0d058" 
+ } + ] + } + ], + "autoscale": false, + "precision": 2, + "timeseries_background": { + "yaxis": { + "include_zero": true + }, + "type": "area" + } + }, + "layout": { + "x": 0, + "y": 9, + "width": 3, + "height": 3 + } + }, + { + "id": 3169127014752062, + "definition": { + "title": "State Transition Rate over Time", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "auto", + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "type": "timeseries", + "requests": [ + { + "formulas": [ + { + "formula": "query1" + } + ], + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_state_transition_increase1m{$Namespace} by {temporal_namespace}.as_rate()" + } + ], + "response_format": "timeseries", + "style": { + "palette": "dog_classic", + "order_by": "values", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "line" + } + ] + }, + "layout": { + "x": 3, + "y": 9, + "width": 9, + "height": 3 + } + }, + { + "id": 7939379235471620, + "definition": { + "title": "Avg Actions Per Second (APS)", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": [ + { + "response_format": "scalar", + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_total_action_increase1m{$Namespace}.as_rate()", + "aggregator": "avg" + } + ], + "formulas": [ + { + "formula": "query1" + } + ], + "conditional_formats": [ + { + "comparator": ">", + "value": 0, + "palette": "custom_bg", + "custom_fg_color": "#f00a0a", + "custom_bg_color": "#65a8e6" + } + ] + } + ], + "autoscale": false, + "precision": 2, + "timeseries_background": { + "yaxis": { + "include_zero": true + }, + "type": "area" + } + }, + "layout": { + "x": 0, + "y": 12, + "width": 3, + "height": 3 + } + }, + { + "id": 786592074502738, + "definition": { + "title": "Actions Per Second over Time", + "title_size": "16", + 
"title_align": "left", + "show_legend": true, + "legend_layout": "auto", + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "type": "timeseries", + "requests": [ + { + "formulas": [ + { + "formula": "query1" + } + ], + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_total_action_increase1m{$Namespace} by {temporal_namespace}.as_rate()" + } + ], + "response_format": "timeseries", + "style": { + "palette": "dog_classic", + "order_by": "values", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "line" + } + ] + }, + "layout": { + "x": 3, + "y": 12, + "width": 9, + "height": 3 + } + }, + { + "id": 5981689788717980, + "definition": { + "title": "gRPC Error Percentage", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": [ + { + "response_format": "scalar", + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_frontend_service_error_increase1m{$Namespace}.as_rate()", + "aggregator": "avg" + }, + { + "name": "query2", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_frontend_service_request_increase1m{$Namespace}.as_rate()", + "aggregator": "avg" + } + ], + "formulas": [ + { + "number_format": { + "unit": { + "type": "canonical_unit", + "unit_name": "percent" + } + }, + "formula": "(query1 / query2) * 100" + } + ], + "conditional_formats": [ + { + "comparator": ">", + "value": 0, + "palette": "black_on_light_yellow", + "custom_fg_color": "#df7777" + }, + { + "comparator": ">", + "value": 5, + "palette": "black_on_light_red" + } + ] + } + ], + "autoscale": false, + "precision": 2, + "timeseries_background": { + "yaxis": { + "include_zero": true + }, + "type": "area" + } + }, + "layout": { + "x": 0, + "y": 15, + "width": 3, + "height": 4 + } + }, + { + "id": 4183326508294474, + "definition": { + "title": "Actions Per Second by Namespace Mode", + 
"title_size": "16", + "title_align": "left", + "requests": [ + { + "formulas": [ + { + "formula": "query1" + } + ], + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_total_action_increase1m{$Namespace} by {namespace_mode}.as_rate()", + "aggregator": "avg" + } + ], + "response_format": "scalar", + "style": { + "palette": "datadog16" + }, + "sort": { + "count": 500, + "order_by": [ + { + "type": "formula", + "index": 0, + "order": "desc" + } + ] + } + } + ], + "type": "sunburst", + "hide_total": false, + "legend": { + "type": "table" + } + }, + "layout": { + "x": 3, + "y": 15, + "width": 9, + "height": 4 + } + }, + { + "id": 4916903371261782, + "definition": { + "title": "Top Operations by gRPC Requests Rate", + "title_size": "16", + "title_align": "left", + "type": "toplist", + "requests": [ + { + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_frontend_service_request_increase1m{$Namespace} by {operation}.as_rate()", + "aggregator": "avg" + } + ], + "response_format": "scalar", + "formulas": [ + { + "formula": "query1" + } + ], + "sort": { + "count": 10, + "order_by": [ + { + "type": "formula", + "index": 0, + "order": "desc" + } + ] + } + } + ], + "style": { + "display": { + "type": "stacked", + "legend": "automatic" + } + } + }, + "layout": { + "x": 0, + "y": 19, + "width": 4, + "height": 4 + } + }, + { + "id": 2325501122225016, + "definition": { + "title": "Top Cause for Rate-limited Requests", + "title_size": "16", + "title_align": "left", + "type": "toplist", + "requests": [ + { + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_resource_exhausted_error_increase1m{$Namespace} by {resource_exhausted_cause}.as_rate()", + "aggregator": "avg" + } + ], + "response_format": "scalar", + "formulas": [ + { + "formula": "query1" + } + ], + "sort": { + "count": 500, + "order_by": 
[ + { + "type": "formula", + "index": 0, + "order": "desc" + } + ] + } + } + ], + "style": { + "display": { + "type": "stacked", + "legend": "automatic" + }, + "palette": "datadog16" + } + }, + "layout": { + "x": 4, + "y": 19, + "width": 4, + "height": 4 + } + }, + { + "id": 8249413955524886, + "definition": { + "title": "Top Operations by gRPC Error Rate", + "title_size": "16", + "title_align": "left", + "type": "toplist", + "requests": [ + { + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_frontend_service_error_increase1m{$Namespace} by {operation}.as_rate()", + "aggregator": "avg" + } + ], + "response_format": "scalar", + "formulas": [ + { + "formula": "query1" + } + ], + "sort": { + "count": 10, + "order_by": [ + { + "type": "formula", + "index": 0, + "order": "desc" + } + ] + } + } + ], + "style": { + "display": { + "type": "stacked", + "legend": "automatic" + } + } + }, + "layout": { + "x": 8, + "y": 19, + "width": 4, + "height": 4 + } + } + ] + }, + "layout": { + "x": 0, + "y": 6, + "width": 12, + "height": 1 + } + }, + { + "id": 1266663704400704, + "definition": { + "title": "Task Polling Overview", + "background_color": "vivid_blue", + "show_title": true, + "type": "group", + "layout_type": "ordered", + "widgets": [ + { + "id": 2236256623748982, + "definition": { + "title": "Avg Task Poll Success Rate", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": [ + { + "response_format": "scalar", + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_poll_success_increase1m{$Namespace}.as_rate()", + "aggregator": "avg" + } + ], + "formulas": [ + { + "formula": "query1" + } + ], + "conditional_formats": [ + { + "comparator": ">", + "value": 0, + "palette": "black_on_light_green" + } + ] + } + ], + "autoscale": false, + "precision": 2, + "timeseries_background": { + "yaxis": { + "include_zero": true + }, + 
"type": "area" + } + }, + "layout": { + "x": 0, + "y": 0, + "width": 4, + "height": 3 + } + }, + { + "id": 2495307867177718, + "definition": { + "title": "Task Poll Success Rate over Time", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "auto", + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "type": "timeseries", + "requests": [ + { + "formulas": [ + { + "formula": "query1" + } + ], + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_poll_success_increase1m{$Namespace} by {task_type}.as_rate()" + } + ], + "response_format": "timeseries", + "style": { + "palette": "dog_classic", + "order_by": "values", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "line" + } + ] + }, + "layout": { + "x": 4, + "y": 0, + "width": 8, + "height": 3 + } + }, + { + "id": 474393580792922, + "definition": { + "title": "Task Sync Match Percentage", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": [ + { + "response_format": "scalar", + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_poll_success_sync_increase1m{$Namespace}.as_rate()", + "aggregator": "avg" + }, + { + "name": "query2", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_poll_success_increase1m{$Namespace}.as_rate()", + "aggregator": "avg" + } + ], + "formulas": [ + { + "number_format": { + "unit": { + "type": "canonical_unit", + "unit_name": "percent" + } + }, + "formula": "(query1 / query2) * 100" + } + ], + "conditional_formats": [ + { + "comparator": ">", + "value": 0, + "palette": "black_on_light_green" + } + ] + } + ], + "autoscale": false, + "precision": 2, + "timeseries_background": { + "yaxis": { + "include_zero": true + }, + "type": "area" + } + }, + "layout": { + "x": 0, + "y": 3, + "width": 3, + "height": 3 + } + }, + { + "id": 
5326722891696668, + "definition": { + "title": "Avg Task Poll Sync Success Rate", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": [ + { + "response_format": "scalar", + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_poll_success_sync_increase1m{$Namespace}.as_rate()", + "aggregator": "avg" + } + ], + "formulas": [ + { + "formula": "query1" + } + ], + "conditional_formats": [ + { + "comparator": ">", + "value": 0, + "palette": "black_on_light_green" + } + ] + } + ], + "autoscale": false, + "precision": 2, + "timeseries_background": { + "yaxis": { + "include_zero": true + }, + "type": "area" + } + }, + "layout": { + "x": 3, + "y": 3, + "width": 3, + "height": 3 + } + }, + { + "id": 2731591411925024, + "definition": { + "title": "Task Types by Poll Timeout", + "title_size": "16", + "title_align": "left", + "requests": [ + { + "formulas": [ + { + "formula": "query1", + "limit": { + "order": "desc" + } + } + ], + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_poll_timeout_increase1m{$Namespace} by {task_type}.as_rate()", + "aggregator": "avg" + } + ], + "response_format": "scalar", + "style": { + "palette": "datadog16" + } + } + ], + "type": "sunburst", + "legend": { + "type": "table" + } + }, + "layout": { + "x": 6, + "y": 3, + "width": 6, + "height": 6 + } + }, + { + "id": 8353431131543564, + "definition": { + "title": "Avg Task Poll Timeout Rate ", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": [ + { + "formulas": [ + { + "formula": "query1" + } + ], + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_poll_timeout_increase1m{$Namespace}.as_rate()", + "aggregator": "avg" + } + ], + "response_format": "scalar", + "conditional_formats": [ + { + "comparator": ">", + "value": 0, + "palette": 
"black_on_light_red" + } + ] + } + ], + "autoscale": false, + "precision": 2, + "timeseries_background": { + "yaxis": { + "include_zero": true + }, + "type": "area" + } + }, + "layout": { + "x": 0, + "y": 6, + "width": 3, + "height": 3 + } + }, + { + "id": 7537330670601152, + "definition": { + "title": "Poll Timeout Percentage", + "title_size": "16", + "title_align": "left", + "time": {}, + "type": "query_value", + "requests": [ + { + "formulas": [ + { + "number_format": { + "unit": { + "type": "canonical_unit", + "unit_name": "percent" + } + }, + "formula": "(query1 / (query1 + query2 + query3)) * 100" + } + ], + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_poll_timeout_increase1m{$Namespace}.as_rate()", + "aggregator": "avg" + }, + { + "name": "query2", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_poll_success_increase1m{$Namespace}.as_rate()", + "aggregator": "avg" + }, + { + "name": "query3", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_poll_success_sync_increase1m{$Namespace}.as_rate()", + "aggregator": "avg" + } + ], + "response_format": "scalar", + "conditional_formats": [ + { + "comparator": ">", + "value": 0, + "palette": "black_on_light_red" + } + ] + } + ], + "autoscale": false, + "precision": 2, + "timeseries_background": { + "yaxis": { + "include_zero": true + }, + "type": "area" + } + }, + "layout": { + "x": 3, + "y": 6, + "width": 3, + "height": 3 + } + }, + { + "id": 3860476950453548, + "definition": { + "title": "Task Types by Poll Success", + "title_size": "16", + "title_align": "left", + "requests": [ + { + "formulas": [ + { + "formula": "query1", + "limit": { + "order": "desc" + } + } + ], + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_poll_success_increase1m{$Namespace} by {task_type}.as_rate()", + "aggregator": "avg" + } + ], + "response_format": 
"scalar", + "style": { + "palette": "datadog16" + } + } + ], + "type": "sunburst", + "legend": { + "type": "table" + } + }, + "layout": { + "x": 0, + "y": 9, + "width": 6, + "height": 4 + } + }, + { + "id": 7112687077524268, + "definition": { + "title": "Task Types by Poll Sync Success", + "title_size": "16", + "title_align": "left", + "requests": [ + { + "formulas": [ + { + "formula": "query1", + "limit": { + "order": "desc" + } + } + ], + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_poll_success_sync_increase1m{$Namespace} by {task_type}.as_rate()", + "aggregator": "avg" + } + ], + "response_format": "scalar", + "style": { + "palette": "datadog16" + } + } + ], + "type": "sunburst", + "legend": { + "type": "table" + } + }, + "layout": { + "x": 6, + "y": 9, + "width": 6, + "height": 4 + } + } + ] + }, + "layout": { + "x": 0, + "y": 7, + "width": 12, + "height": 1, + "is_column_break": true + } + }, + { + "id": 7334379080331064, + "definition": { + "title": "Scheduled Workflow Overview", + "background_color": "vivid_blue", + "show_title": true, + "type": "group", + "layout_type": "ordered", + "widgets": [ + { + "id": 4608661273329986, + "definition": { + "title": "Avg Scheduled Workflow Success Rate", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": [ + { + "response_format": "scalar", + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_schedule_action_success_increase1m{$Namespace}.as_rate()", + "aggregator": "avg" + } + ], + "formulas": [ + { + "formula": "query1" + } + ], + "conditional_formats": [ + { + "comparator": ">", + "value": 0, + "palette": "black_on_light_green" + } + ] + } + ], + "autoscale": false, + "precision": 2, + "timeseries_background": { + "yaxis": { + "include_zero": true + }, + "type": "area" + } + }, + "layout": { + "x": 0, + "y": 0, + "width": 4, + "height": 3 + } + }, + { + 
"id": 4494292675722788, + "definition": { + "title": "Scheduled Workflow Success Rate over Time", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "auto", + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "type": "timeseries", + "requests": [ + { + "formulas": [ + { + "formula": "query1" + } + ], + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_schedule_action_success_increase1m{$Namespace} by {temporal_namespace}.as_rate()" + } + ], + "response_format": "timeseries", + "style": { + "palette": "dog_classic", + "order_by": "values", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "line" + } + ] + }, + "layout": { + "x": 4, + "y": 0, + "width": 8, + "height": 3 + } + }, + { + "id": 6473814659260960, + "definition": { + "title": "Avg Buffer Overrun Rate", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": [ + { + "response_format": "scalar", + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_schedule_buffer_overruns_increase1m{$Namespace}.as_rate()", + "aggregator": "avg" + } + ], + "formulas": [ + { + "formula": "query1" + } + ], + "conditional_formats": [ + { + "comparator": ">", + "value": 0, + "palette": "black_on_light_red" + } + ] + } + ], + "autoscale": false, + "precision": 2, + "timeseries_background": { + "yaxis": { + "include_zero": true + }, + "type": "area" + } + }, + "layout": { + "x": 0, + "y": 3, + "width": 4, + "height": 3 + } + }, + { + "id": 1932274358183966, + "definition": { + "title": "Buffer Overrun Rate over Time", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "auto", + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "type": "timeseries", + "requests": [ + { + "formulas": [ + { + "formula": "query1" + } + ], + "queries": [ + { + 
"name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_schedule_buffer_overruns_increase1m{$Namespace} by {temporal_namespace}.as_rate()" + } + ], + "response_format": "timeseries", + "style": { + "palette": "dog_classic", + "order_by": "values", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "line" + } + ] + }, + "layout": { + "x": 4, + "y": 3, + "width": 8, + "height": 3 + } + }, + { + "id": 4132996472752296, + "definition": { + "title": "Avg Missed Catch-Up Window Rate", + "title_size": "16", + "title_align": "left", + "time": {}, + "type": "query_value", + "requests": [ + { + "response_format": "scalar", + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_schedule_missed_catchup_window_increase1m{$Namespace}.as_rate()", + "aggregator": "avg" + } + ], + "formulas": [ + { + "formula": "query1" + } + ], + "conditional_formats": [ + { + "comparator": ">", + "value": 0, + "palette": "black_on_light_red" + } + ] + } + ], + "autoscale": false, + "precision": 2, + "timeseries_background": { + "yaxis": { + "include_zero": true + }, + "type": "area" + } + }, + "layout": { + "x": 0, + "y": 6, + "width": 4, + "height": 3 + } + }, + { + "id": 2813049649778638, + "definition": { + "title": "Missed Catch-Up Windows Rate over Time", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "auto", + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "type": "timeseries", + "requests": [ + { + "formulas": [ + { + "formula": "query1" + } + ], + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_schedule_missed_catchup_window_increase1m{$Namespace} by {temporal_namespace}.as_rate()" + } + ], + "response_format": "timeseries", + "style": { + "palette": "dog_classic", + "order_by": "values", + "line_type": "solid", + "line_width": "normal" + }, 
+ "display_type": "line" + } + ] + }, + "layout": { + "x": 4, + "y": 6, + "width": 8, + "height": 3 + } + }, + { + "id": 5931318482078592, + "definition": { + "title": "Avg Rate-Limited Workflow Rate", + "title_size": "16", + "title_align": "left", + "time": {}, + "type": "query_value", + "requests": [ + { + "formulas": [ + { + "formula": "query1" + } + ], + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_schedule_rate_limited_increase1m{$Namespace}.as_rate()", + "aggregator": "avg" + } + ], + "response_format": "scalar", + "conditional_formats": [ + { + "comparator": ">", + "value": 0, + "palette": "black_on_light_red" + } + ] + } + ], + "autoscale": false, + "precision": 2, + "timeseries_background": { + "yaxis": { + "include_zero": true + }, + "type": "area" + } + }, + "layout": { + "x": 0, + "y": 9, + "width": 4, + "height": 3 + } + }, + { + "id": 8093326950057642, + "definition": { + "title": "Rate-Limited Workflow Rate over Time", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "auto", + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "type": "timeseries", + "requests": [ + { + "formulas": [ + { + "formula": "query1" + } + ], + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_schedule_rate_limited_increase1m{$Namespace} by {temporal_namespace}.as_rate()" + } + ], + "response_format": "timeseries", + "style": { + "palette": "dog_classic", + "order_by": "values", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "line" + } + ] + }, + "layout": { + "x": 4, + "y": 9, + "width": 8, + "height": 3 + } + } + ] + }, + "layout": { + "x": 0, + "y": 8, + "width": 12, + "height": 1 + } + }, + { + "id": 5070840369040136, + "definition": { + "title": "Workflow Overview", + "background_color": "vivid_blue", + "show_title": true, + "type": "group", + 
"layout_type": "ordered", + "widgets": [ + { + "id": 7390564078879392, + "definition": { + "title": "Avg Workflow Cancellation Rate", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": [ + { + "response_format": "scalar", + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_workflow_cancel_increase1m{$Namespace}.as_rate()", + "aggregator": "avg" + } + ], + "formulas": [ + { + "formula": "query1" + } + ], + "conditional_formats": [ + { + "comparator": ">", + "value": 0, + "palette": "black_on_light_red" + } + ] + } + ], + "autoscale": false, + "precision": 2, + "timeseries_background": { + "yaxis": { + "include_zero": true + }, + "type": "area" + } + }, + "layout": { + "x": 0, + "y": 0, + "width": 4, + "height": 3 + } + }, + { + "id": 1229883020208502, + "definition": { + "title": "Workflow Cancellation Rate over Time", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "auto", + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "type": "timeseries", + "requests": [ + { + "formulas": [ + { + "formula": "query1" + } + ], + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_workflow_cancel_increase1m{$Namespace} by {temporal_namespace}.as_rate()" + } + ], + "response_format": "timeseries", + "style": { + "palette": "dog_classic", + "order_by": "values", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "line" + } + ] + }, + "layout": { + "x": 4, + "y": 0, + "width": 8, + "height": 3 + } + }, + { + "id": 2309946839235418, + "definition": { + "title": "Avg Continued-As-New Workflow Rate", + "title_size": "16", + "title_align": "left", + "time": {}, + "type": "query_value", + "requests": [ + { + "response_format": "scalar", + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": 
"sum:temporal_cloud.cloud_metrics.v0_workflow_continued_as_new_increase1m{$Namespace}.as_rate()", + "aggregator": "avg" + } + ], + "formulas": [ + { + "formula": "query1" + } + ], + "conditional_formats": [ + { + "comparator": ">", + "value": 0, + "palette": "custom_bg", + "custom_bg_color": "#65a8e6" + } + ] + } + ], + "autoscale": false, + "precision": 2, + "timeseries_background": { + "yaxis": { + "include_zero": true + }, + "type": "area" + } + }, + "layout": { + "x": 0, + "y": 3, + "width": 4, + "height": 3 + } + }, + { + "id": 38538954247440, + "definition": { + "title": "Continued-As-New Workflow Rate over Time", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "auto", + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "type": "timeseries", + "requests": [ + { + "formulas": [ + { + "formula": "query1" + } + ], + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_workflow_continued_as_new_increase1m{$Namespace} by {temporal_namespace}.as_rate()" + } + ], + "response_format": "timeseries", + "style": { + "palette": "dog_classic", + "order_by": "values", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "line" + } + ] + }, + "layout": { + "x": 4, + "y": 3, + "width": 8, + "height": 3 + } + }, + { + "id": 2741121569755672, + "definition": { + "title": "Avg Workflow Failure Rate", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": [ + { + "response_format": "scalar", + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_workflow_failed_increase1m{$Namespace}.as_rate()", + "aggregator": "avg" + } + ], + "formulas": [ + { + "formula": "query1" + } + ], + "conditional_formats": [ + { + "comparator": ">", + "value": 0, + "palette": "black_on_light_red" + } + ] + } + ], + "autoscale": false, + "precision": 2, + 
"timeseries_background": { + "yaxis": { + "include_zero": true + }, + "type": "area" + } + }, + "layout": { + "x": 0, + "y": 6, + "width": 4, + "height": 3 + } + }, + { + "id": 4128572692652006, + "definition": { + "title": "Workflow Failure Rate over Time ", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "auto", + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "type": "timeseries", + "requests": [ + { + "formulas": [ + { + "formula": "query1" + } + ], + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_workflow_failed_increase1m{$Namespace} by {temporal_namespace}.as_rate()" + } + ], + "response_format": "timeseries", + "style": { + "palette": "dog_classic", + "order_by": "values", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "line" + } + ] + }, + "layout": { + "x": 4, + "y": 6, + "width": 8, + "height": 3 + } + }, + { + "id": 5478455287138638, + "definition": { + "title": "Avg Workflow Success Rate", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": [ + { + "response_format": "scalar", + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_workflow_success_increase1m{$Namespace}.as_rate()", + "aggregator": "avg" + } + ], + "formulas": [ + { + "formula": "query1" + } + ], + "conditional_formats": [ + { + "comparator": ">", + "value": 0, + "palette": "black_on_light_green" + } + ] + } + ], + "autoscale": false, + "precision": 2, + "timeseries_background": { + "yaxis": { + "include_zero": true + }, + "type": "area" + } + }, + "layout": { + "x": 0, + "y": 9, + "width": 4, + "height": 3 + } + }, + { + "id": 2007567121576290, + "definition": { + "title": "Workflow Success Rate over Time ", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "auto", + "legend_columns": [ + "avg", + 
"min", + "max", + "value", + "sum" + ], + "type": "timeseries", + "requests": [ + { + "formulas": [ + { + "formula": "query1" + } + ], + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_workflow_success_increase1m{$Namespace} by {temporal_namespace}.as_rate()" + } + ], + "response_format": "timeseries", + "style": { + "palette": "dog_classic", + "order_by": "values", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "line" + } + ] + }, + "layout": { + "x": 4, + "y": 9, + "width": 8, + "height": 3 + } + }, + { + "id": 6862787917948638, + "definition": { + "title": "Avg Workflow Termination Rate", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": [ + { + "response_format": "scalar", + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_workflow_terminate_increase1m{$Namespace}.as_rate()", + "aggregator": "avg" + } + ], + "formulas": [ + { + "formula": "query1" + } + ], + "conditional_formats": [ + { + "comparator": ">", + "value": 0, + "palette": "black_on_light_red" + } + ] + } + ], + "autoscale": false, + "precision": 2, + "timeseries_background": { + "yaxis": { + "include_zero": true + }, + "type": "area" + } + }, + "layout": { + "x": 0, + "y": 12, + "width": 4, + "height": 3 + } + }, + { + "id": 3869465964272090, + "definition": { + "title": "Workflow Termination Rate over Time ", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "auto", + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "type": "timeseries", + "requests": [ + { + "formulas": [ + { + "formula": "query1" + } + ], + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_workflow_terminate_increase1m{$Namespace} by {temporal_namespace}.as_rate()" + } + ], + "response_format": "timeseries", + "style": 
{ + "palette": "dog_classic", + "order_by": "values", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "line" + } + ] + }, + "layout": { + "x": 4, + "y": 12, + "width": 8, + "height": 3 + } + }, + { + "id": 1056987583365914, + "definition": { + "title": "Avg Workflow Timeout Rate", + "title_size": "16", + "title_align": "left", + "time": {}, + "type": "query_value", + "requests": [ + { + "response_format": "scalar", + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_workflow_timeout_increase1m{$Namespace}.as_rate()", + "aggregator": "avg" + } + ], + "formulas": [ + { + "formula": "query1" + } + ], + "conditional_formats": [ + { + "comparator": ">", + "value": 0, + "palette": "black_on_light_red" + } + ] + } + ], + "autoscale": false, + "precision": 2, + "timeseries_background": { + "yaxis": { + "include_zero": true + }, + "type": "area" + } + }, + "layout": { + "x": 0, + "y": 15, + "width": 4, + "height": 3 + } + }, + { + "id": 5172304773900428, + "definition": { + "title": "Workflow Timeout Rate over Time", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "auto", + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "type": "timeseries", + "requests": [ + { + "formulas": [ + { + "formula": "query1" + } + ], + "queries": [ + { + "name": "query1", + "data_source": "metrics", + "query": "sum:temporal_cloud.cloud_metrics.v0_workflow_timeout_increase1m{$Namespace} by {temporal_namespace}.as_rate()" + } + ], + "response_format": "timeseries", + "style": { + "palette": "dog_classic", + "order_by": "values", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "line" + } + ] + }, + "layout": { + "x": 4, + "y": 15, + "width": 8, + "height": 3 + } + } + ] + }, + "layout": { + "x": 0, + "y": 9, + "width": 12, + "height": 1 + } + }, + { + "id": 2643743209353550, + "definition": { + "title": "Replication Lag 
Overview", + "background_color": "vivid_blue", + "show_title": true, + "type": "group", + "layout_type": "ordered", + "widgets": [ + { + "id": 4988540215976294, + "definition": { + "title": "Avg Replication Lag (P50)", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": [ + { + "response_format": "scalar", + "queries": [ + { + "data_source": "metrics", + "name": "query1", + "query": "avg:temporal_cloud.cloud_metrics.v0_replication_lag_p50{$Namespace}", + "aggregator": "avg" + } + ], + "formulas": [ + { + "number_format": { + "unit": { + "type": "canonical_unit", + "unit_name": "second" + } + }, + "formula": "query1" + } + ], + "conditional_formats": [ + { + "comparator": "<=", + "value": 0.1, + "palette": "black_on_light_green" + }, + { + "comparator": ">", + "value": 0.1, + "palette": "black_on_light_yellow" + }, + { + "comparator": ">", + "value": 0.2, + "palette": "black_on_light_red" + } + ] + } + ], + "autoscale": false, + "precision": 2, + "timeseries_background": { + "type": "area" + } + }, + "layout": { + "x": 0, + "y": 0, + "width": 4, + "height": 3 + } + }, + { + "id": 7728657278450226, + "definition": { + "title": "Replication Lag (P50) over Time", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "auto", + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "time": {}, + "type": "timeseries", + "requests": [ + { + "formulas": [ + { + "formula": "query1" + } + ], + "queries": [ + { + "data_source": "metrics", + "name": "query1", + "query": "avg:temporal_cloud.cloud_metrics.v0_replication_lag_p50{$Namespace} by {temporal_namespace}" + } + ], + "response_format": "timeseries", + "style": { + "palette": "dog_classic", + "order_by": "values", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "line" + } + ] + }, + "layout": { + "x": 4, + "y": 0, + "width": 8, + "height": 3 + } + }, + { + "id": 6760508313946898, + "definition": { + "title": "Avg 
Replication Lag (P90)", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": [ + { + "response_format": "scalar", + "queries": [ + { + "data_source": "metrics", + "name": "query1", + "query": "avg:temporal_cloud.cloud_metrics.v0_replication_lag_p90{$Namespace}", + "aggregator": "avg" + } + ], + "formulas": [ + { + "number_format": { + "unit": { + "type": "canonical_unit", + "unit_name": "second" + } + }, + "formula": "query1" + } + ], + "conditional_formats": [ + { + "comparator": "<=", + "value": 0.1, + "palette": "black_on_light_green" + }, + { + "comparator": ">", + "value": 0.1, + "palette": "black_on_light_yellow" + }, + { + "comparator": ">", + "value": 0.2, + "palette": "black_on_light_red" + } + ] + } + ], + "autoscale": false, + "precision": 2, + "timeseries_background": { + "type": "area" + } + }, + "layout": { + "x": 0, + "y": 3, + "width": 4, + "height": 3 + } + }, + { + "id": 411485780589910, + "definition": { + "title": "Replication Lag (P90) over Time", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "auto", + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "time": {}, + "type": "timeseries", + "requests": [ + { + "formulas": [ + { + "formula": "query1" + } + ], + "queries": [ + { + "data_source": "metrics", + "name": "query1", + "query": "avg:temporal_cloud.cloud_metrics.v0_replication_lag_p90{$Namespace} by {temporal_namespace}" + } + ], + "response_format": "timeseries", + "style": { + "palette": "dog_classic", + "order_by": "values", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "line" + } + ] + }, + "layout": { + "x": 4, + "y": 3, + "width": 8, + "height": 3 + } + }, + { + "id": 1521266053652292, + "definition": { + "title": "Avg Replication Lag (P95)", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": [ + { + "response_format": "scalar", + "queries": [ + { + "data_source": "metrics", + 
"name": "query1", + "query": "avg:temporal_cloud.cloud_metrics.v0_replication_lag_p95{$Namespace}", + "aggregator": "avg" + } + ], + "formulas": [ + { + "number_format": { + "unit": { + "type": "canonical_unit", + "unit_name": "second" + } + }, + "formula": "query1" + } + ], + "conditional_formats": [ + { + "comparator": "<=", + "value": 0.1, + "palette": "black_on_light_green" + }, + { + "comparator": ">", + "value": 0.1, + "palette": "black_on_light_yellow" + }, + { + "comparator": ">", + "value": 0.2, + "palette": "black_on_light_red" + } + ] + } + ], + "autoscale": false, + "precision": 2, + "timeseries_background": { + "type": "area" + } + }, + "layout": { + "x": 0, + "y": 6, + "width": 4, + "height": 3 + } + }, + { + "id": 7111826559326150, + "definition": { + "title": "Replication Lag (P95) over Time", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "auto", + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "type": "timeseries", + "requests": [ + { + "formulas": [ + { + "formula": "query1" + } + ], + "queries": [ + { + "data_source": "metrics", + "name": "query1", + "query": "avg:temporal_cloud.cloud_metrics.v0_replication_lag_p95{$Namespace} by {temporal_namespace}" + } + ], + "response_format": "timeseries", + "style": { + "palette": "dog_classic", + "order_by": "values", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "line" + } + ] + }, + "layout": { + "x": 4, + "y": 6, + "width": 8, + "height": 3 + } + }, + { + "id": 1857933666822954, + "definition": { + "title": "Avg Replication Lag (P99)", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": [ + { + "response_format": "scalar", + "queries": [ + { + "data_source": "metrics", + "name": "query1", + "query": "avg:temporal_cloud.cloud_metrics.v0_replication_lag_p99{$Namespace}", + "aggregator": "avg" + } + ], + "formulas": [ + { + "number_format": { + "unit": { + "type": 
"canonical_unit", + "unit_name": "second" + } + }, + "formula": "query1" + } + ], + "conditional_formats": [ + { + "comparator": "<=", + "value": 0.1, + "palette": "black_on_light_green" + }, + { + "comparator": ">", + "value": 0.1, + "palette": "black_on_light_yellow" + }, + { + "comparator": ">", + "value": 0.2, + "palette": "black_on_light_red" + } + ] + } + ], + "autoscale": false, + "precision": 2, + "timeseries_background": { + "type": "area" + } + }, + "layout": { + "x": 0, + "y": 9, + "width": 4, + "height": 3 + } + }, + { + "id": 1978734770597192, + "definition": { + "title": "Replication Lag (P99) over Time", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "auto", + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "type": "timeseries", + "requests": [ + { + "formulas": [ + { + "formula": "query1" + } + ], + "queries": [ + { + "data_source": "metrics", + "name": "query1", + "query": "avg:temporal_cloud.cloud_metrics.v0_replication_lag_p99{$Namespace} by {temporal_namespace}" + } + ], + "response_format": "timeseries", + "style": { + "palette": "dog_classic", + "order_by": "values", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "line" + } + ] + }, + "layout": { + "x": 4, + "y": 9, + "width": 8, + "height": 3 + } + } + ] + }, + "layout": { + "x": 0, + "y": 10, + "width": 12, + "height": 1 + } + } + ], + "template_variables": [ + { + "name": "Namespace", + "prefix": "temporal_namespace", + "available_values": [], + "default": "*" + } + ], + "layout_type": "ordered", + "notify_list": [], + "reflow_type": "fixed" +} diff --git a/temporal_cloud/assets/monitors/high_grpc_error_percentage.json b/temporal_cloud/assets/monitors/high_grpc_error_percentage.json new file mode 100644 index 0000000000000..8415371395d7b --- /dev/null +++ b/temporal_cloud/assets/monitors/high_grpc_error_percentage.json @@ -0,0 +1,35 @@ +{ + "version": 2, + "created_at": "2024-12-10", + 
"last_updated_at": "2024-12-10", + "title": "High gRPC error percentage", + "description": "This monitor alerts when the percentage of gRPC errors exceeds the defined threshold for your Temporal Cloud instance, indicating potential issues with service communication that could impact workflow executions and overall system reliability.", + "definition": { + "id": 159196278, + "name": "High gRPC error percentage", + "type": "query alert", + "query": "avg(last_5m):(sum:temporal_cloud.cloud_metrics.v0_frontend_service_error_increase1m{*} by {temporal_namespace,operation}.as_rate() / sum:temporal_cloud.cloud_metrics.v0_frontend_service_request_increase1m{*} by {temporal_namespace,operation}.as_rate()) * 100 > 10", + "message": "{{#is_warning}}\nThe gRPC error percentage for Temporal Cloud operation: **{{operation.name}}** in namespace: **{{temporal_namespace.name}}** has exceeded the warning threshold.\nCurrent error percentage: **{{value}}%**\nThreshold: {{warn_threshold}}%\n{{/is_warning}}\n\n{{#is_alert}}\nThe gRPC error percentage for Temporal Cloud operation: **{{operation.name}}** in namespace: **{{temporal_namespace.name}}** has exceeded the alert threshold.\nCurrent error percentage: **{{value}}%**\nThreshold: {{threshold}}%\n{{/is_alert}}\n\n@example@example.com", + "tags": [ + "integration:temporal-cloud" + ], + "options": { + "thresholds": { + "critical": 10, + "warning": 5 + }, + "notify_audit": false, + "on_missing_data": "show_no_data", + "include_tags": false, + "new_group_delay": 0, + "silenced": {} + }, + "priority": 1, + "restriction_policy": { + "bindings": [] + } + }, + "tags": [ + "integration:temporal-cloud" + ] +} diff --git a/temporal_cloud/assets/monitors/high_service_latency.json b/temporal_cloud/assets/monitors/high_service_latency.json new file mode 100644 index 0000000000000..7de6684cb62e8 --- /dev/null +++ b/temporal_cloud/assets/monitors/high_service_latency.json @@ -0,0 +1,35 @@ +{ + "version": 2, + "created_at": "2024-12-10", + 
"last_updated_at": "2024-12-10", + "title": "High service latency", + "description": "This monitor alerts when the 99th percentile service latency exceeds the defined threshold for your Temporal Cloud instance, indicating potential performance degradation that could impact workflow execution times and overall system responsiveness.", + "definition": { + "id": 160133052, + "name": "High service latency", + "type": "query alert", + "query": "avg(last_5m):avg:temporal_cloud.cloud_metrics.v0_service_latency_p99{*} by {temporal_namespace,operation} > 0.2", + "message": "{{#is_warning}}\nThe P99 service latency for Temporal Cloud operation: **{{operation.name}}** in namespace: **{{temporal_namespace.name}}** has exceeded the warning threshold.\nCurrent Service Latency (P99): **{{value}} seconds**\nThreshold: {{warn_threshold}} seconds\n{{/is_warning}}\n\n{{#is_alert}}\nThe P99 service latency for Temporal Cloud operation: **{{operation.name}}** in namespace: **{{temporal_namespace.name}}** has exceeded the alert threshold.\nCurrent Service Latency (P99): **{{value}} seconds**\nThreshold: {{threshold}} seconds\n{{/is_alert}}\n\n@example@example.com", + "tags": [ + "integration:temporal-cloud" + ], + "options": { + "thresholds": { + "critical": 0.2, + "warning": 0.1 + }, + "notify_audit": false, + "on_missing_data": "show_no_data", + "include_tags": false, + "new_group_delay": 60, + "silenced": {} + }, + "priority": 2, + "restriction_policy": { + "bindings": [] + } + }, + "tags": [ + "integration:temporal-cloud" + ] +} diff --git a/temporal_cloud/assets/temporal_cloud.svg b/temporal_cloud/assets/temporal_cloud.svg new file mode 100644 index 0000000000000..d152fae7bb724 --- /dev/null +++ b/temporal_cloud/assets/temporal_cloud.svg @@ -0,0 +1,11 @@ + + + + + + + + + + + diff --git a/temporal_cloud/images/temporal_cloud_overview_1.png b/temporal_cloud/images/temporal_cloud_overview_1.png new file mode 100644 index 0000000000000..e8a7187f37ae5 Binary files /dev/null and 
b/temporal_cloud/images/temporal_cloud_overview_1.png differ diff --git a/temporal_cloud/images/temporal_cloud_overview_2.png b/temporal_cloud/images/temporal_cloud_overview_2.png new file mode 100644 index 0000000000000..3f9977dcfcf82 Binary files /dev/null and b/temporal_cloud/images/temporal_cloud_overview_2.png differ diff --git a/temporal_cloud/images/temporal_cloud_overview_3.png b/temporal_cloud/images/temporal_cloud_overview_3.png new file mode 100644 index 0000000000000..cd08c34104b58 Binary files /dev/null and b/temporal_cloud/images/temporal_cloud_overview_3.png differ diff --git a/temporal_cloud/images/temporal_cloud_overview_4.png b/temporal_cloud/images/temporal_cloud_overview_4.png new file mode 100644 index 0000000000000..112cdb093414f Binary files /dev/null and b/temporal_cloud/images/temporal_cloud_overview_4.png differ diff --git a/temporal_cloud/images/temporal_cloud_overview_5.png b/temporal_cloud/images/temporal_cloud_overview_5.png new file mode 100644 index 0000000000000..1462ff04449a8 Binary files /dev/null and b/temporal_cloud/images/temporal_cloud_overview_5.png differ diff --git a/temporal_cloud/manifest.json b/temporal_cloud/manifest.json index 6a777edc5ab90..d8ffd57a41b10 100644 --- a/temporal_cloud/manifest.json +++ b/temporal_cloud/manifest.json @@ -8,10 +8,38 @@ "configuration": "README.md#Setup", "support": "README.md#Support", "changelog": "CHANGELOG.md", - "description": "", + "description": "Gain insights into system health, workflow efficiency, task execution and performance bottlenecks for your instance.", "title": "Temporal Cloud", - "media": [], + "media": [ + { + "caption": "Temporal Cloud - Overview 1", + "image_url": "images/temporal_cloud_overview_1.png", + "media_type": "image" + }, + { + "caption": "Temporal Cloud - Overview 2", + "image_url": "images/temporal_cloud_overview_2.png", + "media_type": "image" + }, + { + "caption": "Temporal Cloud - Overview 3", + "image_url": "images/temporal_cloud_overview_3.png", + 
"media_type": "image" + }, + { + "caption": "Temporal Cloud - Overview 4", + "image_url": "images/temporal_cloud_overview_4.png", + "media_type": "image" + }, + { + "caption": "Temporal Cloud - Overview 5", + "image_url": "images/temporal_cloud_overview_5.png", + "media_type": "image" + } + ], "classifier_tags": [ + "Category::Cloud", + "Category::Developer Tools", "Category::Metrics", "Offering::Integration", "Submitted Data Type::Metrics" @@ -27,12 +55,19 @@ }, "metrics": { "prefix": "temporal_cloud.", - "check": [], + "check": "temporal_cloud.cloud_metrics.v0_frontend_service_request_increase1m", "metadata_path": "metadata.csv" }, "service_checks": { "metadata_path": "assets/service_checks.json" } + }, + "dashboards": { + "Temporal Cloud - Overview": "assets/dashboards/temporal_cloud_overview.json" + }, + "monitors": { + "High gRPC error percentage": "assets/monitors/high_grpc_error_percentage.json", + "High service latency": "assets/monitors/high_service_latency.json" } }, "author": { diff --git a/temporal_cloud/metadata.csv b/temporal_cloud/metadata.csv index 02cde5e98381e..036ddb89057a4 100644 --- a/temporal_cloud/metadata.csv +++ b/temporal_cloud/metadata.csv @@ -1 +1,27 @@ metric_name,metric_type,interval,unit_name,per_unit_name,description,orientation,integration,short_name,curated_metric,sample_tags +temporal_cloud.cloud_metrics.v0_frontend_service_error_increase1m,count,60,,,Increase in gRPC errors,-1,temporal_cloud,Frontend Service Error,, +temporal_cloud.cloud_metrics.v0_frontend_service_request_increase1m,count,60,,,Increase in gRPC requests received,0,temporal_cloud,Frontend Service Request,, +temporal_cloud.cloud_metrics.v0_poll_success_increase1m,count,60,,,Increase in count tasks that are successfully matched to a poller,1,temporal_cloud,Poll Success,, +temporal_cloud.cloud_metrics.v0_poll_success_sync_increase1m,count,60,,,Increase in count tasks that are successfully sync matched to a poller,1,temporal_cloud,Poll Success Sync,, 
+temporal_cloud.cloud_metrics.v0_poll_timeout_increase1m,count,60,,,"When no tasks are available for a poller before timing out, this is increase in count of such tasks",-1,temporal_cloud,Poll Timeout,, +temporal_cloud.cloud_metrics.v0_replication_lag_p50,gauge,,second,,P50 value using histogram of replication lag during a specific time interval for a multi-region Namespace.,-1,temporal_cloud,Replication Lag P50,, +temporal_cloud.cloud_metrics.v0_replication_lag_p90,gauge,,second,,P90 value using histogram of replication lag during a specific time interval for a multi-region Namespace.,-1,temporal_cloud,Replication Lag P90,, +temporal_cloud.cloud_metrics.v0_replication_lag_p95,gauge,,second,,P95 value using histogram of replication lag during a specific time interval for a multi-region Namespace.,-1,temporal_cloud,Replication Lag P95,, +temporal_cloud.cloud_metrics.v0_replication_lag_p99,gauge,,second,,P99 value using histogram of replication lag during a specific time interval for a multi-region Namespace.,-1,temporal_cloud,Replication Lag P99,, +temporal_cloud.cloud_metrics.v0_resource_exhausted_error_increase1m,count,60,,,Increase in gRPC requests received that were rate-limited,-1,temporal_cloud,Resource Exhausted Error,, +temporal_cloud.cloud_metrics.v0_schedule_action_success_increase1m,count,60,,,Increase in count of successful execution of a Scheduled Workflow.,1,temporal_cloud,Schedule Action Success,, +temporal_cloud.cloud_metrics.v0_schedule_buffer_overruns_increase1m,count,60,,,"When average schedule run length is greater than average schedule interval while a buffer_all overlap policy is configured, this is the increase in count of such scheduled workflow executions",-1,temporal_cloud,Schedule Buffer Overruns,, +temporal_cloud.cloud_metrics.v0_schedule_missed_catchup_window_increase1m,count,60,,,Increase in count of skipped Scheduled executions when Workflows were delayed longer than the catchup window.,-1,temporal_cloud,Schedule Missed Catchup 
Window,, +temporal_cloud.cloud_metrics.v0_schedule_rate_limited_increase1m,count,60,,,Increase in count of Scheduled Workflows that were delayed due to exceeding a rate limit.,-1,temporal_cloud,Schedule Rate Limited,, +temporal_cloud.cloud_metrics.v0_service_latency_p50,gauge,,second,,"P50 latency for SignalWithStartWorkflowExecution, SignalWorkflowExecution, StartWorkflowExecution operations.",-1,temporal_cloud,Service Latency P50,, +temporal_cloud.cloud_metrics.v0_service_latency_p90,gauge,,second,,"P90 latency for SignalWithStartWorkflowExecution, SignalWorkflowExecution, StartWorkflowExecution operations.",-1,temporal_cloud,Service Latency P90,, +temporal_cloud.cloud_metrics.v0_service_latency_p95,gauge,,second,,"P95 latency for SignalWithStartWorkflowExecution, SignalWorkflowExecution, StartWorkflowExecution operations.",-1,temporal_cloud,Service Latency P95,, +temporal_cloud.cloud_metrics.v0_service_latency_p99,gauge,,second,,"P99 latency for SignalWithStartWorkflowExecution, SignalWorkflowExecution, StartWorkflowExecution operations.",-1,temporal_cloud,Service Latency P99,, +temporal_cloud.cloud_metrics.v0_state_transition_increase1m,count,60,,,Increase in count of state transitions for each Namespace,0,temporal_cloud,State Transition,, +temporal_cloud.cloud_metrics.v0_total_action_increase1m,count,60,,,Increase in count of Temporal Cloud Actions,0,temporal_cloud,Total Action,, +temporal_cloud.cloud_metrics.v0_workflow_cancel_increase1m,count,60,,,Increase in count of Workflows canceled before completing execution.,-1,temporal_cloud,Workflow Cancel,, +temporal_cloud.cloud_metrics.v0_workflow_continued_as_new_increase1m,count,60,,,Increase in count of Workflow Executions that were Continued-As-New from a past execution.,0,temporal_cloud,Workflow Continued As New,, +temporal_cloud.cloud_metrics.v0_workflow_failed_increase1m,count,60,,,Increase in count of Workflows that failed before completion.,-1,temporal_cloud,Workflow Failed,, 
+temporal_cloud.cloud_metrics.v0_workflow_success_increase1m,count,60,,,Increase in count of Workflows that successfully completed.,1,temporal_cloud,Workflow Success,, +temporal_cloud.cloud_metrics.v0_workflow_terminate_increase1m,count,60,,,Increase in count of Workflows terminated before completing execution.,-1,temporal_cloud,Workflow Terminate,, +temporal_cloud.cloud_metrics.v0_workflow_timeout_increase1m,count,60,,,Increase in count of Workflows that timed out before completing execution.,-1,temporal_cloud,Workflow Timeout,, diff --git a/tibco_ems/README.md b/tibco_ems/README.md index f6fd38fa81118..d3469729c2545 100644 --- a/tibco_ems/README.md +++ b/tibco_ems/README.md @@ -99,11 +99,11 @@ The TIBCO EMS integration does not include any events. ### Service Checks -See [service_checks.json][8] for a list of service checks provided by this integration. +The TIBCO EMS integration does not include any service checks. ## Troubleshooting -Need help? Contact [Datadog support][9]. +Need help? Contact [Datadog support][8]. [1]: https://docs.tibco.com/products/tibco-enterprise-message-service @@ -113,5 +113,4 @@ Need help? Contact [Datadog support][9]. 
[5]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent [6]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information [7]: https://github.com/DataDog/integrations-core/blob/master/tibco_ems/metadata.csv -[8]: https://github.com/DataDog/integrations-core/blob/master/tibco_ems/assets/service_checks.json -[9]: https://docs.datadoghq.com/help/ +[8]: https://docs.datadoghq.com/help/ diff --git a/vsphere/CHANGELOG.md b/vsphere/CHANGELOG.md index a74374232b029..9bc75b4d8a6d6 100644 --- a/vsphere/CHANGELOG.md +++ b/vsphere/CHANGELOG.md @@ -8,7 +8,7 @@ * Added support for vSAN metric/event collection in the vSphere integration ([#18117](https://github.com/DataDog/integrations-core/pull/18117)) -## 8.0.1 / 2024-10-31 +## 8.0.1 / 2024-10-31 / Agent 7.60.0 ***Fixed***: diff --git a/wincrashdetect/manifest.json b/wincrashdetect/manifest.json index 681efef23fd89..06c1d13dc6ce1 100644 --- a/wincrashdetect/manifest.json +++ b/wincrashdetect/manifest.json @@ -14,7 +14,18 @@ "classifier_tags": [ "Supported OS::Windows", "Category::OS & System", + "Category::Windows", "Offering::Integration" + ], + "resources": [ + { + "resource_type": "documentation", + "url": "https://docs.datadoghq.com/integrations/wincrashdetect/" + }, + { + "resource_type": "blog", + "url": "https://www.datadoghq.com/blog/troubleshoot-windows-blue-screen-errors/" + } ] }, "author": {