diff --git a/.github/dockerfiles/Dockerfile.clang b/.github/dockerfiles/Dockerfile.clang index 869254198c18fc..4864fa0d4f83b1 100644 --- a/.github/dockerfiles/Dockerfile.clang +++ b/.github/dockerfiles/Dockerfile.clang @@ -16,4 +16,4 @@ WORKDIR /netdata COPY . . # Build Netdata -RUN ./netdata-installer.sh --dont-wait --dont-start-it --disable-go --require-cloud +RUN ./netdata-installer.sh --dont-wait --dont-start-it --disable-go diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 073d85b7a824df..516d2c1a0b1ff0 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -930,24 +930,18 @@ jobs: id: load if: needs.file-check.outputs.run == 'true' run: docker load --input image.tar - - name: netdata-installer on ${{ matrix.distro }}, disable cloud - id: build-no-cloud - if: needs.file-check.outputs.run == 'true' - run: | - docker run --security-opt seccomp=unconfined -w /netdata test:${{ matrix.artifact_key }} \ - /bin/sh -c './netdata-installer.sh --dont-wait --dont-start-it --disable-cloud --one-time-build ${{ needs.file-check.outputs.skip-go }}' - name: netdata-installer on ${{ matrix.distro }}, require cloud id: build-cloud if: needs.file-check.outputs.run == 'true' run: | docker run --security-opt seccomp=unconfined -w /netdata test:${{ matrix.artifact_key }} \ - /bin/sh -c './netdata-installer.sh --dont-wait --dont-start-it --require-cloud --one-time-build ${{ needs.file-check.outputs.skip-go }}' + /bin/sh -c './netdata-installer.sh --dont-wait --dont-start-it --one-time-build ${{ needs.file-check.outputs.skip-go }}' - name: netdata-installer on ${{ matrix.distro }}, require cloud, no JSON-C id: build-no-jsonc if: matrix.jsonc_removal != '' && needs.file-check.outputs.run == 'true' run: | docker run --security-opt seccomp=unconfined -w /netdata test:${{ matrix.artifact_key }} \ - /bin/sh -c '/rmjsonc.sh && ./netdata-installer.sh --dont-wait --dont-start-it --require-cloud --one-time-build ${{ 
needs.file-check.outputs.skip-go }}' + /bin/sh -c '/rmjsonc.sh && ./netdata-installer.sh --dont-wait --dont-start-it --one-time-build ${{ needs.file-check.outputs.skip-go }}' - name: Failure Notification uses: rtCamp/action-slack-notify@v2 env: @@ -1015,7 +1009,7 @@ jobs: id: build-source if: needs.file-check.outputs.run == 'true' run: | - sudo bash ./netdata-installer.sh --install-no-prefix /usr/local/netdata --dont-wait --dont-start-it --require-cloud --one-time-build + sudo bash ./netdata-installer.sh --install-no-prefix /usr/local/netdata --dont-wait --dont-start-it --one-time-build - name: Test Agent start up id: test-agent if: needs.file-check.outputs.run == 'true' diff --git a/.gitignore b/.gitignore index 05b503bb308c01..8f5842dbe410dc 100644 --- a/.gitignore +++ b/.gitignore @@ -88,7 +88,6 @@ system/systemd/netdata-updater.service !system/systemd/netdata.service.*.in src/health/notifications/alarm-notify.sh -claim/netdata-claim.sh src/collectors/cgroups.plugin/cgroup-name.sh src/collectors/cgroups.plugin/cgroup-network-helper.sh src/collectors/tc.plugin/tc-qos-helper.sh diff --git a/CMakeLists.txt b/CMakeLists.txt index 3b13664e18e82f..c49c7ff5537c75 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -147,8 +147,6 @@ option(DEFAULT_FEATURE_STATE "Specify the default state for most optional featur mark_as_advanced(DEFAULT_FEATURE_STATE) # High-level features -option(ENABLE_ACLK "Enable Netdata Cloud support (ACLK)" ${DEFAULT_FEATURE_STATE}) -option(ENABLE_CLOUD "Enable Netdata Cloud by default at runtime" ${DEFAULT_FEATURE_STATE}) option(ENABLE_ML "Enable machine learning features" ${DEFAULT_FEATURE_STATE}) option(ENABLE_DBENGINE "Enable dbengine metrics storage" True) @@ -198,11 +196,7 @@ mark_as_advanced(BUILD_FOR_PACKAGING) cmake_dependent_option(FORCE_LEGACY_LIBBPF "Force usage of libbpf 0.0.9 instead of the latest version." 
False "ENABLE_PLUGIN_EBPF" False) mark_as_advanced(FORCE_LEGACY_LIBBPF) -if(ENABLE_ACLK OR ENABLE_EXPORTER_PROMETHEUS_REMOTE_WRITE) - set(NEED_PROTOBUF True) -else() - set(NEED_PROTOBUF False) -endif() +set(NEED_PROTOBUF True) if(ENABLE_PLUGIN_GO) include(NetdataGoTools) @@ -261,6 +255,9 @@ if(ENABLE_PLUGIN_EBPF) netdata_fetch_ebpf_co_re() endif() +pkg_check_modules(CURL libcurl>=7.21 REQUIRED IMPORTED_TARGET) +set(HAVE_LIBCURL TRUE) + # # Libm # @@ -524,7 +521,6 @@ if(OS_FREEBSD OR OS_MACOS) endif() # openssl/crypto -set(ENABLE_OPENSSL True) pkg_check_modules(TLS IMPORTED_TARGET openssl) if(NOT TARGET PkgConfig::TLS) @@ -743,14 +739,23 @@ set(LIBNETDATA_FILES src/libnetdata/os/setenv.h src/libnetdata/os/strndup.c src/libnetdata/os/strndup.h - src/libnetdata/spawn_server/spawn_server.c + src/libnetdata/spawn_server/spawn_server_nofork.c src/libnetdata/spawn_server/spawn_server.h src/libnetdata/spawn_server/spawn_popen.c src/libnetdata/spawn_server/spawn_popen.h + src/libnetdata/spawn_server/spawn_server_windows.c + src/libnetdata/spawn_server/spawn_server_internals.h + src/libnetdata/spawn_server/spawn_server_libuv.c + src/libnetdata/spawn_server/spawn_server_posix.c + src/libnetdata/spawn_server/spawn_library.c + src/libnetdata/spawn_server/spawn_library.h src/libnetdata/os/close_range.c src/libnetdata/os/close_range.h src/libnetdata/os/setproctitle.c src/libnetdata/os/setproctitle.h + src/libnetdata/paths/paths.c + src/libnetdata/paths/paths.h + src/libnetdata/json/json-c-parser-inline.c ) if(ENABLE_PLUGIN_EBPF) @@ -849,14 +854,15 @@ set(DAEMON_FILES src/daemon/common.h src/daemon/daemon.c src/daemon/daemon.h - src/daemon/event_loop.c - src/daemon/event_loop.h + src/daemon/libuv_workers.c + src/daemon/libuv_workers.h src/daemon/global_statistics.c src/daemon/global_statistics.h src/daemon/analytics.c src/daemon/analytics.h src/daemon/main.c src/daemon/main.h + src/daemon/environment.c src/daemon/win_system-info.c src/daemon/win_system-info.h src/daemon/signals.c 
@@ -905,16 +911,65 @@ set(API_PLUGIN_FILES src/web/api/web_api_v1.h src/web/api/web_api_v2.c src/web/api/web_api_v2.h + src/web/api/web_api_v3.c + src/web/api/web_api_v3.h src/web/api/http_auth.c src/web/api/http_auth.h src/web/api/http_header.c src/web/api/http_header.h - src/web/api/badges/web_buffer_svg.c - src/web/api/badges/web_buffer_svg.h - src/web/api/exporters/allmetrics.c - src/web/api/exporters/allmetrics.h - src/web/api/exporters/shell/allmetrics_shell.c - src/web/api/exporters/shell/allmetrics_shell.h + src/web/api/maps/rrdr_options.c + src/web/api/maps/rrdr_options.h + src/web/api/maps/contexts_options.c + src/web/api/maps/contexts_options.h + src/web/api/maps/datasource_formats.c + src/web/api/maps/datasource_formats.h + src/web/api/maps/maps.h + src/web/api/maps/contexts_alert_statuses.c + src/web/api/maps/contexts_alert_statuses.h + src/web/api/v1/api_v1_allmetrics.c + src/web/api/v1/api_v1_badge/web_buffer_svg.c + src/web/api/v1/api_v1_function.c + src/web/api/v1/api_v1_manage.c + src/web/api/v1/api_v1_calls.h + src/web/api/v1/api_v1_dbengine.c + src/web/api/v1/api_v1_config.c + src/web/api/v1/api_v1_functions.c + src/web/api/v1/api_v1_weights.c + src/web/api/v1/api_v1_info.c + src/web/api/v1/api_v1_registry.c + src/web/api/v1/api_v1_data.c + src/web/api/v1/api_v1_contexts.c + src/web/api/v1/api_v1_ml_info.c + src/web/api/v1/api_v1_aclk.c + src/web/api/v1/api_v1_context.c + src/web/api/v1/api_v1_alarms.c + src/web/api/v1/api_v1_charts.c + src/web/api/v2/api_v2_info.c + src/web/api/v2/api_v2_nodes.c + src/web/api/v2/api_v2_node_instances.c + src/web/api/v2/api_v2_q.c + src/web/api/v2/api_v2_versions.c + src/web/api/v2/api_v2_functions.c + src/web/api/v2/api_v2_alerts.c + src/web/api/v2/api_v2_alert_transitions.c + src/web/api/v2/api_v2_ilove/ilove.c + src/web/api/v2/api_v2_bearer.c + src/web/api/v2/api_v2_calls.h + src/web/api/v2/api_v2_data.c + src/web/api/v2/api_v2_progress.c + src/web/api/v2/api_v2_weights.c + 
src/web/api/v2/api_v2_alert_config.c + src/web/api/v2/api_v2_contexts.c + src/web/api/v2/api_v2_claim.c + src/web/api/v2/api_v2_webrtc.c + src/web/api/v3/api_v3_calls.h + src/web/api/v3/api_v3_settings.c + src/web/api/functions/functions.c + src/web/api/functions/functions.h + src/web/api/functions/function-progress.c + src/web/api/functions/function-progress.h + src/web/api/functions/function-streaming.c + src/web/api/functions/function-streaming.h src/web/api/queries/rrdr.c src/web/api/queries/rrdr.h src/web/api/queries/query.c @@ -961,10 +1016,11 @@ set(API_PLUGIN_FILES src/web/api/formatters/charts2json.h src/web/api/formatters/rrdset2json.c src/web/api/formatters/rrdset2json.h - src/web/api/ilove/ilove.c - src/web/api/ilove/ilove.h src/web/rtc/webrtc.c src/web/rtc/webrtc.h + src/web/api/functions/function-bearer_get_token.c + src/web/api/functions/function-bearer_get_token.h + src/web/api/v3/api_v3_me.c ) set(EXPORTING_ENGINE_FILES @@ -1055,8 +1111,14 @@ set(PLUGINSD_PLUGIN_FILES ) set(RRD_PLUGIN_FILES - src/database/contexts/api_v1.c - src/database/contexts/api_v2.c + src/database/contexts/api_v1_contexts.c + src/database/contexts/api_v2_contexts.c + src/database/contexts/api_v2_contexts.h + src/database/contexts/api_v2_contexts_agents.c + src/database/contexts/api_v2_contexts_alerts.c + src/database/contexts/api_v2_contexts_alerts.h + src/database/contexts/api_v2_contexts_alert_transitions.c + src/database/contexts/api_v2_contexts_alert_config.c src/database/contexts/context.c src/database/contexts/instance.c src/database/contexts/internal.h @@ -1073,10 +1135,6 @@ set(RRD_PLUGIN_FILES src/database/rrdfunctions.h src/database/rrdfunctions-inline.c src/database/rrdfunctions-inline.h - src/database/rrdfunctions-progress.c - src/database/rrdfunctions-progress.h - src/database/rrdfunctions-streaming.c - src/database/rrdfunctions-streaming.h src/database/rrdhost.c src/database/rrdlabels.c src/database/rrd.c @@ -1200,6 +1258,10 @@ set(STREAMING_PLUGIN_FILES 
src/streaming/replication.c src/streaming/replication.h src/streaming/common.h + src/streaming/protocol/command-nodeid.c + src/streaming/protocol/commands.c + src/streaming/protocol/commands.h + src/streaming/protocol/command-claimed_id.c ) set(WEB_PLUGIN_FILES @@ -1216,6 +1278,12 @@ set(WEB_PLUGIN_FILES set(CLAIM_PLUGIN_FILES src/claim/claim.c src/claim/claim.h + src/claim/claim_id.c + src/claim/claim_id.h + src/claim/cloud-conf.c + src/claim/claim-with-api.c + src/claim/cloud-status.c + src/claim/cloud-status.h ) set(ACLK_ALWAYS_BUILD @@ -1677,10 +1745,7 @@ endif() # # mqtt library # -if (ENABLE_H2O OR ENABLE_ACLK) - set(ENABLE_MQTTWEBSOCKETS True) -endif() - +set(ENABLE_MQTTWEBSOCKETS True) if(ENABLE_MQTTWEBSOCKETS) add_library(mqttwebsockets STATIC ${MQTT_WEBSOCKETS_FILES}) @@ -1695,20 +1760,17 @@ if(ENABLE_MQTTWEBSOCKETS) endif() -if(ENABLE_ACLK) - # - # proto definitions - # - netdata_protoc_generate_cpp("${CMAKE_SOURCE_DIR}/src/aclk/aclk-schemas" - "${CMAKE_SOURCE_DIR}/src/aclk/aclk-schemas" - ACLK_PROTO_BUILT_SRCS - ACLK_PROTO_BUILT_HDRS - ${ACLK_PROTO_DEFS}) - - list(APPEND ACLK_FILES ${ACLK_PROTO_BUILT_SRCS} - ${ACLK_PROTO_BUILT_HDRS}) +# +# proto definitions +# +netdata_protoc_generate_cpp("${CMAKE_SOURCE_DIR}/src/aclk/aclk-schemas" + "${CMAKE_SOURCE_DIR}/src/aclk/aclk-schemas" + ACLK_PROTO_BUILT_SRCS + ACLK_PROTO_BUILT_HDRS + ${ACLK_PROTO_DEFS}) -endif() +list(APPEND ACLK_FILES ${ACLK_PROTO_BUILT_SRCS} + ${ACLK_PROTO_BUILT_HDRS}) # # build plugins @@ -1740,6 +1802,9 @@ if(ENABLE_PLUGIN_DEBUGFS) endif() endif() +add_executable(spawn-tester src/libnetdata/spawn_server/spawn-tester.c) +target_link_libraries(spawn-tester libnetdata) + if(ENABLE_PLUGIN_APPS) pkg_check_modules(CAP QUIET libcap) @@ -2164,7 +2229,7 @@ endif() add_executable(netdata ${NETDATA_FILES} - "$<$:${ACLK_FILES}>" + "${ACLK_FILES}" "$<$:${H2O_FILES}>" "$<$:${MONGODB_EXPORTING_FILES}>" "$<$:${PROMETHEUS_REMOTE_WRITE_EXPORTING_FILES}>" @@ -2180,7 +2245,7 @@ target_compile_options(netdata 
PRIVATE ) target_include_directories(netdata PRIVATE - "$<$:${CMAKE_SOURCE_DIR}/src/aclk/aclk-schemas>" + "${CMAKE_SOURCE_DIR}/src/aclk/aclk-schemas" "$<$:${MONGOC_INCLUDE_DIRS}>" "$<$:${SNAPPY_INCLUDE_DIRS}>" ) @@ -2196,6 +2261,7 @@ target_link_libraries(netdata PRIVATE "$<$:sentry>" "$<$:LibDataChannel::LibDataChannelStatic>" "$<$:h2o>" + "$<$:PkgConfig::CURL>" ) if(NEED_PROTOBUF) @@ -2349,19 +2415,7 @@ set(cachedir_POST "${NETDATA_RUNTIME_PREFIX}/var/cache/netdata") set(registrydir_POST "${NETDATA_RUNTIME_PREFIX}/var/lib/netdata/registry") set(varlibdir_POST "${NETDATA_RUNTIME_PREFIX}/var/lib/netdata") set(netdata_user_POST "${NETDATA_USER}") - -# netdata-claim.sh -if(ENABLE_CLOUD) - set(enable_cloud_POST "yes") -else() - set(enable_cloud_POST "no") -endif() - -if(ENABLE_ACLK) - set(enable_aclk_POST "yes") -else() - set(enable_aclk_POST "no") -endif() +set(netdata_group_POST "${NETDATA_USER}") configure_file(src/claim/netdata-claim.sh.in src/claim/netdata-claim.sh @ONLY) install(PROGRAMS diff --git a/netdata-installer.sh b/netdata-installer.sh index 539348018915a4..b69f17a3a337ba 100755 --- a/netdata-installer.sh +++ b/netdata-installer.sh @@ -202,12 +202,9 @@ USAGE: ${PROGRAM} [options] --nightly-channel Use most recent nightly updates instead of GitHub releases. This results in more frequent updates. --disable-ebpf Disable eBPF Kernel plugin. Default: enabled. - --disable-cloud Disable all Netdata Cloud functionality. - --require-cloud Fail the install if it can't build Netdata Cloud support. --force-legacy-cxx Force usage of an older C++ standard to allow building on older systems. This will usually be autodetected. --enable-plugin-freeipmi Enable the FreeIPMI plugin. Default: enable it when libipmimonitoring is available. --disable-plugin-freeipmi Explicitly disable the FreeIPMI plugin. - --disable-https Explicitly disable TLS support. --disable-dbengine Explicitly disable DB engine support. --enable-plugin-go Enable the Go plugin. 
Default: Enabled when possible. --disable-plugin-go Disable the Go plugin. @@ -257,7 +254,6 @@ NETDATA_ENABLE_ML="" ENABLE_DBENGINE=1 ENABLE_GO=1 ENABLE_H2O=1 -ENABLE_CLOUD=1 FORCE_LEGACY_CXX=0 NETDATA_CMAKE_OPTIONS="${NETDATA_CMAKE_OPTIONS-}" @@ -279,9 +275,7 @@ while [ -n "${1}" ]; do "--enable-plugin-freeipmi") ENABLE_FREEIPMI=1 ;; "--disable-plugin-freeipmi") ENABLE_FREEIPMI=0 ;; "--disable-https") - ENABLE_DBENGINE=0 - ENABLE_H2O=0 - ENABLE_CLOUD=0 + warning "HTTPS cannot be disabled." ;; "--disable-dbengine") ENABLE_DBENGINE=0 ;; "--enable-plugin-go") ENABLE_GO=1 ;; @@ -328,21 +322,9 @@ while [ -n "${1}" ]; do # XXX: No longer supported ;; "--disable-cloud") - if [ -n "${NETDATA_REQUIRE_CLOUD}" ]; then - warning "Cloud explicitly enabled, ignoring --disable-cloud." - else - ENABLE_CLOUD=0 - NETDATA_DISABLE_CLOUD=1 - fi - ;; - "--require-cloud") - if [ -n "${NETDATA_DISABLE_CLOUD}" ]; then - warning "Cloud explicitly disabled, ignoring --require-cloud." - else - ENABLE_CLOUD=1 - NETDATA_REQUIRE_CLOUD=1 - fi + warning "Cloud cannot be disabled." ;; + "--require-cloud") ;; "--build-json-c") NETDATA_BUILD_JSON_C=1 ;; diff --git a/netdata.spec.in b/netdata.spec.in index 67d7de4a08e3dc..aa1cf6ecbf0ade 100644 --- a/netdata.spec.in +++ b/netdata.spec.in @@ -388,8 +388,6 @@ happened, on your systems and applications. 
%else -DENABLE_EXPORTER_MONGODB=Off \ %endif - -DENABLE_ACLK=On \ - -DENABLE_CLOUD=On \ -DENABLE_DBENGINE=On \ -DENABLE_H2O=On \ -DENABLE_PLUGIN_APPS=On \ diff --git a/packaging/build-package.sh b/packaging/build-package.sh index 453e167f4588d7..d2eedc1c04296c 100755 --- a/packaging/build-package.sh +++ b/packaging/build-package.sh @@ -26,8 +26,6 @@ add_cmake_option() { add_cmake_option CMAKE_BUILD_TYPE RelWithDebInfo add_cmake_option CMAKE_INSTALL_PREFIX / -add_cmake_option ENABLE_ACLK On -add_cmake_option ENABLE_CLOUD On add_cmake_option ENABLE_DBENGINE On add_cmake_option ENABLE_H2O On add_cmake_option ENABLE_ML On diff --git a/packaging/cmake/Modules/NetdataJSONC.cmake b/packaging/cmake/Modules/NetdataJSONC.cmake index 89ec70265029ac..db18c14b2134dd 100644 --- a/packaging/cmake/Modules/NetdataJSONC.cmake +++ b/packaging/cmake/Modules/NetdataJSONC.cmake @@ -71,7 +71,7 @@ endfunction() # NETDATA_JSONC_* variables for later use. macro(netdata_detect_jsonc) if(NOT ENABLE_BUNDLED_JSONC) - pkg_check_modules(JSONC json-c) + pkg_check_modules(JSONC json-c>=0.14) endif() if(NOT JSONC_FOUND) diff --git a/packaging/cmake/config.cmake.h.in b/packaging/cmake/config.cmake.h.in index 57d032693b834c..e3297af032c3f0 100644 --- a/packaging/cmake/config.cmake.h.in +++ b/packaging/cmake/config.cmake.h.in @@ -67,6 +67,7 @@ #cmakedefine HAVE_GETPRIORITY #cmakedefine HAVE_SETENV #cmakedefine HAVE_DLSYM +#cmakedefine HAVE_LIBCURL #cmakedefine HAVE_BACKTRACE #cmakedefine HAVE_CLOSE_RANGE @@ -103,14 +104,10 @@ // enabled features -#cmakedefine ENABLE_OPENSSL -#cmakedefine ENABLE_CLOUD -#cmakedefine ENABLE_ACLK #cmakedefine ENABLE_ML #cmakedefine ENABLE_EXPORTING_MONGODB #cmakedefine ENABLE_H2O #cmakedefine ENABLE_DBENGINE -#cmakedefine ENABLE_HTTPS #cmakedefine ENABLE_LZ4 #cmakedefine ENABLE_ZSTD #cmakedefine ENABLE_BROTLI @@ -182,7 +179,6 @@ // #cmakedefine ENABLE_PROMETHEUS_REMOTE_WRITE // /* NSA spy stuff */ -// #define ENABLE_HTTPS 1 // #cmakedefine01 
HAVE_X509_VERIFY_PARAM_set1_host #define HAVE_CRYPTO diff --git a/packaging/dag/imageutils.py b/packaging/dag/imageutils.py index fd1e8ad26bd39d..42aba077c59480 100644 --- a/packaging/dag/imageutils.py +++ b/packaging/dag/imageutils.py @@ -345,7 +345,6 @@ def static_build_netdata( "--dont-wait", "--dont-start-it", "--disable-exporting-mongodb", - "--require-cloud", "--use-system-protobuf", "--dont-scrub-cflags-even-though-it-may-break-things", "--one-time-build", diff --git a/packaging/docker/Dockerfile b/packaging/docker/Dockerfile index b12af313d74a32..450dbb8bbc203d 100644 --- a/packaging/docker/Dockerfile +++ b/packaging/docker/Dockerfile @@ -47,7 +47,6 @@ RUN mkdir -p /app/usr/sbin/ \ mv /var/lib/netdata /app/var/lib/ && \ mv /etc/netdata /app/etc/ && \ mv /usr/sbin/netdata /app/usr/sbin/ && \ - mv /usr/sbin/netdata-claim.sh /app/usr/sbin/ && \ mv /usr/sbin/netdatacli /app/usr/sbin/ && \ mv /usr/sbin/systemd-cat-native /app/usr/sbin/ && \ mv packaging/docker/run.sh /app/usr/sbin/ && \ diff --git a/packaging/docker/run.sh b/packaging/docker/run.sh index 6ba16d1ce5624b..56a818d2f8ff18 100755 --- a/packaging/docker/run.sh +++ b/packaging/docker/run.sh @@ -110,14 +110,4 @@ if [ -w "/etc/netdata" ]; then fi fi -if [ -n "${NETDATA_CLAIM_URL}" ] && [ -n "${NETDATA_CLAIM_TOKEN}" ] && [ ! 
-f /var/lib/netdata/cloud.d/claimed_id ]; then - # shellcheck disable=SC2086 - /usr/sbin/netdata-claim.sh -token="${NETDATA_CLAIM_TOKEN}" \ - -url="${NETDATA_CLAIM_URL}" \ - ${NETDATA_CLAIM_ROOMS:+-rooms="${NETDATA_CLAIM_ROOMS}"} \ - ${NETDATA_CLAIM_PROXY:+-proxy="${NETDATA_CLAIM_PROXY}"} \ - ${NETDATA_EXTRA_CLAIM_OPTS} \ - -daemon-not-running -fi - exec /usr/sbin/netdata -u "${DOCKER_USR}" -D -s /host -p "${NETDATA_LISTENER_PORT}" "$@" diff --git a/packaging/installer/functions.sh b/packaging/installer/functions.sh index c339ac87c6571e..a4df80866ce0d2 100644 --- a/packaging/installer/functions.sh +++ b/packaging/installer/functions.sh @@ -341,8 +341,6 @@ prepare_cmake_options() { enable_feature PLUGIN_NETWORK_VIEWER "${IS_LINUX}" enable_feature PLUGIN_EBPF "${ENABLE_EBPF:-0}" - enable_feature ACLK "${ENABLE_CLOUD:-1}" - enable_feature CLOUD "${ENABLE_CLOUD:-1}" enable_feature BUNDLED_JSONC "${NETDATA_BUILD_JSON_C:-0}" enable_feature DBENGINE "${ENABLE_DBENGINE:-1}" enable_feature H2O "${ENABLE_H2O:-1}" diff --git a/packaging/installer/install-required-packages.sh b/packaging/installer/install-required-packages.sh index e97902026210ea..e0be30e4619c5c 100755 --- a/packaging/installer/install-required-packages.sh +++ b/packaging/installer/install-required-packages.sh @@ -825,6 +825,17 @@ declare -A pkg_libuuid_dev=( ['default']="" ) +declare -A pkg_libcurl_dev=( + ['alpine']="curl-dev" + ['arch']="curl" + ['clearlinux']="devpkg-curl" + ['debian']="libcurl4-openssl-dev" + ['gentoo']="net-misc/curl" + ['ubuntu']="libcurl4-openssl-dev" + ['macos']="curl" + ['default']="libcurl-devel" +) + declare -A pkg_libmnl_dev=( ['alpine']="libmnl-dev" ['arch']="libmnl" @@ -1246,6 +1257,7 @@ packages() { suitable_package libyaml-dev suitable_package libsystemd-dev suitable_package pcre2 + suitable_package libcurl-dev fi # ------------------------------------------------------------------------- diff --git a/packaging/installer/kickstart.sh b/packaging/installer/kickstart.sh index 
72b82be261c753..ee63489378ea7b 100755 --- a/packaging/installer/kickstart.sh +++ b/packaging/installer/kickstart.sh @@ -53,11 +53,9 @@ INSTALL_PREFIX="" NETDATA_AUTO_UPDATES="default" NETDATA_CLAIM_URL="https://app.netdata.cloud" NETDATA_COMMAND="default" -NETDATA_DISABLE_CLOUD=0 NETDATA_INSTALLER_OPTIONS="" NETDATA_FORCE_METHOD="" NETDATA_OFFLINE_INSTALL_SOURCE="" -NETDATA_REQUIRE_CLOUD=1 NETDATA_WARNINGS="" RELEASE_CHANNEL="default" @@ -149,8 +147,6 @@ main() { if [ -n "${NETDATA_CLAIM_TOKEN}" ]; then claim - elif [ "${NETDATA_DISABLE_CLOUD}" -eq 1 ]; then - soft_disable_cloud fi set_auto_updates @@ -185,8 +181,6 @@ USAGE: kickstart.sh [options] --native-only Only install if native binary packages are available. --static-only Only install if a static build is available. --build-only Only install using a local build. - --disable-cloud Disable support for Netdata Cloud (default: detect) - --require-cloud Only install if Netdata Cloud can be enabled. Overrides --disable-cloud. --install-prefix Specify an installation prefix for local builds (default: autodetect based on system type). --old-install-prefix Specify an old local builds installation prefix for uninstall/reinstall (if it's not default). --install-version Specify the version of Netdata to install. 
@@ -1183,41 +1177,6 @@ handle_existing_install() { esac } -soft_disable_cloud() { - set_tmpdir - - cloud_prefix="${INSTALL_PREFIX}/var/lib/netdata/cloud.d" - - run_as_root mkdir -p "${cloud_prefix}" - - cat > "${tmpdir}/cloud.conf" << EOF -[global] - enabled = no -EOF - - run_as_root cp "${tmpdir}/cloud.conf" "${cloud_prefix}/cloud.conf" - - if [ -z "${NETDATA_NO_START}" ]; then - case "${SYSTYPE}" in - Darwin) run_as_root launchctl kickstart -k com.github.netdata ;; - FreeBSD) run_as_root service netdata restart ;; - Linux) - initpath="$(run_as_root readlink /proc/1/exe)" - - if command -v service > /dev/null 2>&1; then - run_as_root service netdata restart - elif command -v rc-service > /dev/null 2>&1; then - run_as_root rc-service netdata restart - elif [ "$(basename "${initpath}" 2> /dev/null)" = "systemd" ]; then - run_as_root systemctl restart netdata - elif [ -f /etc/init.d/netdata ]; then - run_as_root /etc/init.d/netdata restart - fi - ;; - esac - fi -} - confirm_install_prefix() { if [ -n "${INSTALL_PREFIX}" ] && [ "${NETDATA_FORCE_METHOD}" != 'build' ]; then fatal "The --install-prefix option is only supported together with the --build-only option." F0204 @@ -1246,10 +1205,9 @@ check_claim_opts() { # shellcheck disable=SC2235,SC2030 if [ -z "${NETDATA_CLAIM_TOKEN}" ] && [ -n "${NETDATA_CLAIM_ROOMS}" ]; then fatal "Invalid claiming options, claim rooms may only be specified when a token is specified." F0204 - elif [ -z "${NETDATA_CLAIM_TOKEN}" ] && [ -n "${NETDATA_CLAIM_EXTRA}" ]; then + elif [ -z "${NETDATA_CLAIM_TOKEN}" ] && [ -n "${NETDATA_CLAIM_EXTRA}${NETDATA_CLAIM_PROXY}${NETDATA_CLAIM_NORELOAD}${NETDATA_CLAIM_INSECURE}" ]; then + # The above condition checks if _any_ claiming options other than the rooms have been set when the token is unset. fatal "Invalid claiming options, a claiming token must be specified." 
F0204 - elif [ "${NETDATA_DISABLE_CLOUD}" -eq 1 ] && [ -n "${NETDATA_CLAIM_TOKEN}" ]; then - fatal "Cloud explicitly disabled, but automatic claiming requested. Either enable Netdata Cloud, or remove the --claim-* options." F0204 fi } @@ -1277,6 +1235,93 @@ is_netdata_running() { fi } +write_claim_config() { + if [ -z "${INSTALL_PREFIX}" ] || [ "${INSTALL_PREFIX}" = "/" ]; then + config_path="/etc/netdata" + netdatacli="$(command -v netdatacli)" + elif [ "${INSTALL_PREFIX}" = "/opt/netdata" ]; then + config_path="/opt/netdata/etc/netdata" + netdatacli="/opt/netdata/bin/netdatacli" + elif [ ! -d "${INSTALL_PREFIX}/netdata" ]; then + config_path="${INSTALL_PREFIX}/etc/netdata" + netdatacli="${INSTALL_PREFIX}/usr/sbin/netdatacli" + else + config_path="${INSTALL_PREFIX}/netdata/etc/netdata" + netdatacli="${INSTALL_PREFIX}/netdata/usr/sbin/netdatacli" + fi + + claim_config="${config_path}/claim.conf" + + if [ "${DRY_RUN}" -eq 1 ]; then + progress "Would attempt to write claiming configuration to ${claim_config}" + return 0 + fi + + progress "Writing claiming configuration to ${claim_config}" + + config="[global]" + config="${config}\n url = ${NETDATA_CLAIM_URL}" + config="${config}\n token = ${NETDATA_CLAIM_TOKEN}" + if [ -n "${NETDATA_CLAIM_ROOMS}" ]; then + config="${config}\n rooms = ${NETDATA_CLAIM_ROOMS}" + fi + if [ -n "${NETDATA_CLAIM_PROXY}" ]; then + config="${config}\n proxy = ${NETDATA_CLAIM_PROXY}" + fi + if [ -n "${NETDATA_CLAIM_INSECURE}" ]; then + config="${config}\n insecure = ${NETDATA_CLAIM_INSECURE}" + fi + + run_as_root touch "${claim_config}.tmp" || return 1 + run_as_root chmod 0640 "${claim_config}.tmp" || return 1 + run_as_root chown ":${NETDATA_CLAIM_GROUP:-netdata}" "${claim_config}.tmp" || return 1 + run_as_root echo "${config}" > "${claim_config}.tmp" || return 1 + run_as_root mv -f "${claim_config}.tmp" "${claim_config}" || return 1 + + if [ -z "${NETDATA_CLAIM_NORELOAD}" ]; then + run_as_root "${netdatacli}" reload-claiming-state || return 1 
+ fi +} + +run_claim_script() { + if [ -n "${NETDATA_CLAIM_NORELOAD}" ]; then + NETDATA_CLAIM_EXTRA="${NETDATA_CLAIM_EXTRA} -daemon-not-running" + fi + + if [ -n "${NETDATA_CLAIM_INSECURE}" ]; then + NETDATA_CLAIM_EXTRA="${NETDATA_CLAIM_EXTRA} -insecure" + fi + + if [ -n "${NETDATA_CLAIM_PROXY}" ]; then + if [ "${NETDATA_CLAIM_PROXY}" = "none" ]; then + NETDATA_CLAIM_EXTRA="${NETDATA_CLAIM_EXTRA} -noproxy" + else + NETDATA_CLAIM_EXTRA="${NETDATA_CLAIM_EXTRA} -proxy=${NETDATA_CLAIM_PROXY}" + fi + fi + + # shellcheck disable=SC2086 + run_as_root "${NETDATA_CLAIM_PATH}" -token="${NETDATA_CLAIM_TOKEN}" -rooms="${NETDATA_CLAIM_ROOMS}" -url="${NETDATA_CLAIM_URL}" ${NETDATA_CLAIM_EXTRA} + case $? in + 0) progress "Successfully claimed node" ;; + 1) warning "Unable to claim node due to invalid claiming options. If you are seeing this message, you’ve probably found a bug and should open a bug report at ${AGENT_BUG_REPORT_URL}" ;; + 2) warning "Unable to claim node due to issues creating the claiming directory or preparing the local claiming key. Make sure you have a working openssl command and that ${INSTALL_PREFIX}/var/lib/netdata/cloud.d exists, then try again." ;; + 3) warning "Unable to claim node due to missing dependencies. Usually this means that the Netdata Agent was built without support for Netdata Cloud. If you built the agent from source, please install all needed dependencies for Cloud support. If you used the regular installation script and see this error, please file a bug report at ${AGENT_BUG_REPORT_URL}." ;; + 4) warning "Failed to claim node due to inability to connect to ${NETDATA_CLAIM_URL}. Usually this either means that the specified claiming URL is wrong, or that you are having networking problems." ;; + 5) progress "Successfully claimed node, but was not able to notify the Netdata Agent. You will need to restart the Netdata service on this node before it will show up in the Cloud." ;; + 8) warning "Failed to claim node due to an invalid agent ID. 
You can usually resolve this by removing ${INSTALL_PREFIX}/var/lib/netdata/registry/netdata.public.unique.id and restarting the agent. Then try to claim it again using the same options." ;; + 9) warning "Failed to claim node due to an invalid node name. This probably means you tried to specify a custom name for this node (for example, using the --claim-hostname option), but the hostname itself was either empty or consisted solely of whitespace. You can resolve this by specifying a valid host name and trying again." ;; + 10) warning "Failed to claim node due to an invalid room ID. This issue is most likely caused by a typo. Please check if the room(s) you are trying to add appear on the list of rooms provided to the --claim-rooms option ('${NETDATA_CLAIM_ROOMS}'). Then verify if the rooms are visible in Netdata Cloud and try again." ;; + 11) warning "Failed to claim node due to an issue with the generated RSA key pair. You can usually resolve this by removing all files in ${INSTALL_PREFIX}/var/lib/netdata/cloud.d and then trying again." ;; + 12) warning "Failed to claim node due to an invalid or expired claiming token. Please check that the token specified with the --claim-token option ('${NETDATA_CLAIM_TOKEN}') matches what you see in the Cloud and try again." ;; + 13) warning "Failed to claim node because the Cloud thinks it is already claimed. If this node was created by cloning a VM or as a container from a template, please remove the file ${INSTALL_PREFIX}/var/lib/netdata/registry/netdata.public.unique.id and restart the agent. Then try to claim it again with the same options. Otherwise, if you are certain this node has never been claimed before, you can use the --claim-id option to specify a new node ID to use for claiming, for example by using the uuidgen command like so: --claim-id \"\$(uuidgen)\"" ;; + 14) warning "Failed to claim node because the node is already in the process of being claimed. 
You should not need to do anything to resolve this, the node should show up properly in the Cloud soon. If it does not, please report a bug at ${AGENT_BUG_REPORT_URL}." ;; + 15|16|17) warning "Failed to claim node due to an internal server error in the Cloud. Please retry claiming this node later, and if you still see this message file a bug report at ${CLOUD_BUG_REPORT_URL}." ;; + 18) warning "Unable to claim node because this Netdata installation does not have a unique ID yet. Make sure the agent is running and started up correctly, and then try again." ;; + *) warning "Failed to claim node for an unknown reason. This usually means either networking problems or a bug. Please retry claiming later, and if you still see this message file a bug report at ${AGENT_BUG_REPORT_URL}" ;; + esac +} + claim() { if [ "${DRY_RUN}" -eq 1 ]; then progress "Would attempt to claim agent to ${NETDATA_CLAIM_URL}" @@ -1300,17 +1345,18 @@ claim() { NETDATA_CLAIM_PATH="${INSTALL_PREFIX}/netdata/usr/sbin/netdata-claim.sh" fi + method="script" err_msg= err_code= if [ -z "${NETDATA_CLAIM_PATH}" ]; then - err_msg="Unable to claim node: could not find usable claiming script. Reinstalling Netdata may resolve this." - err_code=F050B + method="config" elif [ ! -e "${NETDATA_CLAIM_PATH}" ]; then - err_msg="Unable to claim node: ${NETDATA_CLAIM_PATH} does not exist." - err_code=F0512 + method="config" elif [ ! -f "${NETDATA_CLAIM_PATH}" ]; then err_msg="Unable to claim node: ${NETDATA_CLAIM_PATH} is not a file." err_code=F0513 + elif grep -q '%%NEW_CLAIMING_METHOD%%' "${NETDATA_CLAIM_PATH}"; then + method="config" elif [ ! -x "${NETDATA_CLAIM_PATH}" ]; then err_msg="Unable to claim node: claiming script at ${NETDATA_CLAIM_PATH} is not executable. Reinstalling Netdata may resolve this." err_code=F0514 @@ -1326,34 +1372,16 @@ claim() { fi if ! 
is_netdata_running; then - NETDATA_CLAIM_EXTRA="${NETDATA_CLAIM_EXTRA} -daemon-not-running" + NETDATA_CLAIM_NORELOAD=1 fi - # shellcheck disable=SC2086 - run_as_root "${NETDATA_CLAIM_PATH}" -token="${NETDATA_CLAIM_TOKEN}" -rooms="${NETDATA_CLAIM_ROOMS}" -url="${NETDATA_CLAIM_URL}" ${NETDATA_CLAIM_EXTRA} - case $? in - 0) - progress "Successfully claimed node" - return 0 - ;; - 1) warning "Unable to claim node due to invalid claiming options. If you are seeing this message, you’ve probably found a bug and should open a bug report at ${AGENT_BUG_REPORT_URL}" ;; - 2) warning "Unable to claim node due to issues creating the claiming directory or preparing the local claiming key. Make sure you have a working openssl command and that ${INSTALL_PREFIX}/var/lib/netdata/cloud.d exists, then try again." ;; - 3) warning "Unable to claim node due to missing dependencies. Usually this means that the Netdata Agent was built without support for Netdata Cloud. If you built the agent from source, please install all needed dependencies for Cloud support. If you used the regular installation script and see this error, please file a bug report at ${AGENT_BUG_REPORT_URL}." ;; - 4) warning "Failed to claim node due to inability to connect to ${NETDATA_CLAIM_URL}. Usually this either means that the specified claiming URL is wrong, or that you are having networking problems." ;; - 5) - progress "Successfully claimed node, but was not able to notify the Netdata Agent. You will need to restart the Netdata service on this node before it will show up in the Cloud." - return 0 + case ${method} in + script) run_claim_script ;; + config) + if ! write_claim_config; then + warning "Failed to write claiming configuration. This usually means you do not have permissions to access the configuration directory." + fi ;; - 8) warning "Failed to claim node due to an invalid agent ID. 
You can usually resolve this by removing ${INSTALL_PREFIX}/var/lib/netdata/registry/netdata.public.unique.id and restarting the agent. Then try to claim it again using the same options." ;; - 9) warning "Failed to claim node due to an invalid node name. This probably means you tried to specify a custom name for this node (for example, using the --claim-hostname option), but the hostname itself was either empty or consisted solely of whitespace. You can resolve this by specifying a valid host name and trying again." ;; - 10) warning "Failed to claim node due to an invalid room ID. This issue is most likely caused by a typo. Please check if the room(s) you are trying to add appear on the list of rooms provided to the --claim-rooms option ('${NETDATA_CLAIM_ROOMS}'). Then verify if the rooms are visible in Netdata Cloud and try again." ;; - 11) warning "Failed to claim node due to an issue with the generated RSA key pair. You can usually resolve this by removing all files in ${INSTALL_PREFIX}/var/lib/netdata/cloud.d and then trying again." ;; - 12) warning "Failed to claim node due to an invalid or expired claiming token. Please check that the token specified with the --claim-token option ('${NETDATA_CLAIM_TOKEN}') matches what you see in the Cloud and try again." ;; - 13) warning "Failed to claim node because the Cloud thinks it is already claimed. If this node was created by cloning a VM or as a container from a template, please remove the file ${INSTALL_PREFIX}/var/lib/netdata/registry/netdata.public.unique.id and restart the agent. Then try to claim it again with the same options. Otherwise, if you are certain this node has never been claimed before, you can use the --claim-id option to specify a new node ID to use for claiming, for example by using the uuidgen command like so: --claim-id \"\$(uuidgen)\"" ;; - 14) warning "Failed to claim node because the node is already in the process of being claimed. 
You should not need to do anything to resolve this, the node should show up properly in the Cloud soon. If it does not, please report a bug at ${AGENT_BUG_REPORT_URL}." ;; - 15|16|17) warning "Failed to claim node due to an internal server error in the Cloud. Please retry claiming this node later, and if you still see this message file a bug report at ${CLOUD_BUG_REPORT_URL}." ;; - 18) warning "Unable to claim node because this Netdata installation does not have a unique ID yet. Make sure the agent is running and started up correctly, and then try again." ;; - *) warning "Failed to claim node for an unknown reason. This usually means either networking problems or a bug. Please retry claiming later, and if you still see this message file a bug report at ${AGENT_BUG_REPORT_URL}" ;; esac if [ "${ACTION}" = "claim" ]; then @@ -1938,12 +1966,6 @@ build_and_install() { opts="${opts} --stable-channel" fi - if [ "${NETDATA_REQUIRE_CLOUD}" -eq 1 ]; then - opts="${opts} --require-cloud" - elif [ "${NETDATA_DISABLE_CLOUD}" -eq 1 ]; then - opts="${opts} --disable-cloud" - fi - # shellcheck disable=SC2086 run_script ./netdata-installer.sh ${opts} @@ -2392,12 +2414,10 @@ parse_args() { esac ;; "--disable-cloud") - NETDATA_DISABLE_CLOUD=1 - NETDATA_REQUIRE_CLOUD=0 + warning "Cloud cannot be disabled" ;; "--require-cloud") - NETDATA_DISABLE_CLOUD=0 - NETDATA_REQUIRE_CLOUD=1 + warning "Cloud is always required" ;; "--dont-start-it") NETDATA_NO_START=1 @@ -2447,26 +2467,21 @@ parse_args() { "--native-only") NETDATA_FORCE_METHOD="native" ;; "--static-only") NETDATA_FORCE_METHOD="static" ;; "--build-only") NETDATA_FORCE_METHOD="build" ;; - "--claim-token") - NETDATA_CLAIM_TOKEN="${2}" - shift 1 - ;; - "--claim-rooms") - NETDATA_CLAIM_ROOMS="${2}" - shift 1 - ;; - "--claim-url") - NETDATA_CLAIM_URL="${2}" - shift 1 - ;; "--claim-"*) optname="$(echo "${1}" | cut -d '-' -f 4-)" case "${optname}" in - id|proxy|user|hostname) + token) NETDATA_CLAIM_TOKEN="${2}"; shift 1 ;; + rooms) 
NETDATA_CLAIM_ROOMS="${2}"; shift 1 ;; + url) NETDATA_CLAIM_URL="${2}"; shift 1 ;; + proxy) NETDATA_CLAIM_PROXY="${2}"; shift 1 ;; + noproxy) NETDATA_CLAIM_PROXY="none" ;; + insecure) NETDATA_CLAIM_INSECURE=yes ;; + noreload) NETDATA_CLAIM_NORELOAD=1 ;; + id|user|hostname) NETDATA_CLAIM_EXTRA="${NETDATA_CLAIM_EXTRA} -${optname}=${2}" shift 1 ;; - verbose|insecure|noproxy|noreload|daemon-not-running) NETDATA_CLAIM_EXTRA="${NETDATA_CLAIM_EXTRA} -${optname}" ;; + verbose|daemon-not-running) NETDATA_CLAIM_EXTRA="${NETDATA_CLAIM_EXTRA} -${optname}" ;; *) warning "Ignoring unrecognized claiming option ${optname}" ;; esac ;; diff --git a/packaging/installer/methods/freebsd.md b/packaging/installer/methods/freebsd.md index 3a33d2e90d7422..911cef4223b24a 100644 --- a/packaging/installer/methods/freebsd.md +++ b/packaging/installer/methods/freebsd.md @@ -67,7 +67,7 @@ gunzip netdata*.tar.gz && tar xf netdata*.tar && rm -rf netdata*.tar Install Netdata in `/opt/netdata`. If you want to enable automatic updates, add `--auto-update` or `-u` to install `netdata-updater` in `cron` (**need root permission**): ```sh -cd netdata-v* && ./netdata-installer.sh --install-prefix /opt && cp /opt/netdata/usr/sbin/netdata-claim.sh /usr/sbin/ +cd netdata-v* && ./netdata-installer.sh --install-prefix /opt ``` You also need to enable the `netdata` service in `/etc/rc.conf`: @@ -113,9 +113,6 @@ The `kickstart.sh` script accepts a number of optional parameters to control how - `--native-only`: Only install if native binary packages are available. - `--static-only`: Only install if a static build is available. - `--build-only`: Only install using a local build. -- `--disable-cloud`: For local builds, don’t build any of the cloud code at all. For native packages and static builds, - use runtime configuration to disable cloud support. -- `--require-cloud`: Only install if Netdata Cloud can be enabled. Overrides `--disable-cloud`. 
- `--install-prefix`: Specify an installation prefix for local builds (by default, we use a sane prefix based on the type of system). - `--install-version`: Specify the version of Netdata to install. - `--old-install-prefix`: Specify the custom local build's installation prefix that should be removed. diff --git a/packaging/installer/methods/kickstart.md b/packaging/installer/methods/kickstart.md index a525cc70deb435..6473f50f30f40f 100644 --- a/packaging/installer/methods/kickstart.md +++ b/packaging/installer/methods/kickstart.md @@ -245,10 +245,6 @@ By default, the kickstart script will provide a Netdata agent installation that Specify a proxy to use when connecting to the cloud in the form of `http://[user:pass@]host:ip` for an HTTP(S) proxy. See [connecting through a proxy](/src/claim/README.md#connect-through-a-proxy) for details. - `--claim-only` If there is an existing install, only try to claim it without attempting to update it. If there is no existing install, install and claim Netdata normally. -- `--require-cloud` - Only install if Netdata Cloud can be enabled. -- `--disable-cloud` - For local builds, don’t build any of the Netdata Cloud code at all. For native packages and static builds, use runtime configuration to disable Netdata Cloud support. 
### anonymous telemetry diff --git a/packaging/makeself/jobs/70-netdata-git.install.sh b/packaging/makeself/jobs/70-netdata-git.install.sh index 59074ec59af074..33531608bf99d6 100755 --- a/packaging/makeself/jobs/70-netdata-git.install.sh +++ b/packaging/makeself/jobs/70-netdata-git.install.sh @@ -32,7 +32,6 @@ run ./netdata-installer.sh \ --dont-wait \ --dont-start-it \ --disable-exporting-mongodb \ - --require-cloud \ --use-system-protobuf \ --dont-scrub-cflags-even-though-it-may-break-things \ --one-time-build \ diff --git a/packaging/utils/compile-on-windows.sh b/packaging/utils/compile-on-windows.sh new file mode 100644 index 00000000000000..c6cef69f84a7ba --- /dev/null +++ b/packaging/utils/compile-on-windows.sh @@ -0,0 +1,78 @@ +#!/bin/sh + +# On MSYS2, install these dependencies to build netdata: +install_dependencies() { + pacman -S \ + git cmake ninja base-devel msys2-devel \ + libyaml-devel libzstd-devel libutil-linux libutil-linux-devel \ + mingw-w64-x86_64-toolchain mingw-w64-ucrt-x86_64-toolchain \ + mingw64/mingw-w64-x86_64-mold ucrt64/mingw-w64-ucrt-x86_64-mold \ + msys/gdb ucrt64/mingw-w64-ucrt-x86_64-gdb mingw64/mingw-w64-x86_64-gdb \ + msys/zlib-devel mingw64/mingw-w64-x86_64-zlib ucrt64/mingw-w64-ucrt-x86_64-zlib \ + msys/libuv-devel ucrt64/mingw-w64-ucrt-x86_64-libuv mingw64/mingw-w64-x86_64-libuv \ + liblz4-devel mingw64/mingw-w64-x86_64-lz4 ucrt64/mingw-w64-ucrt-x86_64-lz4 \ + openssl-devel mingw64/mingw-w64-x86_64-openssl ucrt64/mingw-w64-ucrt-x86_64-openssl \ + protobuf-devel mingw64/mingw-w64-x86_64-protobuf ucrt64/mingw-w64-ucrt-x86_64-protobuf \ + msys/pcre2-devel mingw64/mingw-w64-x86_64-pcre2 ucrt64/mingw-w64-ucrt-x86_64-pcre2 \ + msys/brotli-devel mingw64/mingw-w64-x86_64-brotli ucrt64/mingw-w64-ucrt-x86_64-brotli \ + msys/ccache ucrt64/mingw-w64-ucrt-x86_64-ccache mingw64/mingw-w64-x86_64-ccache \ + mingw64/mingw-w64-x86_64-go ucrt64/mingw-w64-ucrt-x86_64-go \ + mingw64/mingw-w64-x86_64-nsis \ + msys/libcurl msys/libcurl-devel +} + 
+if [ "${1}" = "install" ] +then + install_dependencies || exit 1 + exit 0 +fi + +BUILD_FOR_PACKAGING="Off" +if [ "${1}" = "package" ] +then + BUILD_FOR_PACKAGING="On" +fi + +export PATH="/usr/local/bin:${PATH}" + +WT_ROOT="$(pwd)" +BUILD_TYPE="Debug" +NULL="" + +if [ -z "${MSYSTEM}" ]; then + build="${WT_ROOT}/build-${OSTYPE}" +else + build="${WT_ROOT}/build-${OSTYPE}-${MSYSTEM}" +fi + +if [ "$USER" = "vk" ]; then + build="${WT_ROOT}/build" +fi + +set -exu -o pipefail + +if [ -d "${build}" ] +then + rm -rf "${build}" +fi + +/usr/bin/cmake -S "${WT_ROOT}" -B "${build}" \ + -G Ninja \ + -DCMAKE_INSTALL_PREFIX="/opt/netdata" \ + -DCMAKE_BUILD_TYPE="${BUILD_TYPE}" \ + -DCMAKE_C_FLAGS="-fstack-protector-all -O0 -ggdb -Wall -Wextra -Wno-char-subscripts -Wa,-mbig-obj -pipe -DNETDATA_INTERNAL_CHECKS=1 -D_FILE_OFFSET_BITS=64 -D__USE_MINGW_ANSI_STDIO=1" \ + -DBUILD_FOR_PACKAGING=${BUILD_FOR_PACKAGING} \ + -DUSE_MOLD=Off \ + -DNETDATA_USER="${USER}" \ + -DDEFAULT_FEATURE_STATE=Off \ + -DENABLE_H2O=Off \ + -DENABLE_ML=On \ + -DENABLE_BUNDLED_JSONC=On \ + -DENABLE_BUNDLED_PROTOBUF=Off \ + ${NULL} + +ninja -v -C "${build}" install || ninja -v -C "${build}" -j 1 + +echo +echo "Compile with:" +echo "ninja -v -C \"${build}\" install || ninja -v -C \"${build}\" -j 1" diff --git a/packaging/windows/msys2-dependencies.sh b/packaging/windows/msys2-dependencies.sh index 95a1952df8820c..9a32ea4ec49e43 100755 --- a/packaging/windows/msys2-dependencies.sh +++ b/packaging/windows/msys2-dependencies.sh @@ -15,11 +15,23 @@ pacman -S --noconfirm --needed \ base-devel \ cmake \ git \ + ninja \ + python \ liblz4-devel \ libutil-linux \ libutil-linux-devel \ libyaml-devel \ libzstd-devel \ + msys2-devel \ + msys/brotli-devel \ + msys/libuv-devel \ + msys/pcre2-devel \ + msys/zlib-devel \ + msys/libcurl-devel \ + openssl-devel \ + protobuf-devel \ + mingw-w64-x86_64-toolchain \ + mingw-w64-ucrt-x86_64-toolchain \ mingw64/mingw-w64-x86_64-brotli \ mingw64/mingw-w64-x86_64-go \ 
mingw64/mingw-w64-x86_64-libuv \ @@ -29,16 +41,6 @@ pacman -S --noconfirm --needed \ mingw64/mingw-w64-x86_64-pcre2 \ mingw64/mingw-w64-x86_64-protobuf \ mingw64/mingw-w64-x86_64-zlib \ - mingw-w64-ucrt-x86_64-toolchain \ - mingw-w64-x86_64-toolchain \ - msys2-devel \ - msys/brotli-devel \ - msys/libuv-devel \ - msys/pcre2-devel \ - msys/zlib-devel \ - openssl-devel \ - protobuf-devel \ - python \ ucrt64/mingw-w64-ucrt-x86_64-brotli \ ucrt64/mingw-w64-ucrt-x86_64-go \ ucrt64/mingw-w64-ucrt-x86_64-libuv \ diff --git a/src/aclk/README.md b/src/aclk/README.md index 0a260868cba5af..1f415e39f34949 100644 --- a/src/aclk/README.md +++ b/src/aclk/README.md @@ -28,18 +28,10 @@ However, to be able to offer the stunning visualizations and advanced functional ## Enable and configure the ACLK -The ACLK is enabled by default, with its settings automatically configured and stored in the Agent's memory. No file is -created at `/var/lib/netdata/cloud.d/cloud.conf` until you either connect a node or create it yourself. The default -configuration uses two settings: - -```conf -[global] - enabled = yes - cloud base url = https://app.netdata.cloud -``` +The ACLK is enabled by default, with its settings automatically configured and stored in the Agent's memory. If your Agent needs to use a proxy to access the internet, you must [set up a proxy for -connecting to cloud](/src/claim/README.md#connect-through-a-proxy). +connecting to cloud](/src/claim/README.md). You can configure following keys in the `netdata.conf` section `[cloud]`: ``` @@ -50,84 +42,3 @@ You can configure following keys in the `netdata.conf` section `[cloud]`: - `statistics` enables/disables ACLK related statistics and their charts. You can disable this to save some space in the database and slightly reduce memory usage of Netdata Agent. - `query thread count` specifies the number of threads to process cloud queries. 
Increasing this setting is useful for nodes with many children (streaming), which can expect to handle more queries (and/or more complicated queries). - -## Disable the ACLK - -You have two options if you prefer to disable the ACLK and not use Netdata Cloud. - -### Disable at installation - -You can pass the `--disable-cloud` parameter to the Agent installation when using a kickstart script -([kickstart.sh](/packaging/installer/methods/kickstart.md), or a [manual installation from -Git](/packaging/installer/methods/manual.md). - -When you pass this parameter, the installer does not download or compile any extra libraries. Once running, the Agent -kills the thread responsible for the ACLK and connecting behavior, and behaves as though the ACLK, and thus Netdata Cloud, -does not exist. - -### Disable at runtime - -You can change a runtime setting in your `cloud.conf` file to disable the ACLK. This setting only stops the Agent from -attempting any connection via the ACLK, but does not prevent the installer from downloading and compiling the ACLK's -dependencies. - -The file typically exists at `/var/lib/netdata/cloud.d/cloud.conf`, but can change if you set a prefix during -installation. To disable the ACLK, open that file and change the `enabled` setting to `no`: - -```conf -[global] - enabled = no -``` - -If the file at `/var/lib/netdata/cloud.d/cloud.conf` doesn't exist, you need to create it. - -Copy and paste the first two lines from below, which will change your prompt to `cat`. - -```bash -cd /var/lib/netdata/cloud.d -cat > cloud.conf << EOF -``` - -Copy and paste in lines 3-6, and after the final `EOF`, hit **Enter**. The final line must contain only `EOF`. Hit **Enter** again to return to your normal prompt with the newly-created file. - -To get your normal prompt back, the final line -must contain only `EOF`. - -```bash -[global] - enabled = no - cloud base url = https://app.netdata.cloud -EOF -``` - -You also need to change the file's permissions. 
Use `grep "run as user" /etc/netdata/netdata.conf` to figure out which -user your Agent runs as (typically `netdata`), and replace `netdata:netdata` as shown below if necessary: - -```bash -sudo chmod 0770 cloud.conf -sudo chown netdata:netdata cloud.conf -``` - -Restart your Agent to disable the ACLK. - -### Re-enable the ACLK - -If you first disable the ACLK and any Cloud functionality and then decide you would like to use Cloud, you must either -[reinstall Netdata](/packaging/installer/REINSTALL.md) with Cloud enabled or change the runtime setting in your -`cloud.conf` file. - -If you passed `--disable-cloud` to `netdata-installer.sh` during installation, you must -[reinstall](/packaging/installer/REINSTALL.md) your Agent. Use the same method as before, but pass `--require-cloud` to -the installer. When installation finishes you can [connect your node](/src/claim/README.md#how-to-connect-a-node). - -If you changed the runtime setting in your `var/lib/netdata/cloud.d/cloud.conf` file, edit the file again and change -`enabled` to `yes`: - -```conf -[global] - enabled = yes -``` - -Restart your Agent and [connect your node](/src/claim/README.md#how-to-connect-a-node). - - diff --git a/src/aclk/aclk.c b/src/aclk/aclk.c index 627edfc91dcbd1..f6f9a5fa9d6563 100644 --- a/src/aclk/aclk.c +++ b/src/aclk/aclk.c @@ -2,7 +2,6 @@ #include "aclk.h" -#ifdef ENABLE_ACLK #include "aclk_stats.h" #include "mqtt_websockets/mqtt_wss_client.h" #include "aclk_otp.h" @@ -14,7 +13,6 @@ #include "https_client.h" #include "schema-wrappers/schema_wrappers.h" #include "aclk_capas.h" - #include "aclk_proxy.h" #ifdef ACLK_LOG_CONVERSATION_DIR @@ -25,14 +23,35 @@ #define ACLK_STABLE_TIMEOUT 3 // Minimum delay to mark AGENT as stable -#endif /* ENABLE_ACLK */ - int aclk_pubacks_per_conn = 0; // How many PubAcks we got since MQTT conn est. 
int aclk_rcvd_cloud_msgs = 0; int aclk_connection_counter = 0; int disconnect_req = 0; -int aclk_connected = 0; +static bool aclk_connected = false; +static inline void aclk_set_connected(void) { + __atomic_store_n(&aclk_connected, true, __ATOMIC_RELAXED); +} +static inline void aclk_set_disconnected(void) { + __atomic_store_n(&aclk_connected, false, __ATOMIC_RELAXED); +} + +inline bool aclk_online(void) { + return __atomic_load_n(&aclk_connected, __ATOMIC_RELAXED); +} + +bool aclk_online_for_contexts(void) { + return aclk_online() && aclk_query_scope_has(HTTP_ACL_METRICS); +} + +bool aclk_online_for_alerts(void) { + return aclk_online() && aclk_query_scope_has(HTTP_ACL_ALERTS); +} + +bool aclk_online_for_nodes(void) { + return aclk_online() && aclk_query_scope_has(HTTP_ACL_NODES); +} + int aclk_ctx_based = 0; int aclk_disable_runtime = 0; int aclk_stats_enabled; @@ -49,7 +68,6 @@ float last_backoff_value = 0; time_t aclk_block_until = 0; -#ifdef ENABLE_ACLK mqtt_wss_client mqttwss_client; //netdata_mutex_t aclk_shared_state_mutex = NETDATA_MUTEX_INITIALIZER; @@ -152,19 +170,6 @@ static int load_private_key() return 1; } -static int wait_till_cloud_enabled() -{ - nd_log(NDLS_DAEMON, NDLP_INFO, - "Waiting for Cloud to be enabled"); - - while (!netdata_cloud_enabled) { - sleep_usec(USEC_PER_SEC * 1); - if (!service_running(SERVICE_ACLK)) - return 1; - } - return 0; -} - /** * Will block until agent is claimed. Returns only if agent claimed * or if agent needs to shutdown. 
@@ -175,14 +180,13 @@ static int wait_till_cloud_enabled() static int wait_till_agent_claimed(void) { //TODO prevent malloc and freez - char *agent_id = get_agent_claimid(); - while (likely(!agent_id)) { + ND_UUID uuid = claim_id_get_uuid(); + while (likely(UUIDiszero(uuid))) { sleep_usec(USEC_PER_SEC * 1); if (!service_running(SERVICE_ACLK)) return 1; - agent_id = get_agent_claimid(); + uuid = claim_id_get_uuid(); } - freez(agent_id); return 0; } @@ -204,7 +208,7 @@ static int wait_till_agent_claim_ready() // The NULL return means the value was never initialised, but this value has been initialized in post_conf_load. // We trap the impossible NULL here to keep the linter happy without using a fatal() in the code. - char *cloud_base_url = appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "cloud base url", NULL); + const char *cloud_base_url = cloud_config_url_get(); if (cloud_base_url == NULL) { netdata_log_error("Do not move the cloud base url out of post_conf_load!!"); return 1; @@ -387,7 +391,7 @@ static inline void mqtt_connected_actions(mqtt_wss_client client) mqtt_wss_subscribe(client, topic, 1); aclk_stats_upd_online(1); - aclk_connected = 1; + aclk_set_connected(); aclk_pubacks_per_conn = 0; aclk_rcvd_cloud_msgs = 0; aclk_connection_counter++; @@ -427,7 +431,7 @@ void aclk_graceful_disconnect(mqtt_wss_client client) aclk_stats_upd_online(0); last_disconnect_time = now_realtime_sec(); - aclk_connected = 0; + aclk_set_disconnected(); nd_log(NDLS_DAEMON, NDLP_DEBUG, "Attempting to gracefully shutdown the MQTT/WSS connection"); @@ -601,7 +605,7 @@ static int aclk_attempt_to_connect(mqtt_wss_client client) #endif while (service_running(SERVICE_ACLK)) { - aclk_cloud_base_url = appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "cloud base url", NULL); + aclk_cloud_base_url = cloud_config_url_get(); if (aclk_cloud_base_url == NULL) { error_report("Do not move the cloud base url out of post_conf_load!!"); aclk_status = ACLK_STATUS_NO_CLOUD_URL; @@ -817,18 +821,8 
@@ void *aclk_main(void *ptr) unsigned int proto_hdl_cnt = aclk_init_rx_msg_handlers(); -#if defined( DISABLE_CLOUD ) || !defined( ENABLE_ACLK ) - nd_log(NDLS_DAEMON, NDLP_INFO, - "Killing ACLK thread -> cloud functionality has been disabled"); - - static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; - return NULL; -#endif query_threads.count = read_query_thread_count(); - if (wait_till_cloud_enabled()) - goto exit; - if (wait_till_agent_claim_ready()) goto exit; @@ -875,7 +869,7 @@ void *aclk_main(void *ptr) if (handle_connection(mqttwss_client)) { aclk_stats_upd_online(0); last_disconnect_time = now_realtime_sec(); - aclk_connected = 0; + aclk_set_disconnected(); nd_log(NDLS_ACCESS, NDLP_WARNING, "ACLK DISCONNECTED"); } } while (service_running(SERVICE_ACLK)); @@ -914,11 +908,11 @@ void aclk_host_state_update(RRDHOST *host, int cmd, int queryable) nd_uuid_t node_id; int ret = 0; - if (!aclk_connected) + if (!aclk_online()) return; - if (host->node_id && !uuid_is_null(*host->node_id)) { - uuid_copy(node_id, *host->node_id); + if (!uuid_is_null(host->node_id)) { + uuid_copy(node_id, host->node_id); } else { ret = get_node_id(&host->host_uuid, &node_id); @@ -931,15 +925,17 @@ void aclk_host_state_update(RRDHOST *host, int cmd, int queryable) // node_id not found aclk_query_t create_query; create_query = aclk_query_new(REGISTER_NODE); - rrdhost_aclk_state_lock(localhost); + CLAIM_ID claim_id = claim_id_get(); + node_instance_creation_t node_instance_creation = { - .claim_id = localhost->aclk_state.claimed_id, + .claim_id = claim_id_is_set(claim_id) ? 
claim_id.str : NULL, .hops = host->system_info->hops, .hostname = rrdhost_hostname(host), .machine_guid = host->machine_guid}; + create_query->data.bin_payload.payload = generate_node_instance_creation(&create_query->data.bin_payload.size, &node_instance_creation); - rrdhost_aclk_state_unlock(localhost); + create_query->data.bin_payload.topic = ACLK_TOPICID_CREATE_NODE; create_query->data.bin_payload.msg_name = "CreateNodeInstance"; nd_log(NDLS_DAEMON, NDLP_DEBUG, @@ -962,10 +958,9 @@ void aclk_host_state_update(RRDHOST *host, int cmd, int queryable) node_state_update.capabilities = aclk_get_agent_capas(); - rrdhost_aclk_state_lock(localhost); - node_state_update.claim_id = localhost->aclk_state.claimed_id; + CLAIM_ID claim_id = claim_id_get(); + node_state_update.claim_id = claim_id_is_set(claim_id) ? claim_id.str : NULL; query->data.bin_payload.payload = generate_node_instance_connection(&query->data.bin_payload.size, &node_state_update); - rrdhost_aclk_state_unlock(localhost); nd_log(NDLS_DAEMON, NDLP_DEBUG, "Queuing status update for node=%s, live=%d, hops=%u, queryable=%d", @@ -1007,10 +1002,9 @@ void aclk_send_node_instances() } node_state_update.capabilities = aclk_get_node_instance_capas(host); - rrdhost_aclk_state_lock(localhost); - node_state_update.claim_id = localhost->aclk_state.claimed_id; + CLAIM_ID claim_id = claim_id_get(); + node_state_update.claim_id = claim_id_is_set(claim_id) ? 
claim_id.str : NULL; query->data.bin_payload.payload = generate_node_instance_connection(&query->data.bin_payload.size, &node_state_update); - rrdhost_aclk_state_unlock(localhost); nd_log(NDLS_DAEMON, NDLP_DEBUG, "Queuing status update for node=%s, live=%d, hops=%d, queryable=1", @@ -1032,10 +1026,10 @@ void aclk_send_node_instances() uuid_unparse_lower(list->host_id, (char*)node_instance_creation.machine_guid); create_query->data.bin_payload.topic = ACLK_TOPICID_CREATE_NODE; create_query->data.bin_payload.msg_name = "CreateNodeInstance"; - rrdhost_aclk_state_lock(localhost); - node_instance_creation.claim_id = localhost->aclk_state.claimed_id, + + CLAIM_ID claim_id = claim_id_get(); + node_instance_creation.claim_id = claim_id_is_set(claim_id) ? claim_id.str : NULL, create_query->data.bin_payload.payload = generate_node_instance_creation(&create_query->data.bin_payload.size, &node_instance_creation); - rrdhost_aclk_state_unlock(localhost); nd_log(NDLS_DAEMON, NDLP_DEBUG, "Queuing registration for host=%s, hops=%d", @@ -1087,16 +1081,15 @@ char *aclk_state(void) ); buffer_sprintf(wb, "Protocol Used: Protobuf\nMQTT Version: %d\nClaimed: ", 5); - char *agent_id = get_agent_claimid(); - if (agent_id == NULL) + CLAIM_ID claim_id = claim_id_get(); + if (!claim_id_is_set(claim_id)) buffer_strcat(wb, "No\n"); else { - char *cloud_base_url = appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "cloud base url", NULL); - buffer_sprintf(wb, "Yes\nClaimed Id: %s\nCloud URL: %s\n", agent_id, cloud_base_url ? cloud_base_url : "null"); - freez(agent_id); + const char *cloud_base_url = cloud_config_url_get(); + buffer_sprintf(wb, "Yes\nClaimed Id: %s\nCloud URL: %s\n", claim_id.str, cloud_base_url ? cloud_base_url : "null"); } - buffer_sprintf(wb, "Online: %s\nReconnect count: %d\nBanned By Cloud: %s\n", aclk_connected ? "Yes" : "No", aclk_connection_counter > 0 ? (aclk_connection_counter - 1) : 0, aclk_disable_runtime ? 
"Yes" : "No"); + buffer_sprintf(wb, "Online: %s\nReconnect count: %d\nBanned By Cloud: %s\n", aclk_online() ? "Yes" : "No", aclk_connection_counter > 0 ? (aclk_connection_counter - 1) : 0, aclk_disable_runtime ? "Yes" : "No"); if (last_conn_time_mqtt && (tmptr = localtime_r(&last_conn_time_mqtt, &tmbuf)) ) { char timebuf[26]; strftime(timebuf, 26, "%Y-%m-%d %H:%M:%S", tmptr); @@ -1112,13 +1105,13 @@ char *aclk_state(void) strftime(timebuf, 26, "%Y-%m-%d %H:%M:%S", tmptr); buffer_sprintf(wb, "Last Disconnect Time: %s\n", timebuf); } - if (!aclk_connected && next_connection_attempt && (tmptr = localtime_r(&next_connection_attempt, &tmbuf)) ) { + if (!aclk_online() && next_connection_attempt && (tmptr = localtime_r(&next_connection_attempt, &tmbuf)) ) { char timebuf[26]; strftime(timebuf, 26, "%Y-%m-%d %H:%M:%S", tmptr); buffer_sprintf(wb, "Next Connection Attempt At: %s\nLast Backoff: %.3f", timebuf, last_backoff_value); } - if (aclk_connected) { + if (aclk_online()) { buffer_sprintf(wb, "Received Cloud MQTT Messages: %d\nMQTT Messages Confirmed by Remote Broker (PUBACKs): %d", aclk_rcvd_cloud_msgs, aclk_pubacks_per_conn); RRDHOST *host; @@ -1127,19 +1120,17 @@ char *aclk_state(void) buffer_sprintf(wb, "\n\n> Node Instance for mGUID: \"%s\" hostname \"%s\"\n", host->machine_guid, rrdhost_hostname(host)); buffer_strcat(wb, "\tClaimed ID: "); - rrdhost_aclk_state_lock(host); - if (host->aclk_state.claimed_id) - buffer_strcat(wb, host->aclk_state.claimed_id); + claim_id = rrdhost_claim_id_get(host); + if(claim_id_is_set(claim_id)) + buffer_strcat(wb, claim_id.str); else buffer_strcat(wb, "null"); - rrdhost_aclk_state_unlock(host); - - if (host->node_id == NULL || uuid_is_null(*host->node_id)) { + if (uuid_is_null(host->node_id)) buffer_strcat(wb, "\n\tNode ID: null\n"); - } else { + else { char node_id[GUID_LEN + 1]; - uuid_unparse_lower(*host->node_id, node_id); + uuid_unparse_lower(host->node_id, node_id); buffer_sprintf(wb, "\n\tNode ID: %s\n", node_id); } @@ 
-1204,22 +1195,21 @@ char *aclk_state_json(void) json_object_array_add(grp, tmp); json_object_object_add(msg, "protocols-supported", grp); - char *agent_id = get_agent_claimid(); - tmp = json_object_new_boolean(agent_id != NULL); + CLAIM_ID claim_id = claim_id_get(); + tmp = json_object_new_boolean(claim_id_is_set(claim_id)); json_object_object_add(msg, "agent-claimed", tmp); - if (agent_id) { - tmp = json_object_new_string(agent_id); - freez(agent_id); - } else + if (claim_id_is_set(claim_id)) + tmp = json_object_new_string(claim_id.str); + else tmp = NULL; json_object_object_add(msg, "claimed-id", tmp); - char *cloud_base_url = appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "cloud base url", NULL); + const char *cloud_base_url = cloud_config_url_get(); tmp = cloud_base_url ? json_object_new_string(cloud_base_url) : NULL; json_object_object_add(msg, "cloud-url", tmp); - tmp = json_object_new_boolean(aclk_connected); + tmp = json_object_new_boolean(aclk_online()); json_object_object_add(msg, "online", tmp); tmp = json_object_new_string("Protobuf"); @@ -1240,9 +1230,9 @@ char *aclk_state_json(void) json_object_object_add(msg, "last-connect-time-utc", timestamp_to_json(&last_conn_time_mqtt)); json_object_object_add(msg, "last-connect-time-puback-utc", timestamp_to_json(&last_conn_time_appl)); json_object_object_add(msg, "last-disconnect-time-utc", timestamp_to_json(&last_disconnect_time)); - json_object_object_add(msg, "next-connection-attempt-utc", !aclk_connected ? timestamp_to_json(&next_connection_attempt) : NULL); + json_object_object_add(msg, "next-connection-attempt-utc", !aclk_online() ? 
timestamp_to_json(&next_connection_attempt) : NULL); tmp = NULL; - if (!aclk_connected && last_backoff_value) + if (!aclk_online() && last_backoff_value) tmp = json_object_new_double(last_backoff_value); json_object_object_add(msg, "last-backoff-value", tmp); @@ -1262,19 +1252,18 @@ char *aclk_state_json(void) tmp = json_object_new_string(host->machine_guid); json_object_object_add(nodeinstance, "mguid", tmp); - rrdhost_aclk_state_lock(host); - if (host->aclk_state.claimed_id) { - tmp = json_object_new_string(host->aclk_state.claimed_id); + claim_id = rrdhost_claim_id_get(host); + if(claim_id_is_set(claim_id)) { + tmp = json_object_new_string(claim_id.str); json_object_object_add(nodeinstance, "claimed_id", tmp); } else json_object_object_add(nodeinstance, "claimed_id", NULL); - rrdhost_aclk_state_unlock(host); - if (host->node_id == NULL || uuid_is_null(*host->node_id)) { + if (uuid_is_null(host->node_id)) { json_object_object_add(nodeinstance, "node-id", NULL); } else { char node_id[GUID_LEN + 1]; - uuid_unparse_lower(*host->node_id, node_id); + uuid_unparse_lower(host->node_id, node_id); tmp = json_object_new_string(node_id); json_object_object_add(nodeinstance, "node-id", tmp); } @@ -1301,12 +1290,10 @@ char *aclk_state_json(void) json_object_put(msg); return str; } -#endif /* ENABLE_ACLK */ void add_aclk_host_labels(void) { RRDLABELS *labels = localhost->rrdlabels; -#ifdef ENABLE_ACLK rrdlabels_add(labels, "_aclk_available", "true", RRDLABEL_SRC_AUTO|RRDLABEL_SRC_ACLK); ACLK_PROXY_TYPE aclk_proxy; char *proxy_str; @@ -1327,9 +1314,6 @@ void add_aclk_host_labels(void) { rrdlabels_add(labels, "_mqtt_version", "5", RRDLABEL_SRC_AUTO); rrdlabels_add(labels, "_aclk_proxy", proxy_str, RRDLABEL_SRC_AUTO); rrdlabels_add(labels, "_aclk_ng_new_cloud_protocol", "true", RRDLABEL_SRC_AUTO|RRDLABEL_SRC_ACLK); -#else - rrdlabels_add(labels, "_aclk_available", "false", RRDLABEL_SRC_AUTO|RRDLABEL_SRC_ACLK); -#endif } void aclk_queue_node_info(RRDHOST *host, bool immediate) 
diff --git a/src/aclk/aclk.h b/src/aclk/aclk.h index 72d1a2e119fd2f..fa799c89dd6414 100644 --- a/src/aclk/aclk.h +++ b/src/aclk/aclk.h @@ -4,14 +4,12 @@ #include "daemon/common.h" -#ifdef ENABLE_ACLK #include "aclk_util.h" #include "aclk_rrdhost_state.h" // How many MQTT PUBACKs we need to get to consider connection // stable for the purposes of TBEB (truncated binary exponential backoff) #define ACLK_PUBACKS_CONN_STABLE 3 -#endif /* ENABLE_ACLK */ typedef enum __attribute__((packed)) { ACLK_STATUS_CONNECTED = 0, @@ -39,12 +37,19 @@ extern ACLK_STATUS aclk_status; extern const char *aclk_cloud_base_url; const char *aclk_status_to_string(void); -extern int aclk_connected; extern int aclk_ctx_based; extern int aclk_disable_runtime; extern int aclk_stats_enabled; extern int aclk_kill_link; +bool aclk_online(void); +bool aclk_online_for_contexts(void); +bool aclk_online_for_alerts(void); +bool aclk_online_for_nodes(void); + +void aclk_config_get_query_scope(void); +bool aclk_query_scope_has(HTTP_ACL acl); + extern time_t last_conn_time_mqtt; extern time_t last_conn_time_appl; extern time_t last_disconnect_time; @@ -59,7 +64,6 @@ extern time_t aclk_block_until; extern int aclk_connection_counter; extern int disconnect_req; -#ifdef ENABLE_ACLK void *aclk_main(void *ptr); extern netdata_mutex_t aclk_shared_state_mutex; @@ -80,8 +84,6 @@ void aclk_send_node_instances(void); void aclk_send_bin_msg(char *msg, size_t msg_len, enum aclk_topics subtopic, const char *msgname); -#endif /* ENABLE_ACLK */ - char *aclk_state(void); char *aclk_state_json(void); void add_aclk_host_labels(void); diff --git a/src/aclk/aclk_capas.c b/src/aclk/aclk_capas.c index 0f7870fddba29e..f09eb686414c53 100644 --- a/src/aclk/aclk_capas.c +++ b/src/aclk/aclk_capas.c @@ -6,6 +6,10 @@ #define HTTP_API_V2_VERSION 6 +size_t aclk_get_http_api_version(void) { + return HTTP_API_V2_VERSION; +} + const struct capability *aclk_get_agent_capas() { static struct capability agent_capabilities[] = { diff --git 
a/src/aclk/aclk_capas.h b/src/aclk/aclk_capas.h index c39a197b8ff9c5..d3808e640ab461 100644 --- a/src/aclk/aclk_capas.h +++ b/src/aclk/aclk_capas.h @@ -8,6 +8,7 @@ #include "schema-wrappers/capability.h" +size_t aclk_get_http_api_version(void); const struct capability *aclk_get_agent_capas(); struct capability *aclk_get_node_instance_capas(RRDHOST *host); diff --git a/src/aclk/aclk_otp.c b/src/aclk/aclk_otp.c index c9c75dd38cf25d..d3fd3dd85f0e7d 100644 --- a/src/aclk/aclk_otp.c +++ b/src/aclk/aclk_otp.c @@ -488,16 +488,15 @@ int aclk_get_mqtt_otp(RSA *p_key, char **mqtt_id, char **mqtt_usr, char **mqtt_p unsigned char *challenge = NULL; int challenge_bytes; - char *agent_id = get_agent_claimid(); - if (agent_id == NULL) { + CLAIM_ID claim_id = claim_id_get(); + if (!claim_id_is_set(claim_id)) { netdata_log_error("Agent was not claimed - cannot perform challenge/response"); return 1; } // Get Challenge - if (aclk_get_otp_challenge(target, agent_id, &challenge, &challenge_bytes)) { + if (aclk_get_otp_challenge(target, claim_id.str, &challenge, &challenge_bytes)) { netdata_log_error("Error getting challenge"); - freez(agent_id); return 1; } @@ -508,17 +507,15 @@ int aclk_get_mqtt_otp(RSA *p_key, char **mqtt_id, char **mqtt_usr, char **mqtt_p netdata_log_error("Couldn't decrypt the challenge received"); freez(response_plaintext); freez(challenge); - freez(agent_id); return 1; } freez(challenge); // Encode and Send Challenge struct auth_data data = { .client_id = NULL, .passwd = NULL, .username = NULL }; - if (aclk_send_otp_response(agent_id, response_plaintext, response_plaintext_bytes, target, &data)) { + if (aclk_send_otp_response(claim_id.str, response_plaintext, response_plaintext_bytes, target, &data)) { netdata_log_error("Error getting response"); freez(response_plaintext); - freez(agent_id); return 1; } @@ -527,7 +524,6 @@ int aclk_get_mqtt_otp(RSA *p_key, char **mqtt_id, char **mqtt_usr, char **mqtt_p *mqtt_id = data.client_id; freez(response_plaintext); - 
freez(agent_id); return 0; } @@ -831,17 +827,14 @@ int aclk_get_env(aclk_env_t *env, const char* aclk_hostname, int aclk_port) { req.request_type = HTTP_REQ_GET; - char *agent_id = get_agent_claimid(); - if (agent_id == NULL) - { + CLAIM_ID claim_id = claim_id_get(); + if (!claim_id_is_set(claim_id)) { netdata_log_error("Agent was not claimed - cannot perform challenge/response"); buffer_free(buf); return 1; } - buffer_sprintf(buf, "/api/v1/env?v=%s&cap=proto,ctx&claim_id=%s", &(NETDATA_VERSION[1]) /* skip 'v' at beginning */, agent_id); - - freez(agent_id); + buffer_sprintf(buf, "/api/v1/env?v=%s&cap=proto,ctx&claim_id=%s", &(NETDATA_VERSION[1]) /* skip 'v' at beginning */, claim_id.str); req.host = (char*)aclk_hostname; req.port = aclk_port; diff --git a/src/aclk/aclk_proxy.c b/src/aclk/aclk_proxy.c index 8d0e2d657f5491..a6185db7c499fb 100644 --- a/src/aclk/aclk_proxy.c +++ b/src/aclk/aclk_proxy.c @@ -79,7 +79,7 @@ static inline int check_socks_enviroment(const char **proxy) { char *tmp = getenv("socks_proxy"); - if (!tmp) + if (!tmp || !*tmp) return 1; if (aclk_verify_proxy(tmp) == PROXY_TYPE_SOCKS5) { @@ -97,7 +97,7 @@ static inline int check_http_enviroment(const char **proxy) { char *tmp = getenv("http_proxy"); - if (!tmp) + if (!tmp || !*tmp) return 1; if (aclk_verify_proxy(tmp) == PROXY_TYPE_HTTP) { @@ -113,15 +113,11 @@ static inline int check_http_enviroment(const char **proxy) const char *aclk_lws_wss_get_proxy_setting(ACLK_PROXY_TYPE *type) { - const char *proxy = appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, ACLK_PROXY_CONFIG_VAR, ACLK_PROXY_ENV); - - // backward compatibility: "proxy" was in "netdata.conf" - if (config_exists(CONFIG_SECTION_CLOUD, ACLK_PROXY_CONFIG_VAR)) - proxy = config_get(CONFIG_SECTION_CLOUD, ACLK_PROXY_CONFIG_VAR, ACLK_PROXY_ENV); + const char *proxy = cloud_config_proxy_get(); *type = PROXY_DISABLED; - if (strcmp(proxy, "none") == 0) + if (!proxy || !*proxy || strcmp(proxy, "none") == 0) return proxy; if (strcmp(proxy, 
ACLK_PROXY_ENV) == 0) { diff --git a/src/aclk/aclk_query.c b/src/aclk/aclk_query.c index 08bc2acf3713a6..492764ffe024bb 100644 --- a/src/aclk/aclk_query.c +++ b/src/aclk/aclk_query.c @@ -7,6 +7,8 @@ #define WEB_HDR_ACCEPT_ENC "Accept-Encoding:" +static HTTP_ACL default_aclk_http_acl = HTTP_ACL_ALL_FEATURES; + pthread_cond_t query_cond_wait = PTHREAD_COND_INITIALIZER; pthread_mutex_t query_lock_wait = PTHREAD_MUTEX_INITIALIZER; #define QUERY_THREAD_LOCK pthread_mutex_lock(&query_lock_wait) @@ -24,6 +26,16 @@ struct pending_req_list { static struct pending_req_list *pending_req_list_head = NULL; static pthread_mutex_t pending_req_list_lock = PTHREAD_MUTEX_INITIALIZER; +void aclk_config_get_query_scope(void) { + const char *s = config_get(CONFIG_SECTION_CLOUD, "scope", "full"); + if(strcmp(s, "license manager") == 0) + default_aclk_http_acl = HTTP_ACL_ACLK_LICENSE_MANAGER; +} + +bool aclk_query_scope_has(HTTP_ACL acl) { + return (default_aclk_http_acl & acl) == acl; +} + static struct pending_req_list *pending_req_list_add(const char *msg_id) { struct pending_req_list *new = callocz(1, sizeof(struct pending_req_list)); @@ -106,7 +118,7 @@ static int http_api_v2(struct aclk_query_thread *query_thr, aclk_query_t query) struct web_client *w = web_client_get_from_cache(); web_client_set_conn_cloud(w); - w->port_acl = HTTP_ACL_ACLK | HTTP_ACL_ALL_FEATURES; + w->port_acl = HTTP_ACL_ACLK | default_aclk_http_acl; w->acl = w->port_acl; web_client_set_permissions(w, HTTP_ACCESS_MAP_OLD_MEMBER, HTTP_USER_ROLE_MEMBER, WEB_CLIENT_FLAG_AUTH_CLOUD); diff --git a/src/aclk/aclk_rx_msgs.c b/src/aclk/aclk_rx_msgs.c index 8db8e3f1ecc59d..938f9bf16622ab 100644 --- a/src/aclk/aclk_rx_msgs.c +++ b/src/aclk/aclk_rx_msgs.c @@ -268,7 +268,7 @@ int create_node_instance_result(const char *msg, size_t msg_len) freez(res.node_id); return 1; } - update_node_id(&host_id, &node_id); + sql_update_node_id(&host_id, &node_id); aclk_query_t query = aclk_query_new(NODE_STATE_UPDATE); 
node_instance_connection_t node_state_update = { @@ -292,10 +292,9 @@ int create_node_instance_result(const char *msg, size_t msg_len) node_state_update.capabilities = aclk_get_node_instance_capas(host); } - rrdhost_aclk_state_lock(localhost); - node_state_update.claim_id = localhost->aclk_state.claimed_id; + CLAIM_ID claim_id = claim_id_get(); + node_state_update.claim_id = claim_id_is_set(claim_id) ? claim_id.str : NULL; query->data.bin_payload.payload = generate_node_instance_connection(&query->data.bin_payload.size, &node_state_update); - rrdhost_aclk_state_unlock(localhost); freez((void *)node_state_update.capabilities); diff --git a/src/aclk/aclk_tx_msgs.c b/src/aclk/aclk_tx_msgs.c index c1ed68052c6641..1060746b7698d8 100644 --- a/src/aclk/aclk_tx_msgs.c +++ b/src/aclk/aclk_tx_msgs.c @@ -219,19 +219,19 @@ uint16_t aclk_send_agent_connection_update(mqtt_wss_client client, int reachable .capabilities = aclk_get_agent_capas() }; - rrdhost_aclk_state_lock(localhost); - if (unlikely(!localhost->aclk_state.claimed_id)) { + CLAIM_ID claim_id = claim_id_get(); + if (unlikely(!claim_id_is_set(claim_id))) { netdata_log_error("Internal error. 
Should not come here if not claimed"); - rrdhost_aclk_state_unlock(localhost); return 0; } - if (localhost->aclk_state.prev_claimed_id) - conn.claim_id = localhost->aclk_state.prev_claimed_id; + + CLAIM_ID previous_claim_id = claim_id_get_last_working(); + if (claim_id_is_set(previous_claim_id)) + conn.claim_id = previous_claim_id.str; else - conn.claim_id = localhost->aclk_state.claimed_id; + conn.claim_id = claim_id.str; char *msg = generate_update_agent_connection(&len, &conn); - rrdhost_aclk_state_unlock(localhost); if (!msg) { netdata_log_error("Error generating agent::v1::UpdateAgentConnection payload"); @@ -239,10 +239,9 @@ uint16_t aclk_send_agent_connection_update(mqtt_wss_client client, int reachable } pid = aclk_send_bin_message_subtopic_pid(client, msg, len, ACLK_TOPICID_AGENT_CONN, "UpdateAgentConnection"); - if (localhost->aclk_state.prev_claimed_id) { - freez(localhost->aclk_state.prev_claimed_id); - localhost->aclk_state.prev_claimed_id = NULL; - } + if (claim_id_is_set(previous_claim_id)) + claim_id_clear_previous_working(); + return pid; } @@ -254,16 +253,14 @@ char *aclk_generate_lwt(size_t *size) { .capabilities = NULL }; - rrdhost_aclk_state_lock(localhost); - if (unlikely(!localhost->aclk_state.claimed_id)) { + CLAIM_ID claim_id = claim_id_get(); + if(!claim_id_is_set(claim_id)) { netdata_log_error("Internal error. 
Should not come here if not claimed"); - rrdhost_aclk_state_unlock(localhost); return NULL; } - conn.claim_id = localhost->aclk_state.claimed_id; + conn.claim_id = claim_id.str; char *msg = generate_update_agent_connection(size, &conn); - rrdhost_aclk_state_unlock(localhost); if (!msg) netdata_log_error("Error generating agent::v1::UpdateAgentConnection payload for LWT"); diff --git a/src/aclk/aclk_util.c b/src/aclk/aclk_util.c index 3bf2e3f188a2c4..254f490d462724 100644 --- a/src/aclk/aclk_util.c +++ b/src/aclk/aclk_util.c @@ -2,8 +2,6 @@ #include "aclk_util.h" -#ifdef ENABLE_ACLK - #include "aclk_proxy.h" #include "daemon/common.h" @@ -186,20 +184,18 @@ static void topic_generate_final(struct aclk_topic *t) { if (!replace_tag) return; - rrdhost_aclk_state_lock(localhost); - if (unlikely(!localhost->aclk_state.claimed_id)) { + CLAIM_ID claim_id = claim_id_get(); + if (unlikely(!claim_id_is_set(claim_id))) { netdata_log_error("This should never be called if agent not claimed"); - rrdhost_aclk_state_unlock(localhost); return; } - t->topic = mallocz(strlen(t->topic_recvd) + 1 - strlen(CLAIM_ID_REPLACE_TAG) + strlen(localhost->aclk_state.claimed_id)); + t->topic = mallocz(strlen(t->topic_recvd) + 1 - strlen(CLAIM_ID_REPLACE_TAG) + strlen(claim_id.str)); memcpy(t->topic, t->topic_recvd, replace_tag - t->topic_recvd); dest = t->topic + (replace_tag - t->topic_recvd); - memcpy(dest, localhost->aclk_state.claimed_id, strlen(localhost->aclk_state.claimed_id)); - dest += strlen(localhost->aclk_state.claimed_id); - rrdhost_aclk_state_unlock(localhost); + memcpy(dest, claim_id.str, strlen(claim_id.str)); + dest += strlen(claim_id.str); replace_tag += strlen(CLAIM_ID_REPLACE_TAG); strcpy(dest, replace_tag); dest += strlen(replace_tag); @@ -440,7 +436,6 @@ void aclk_set_proxy(char **ohost, int *port, char **uname, char **pwd, enum mqtt freez(proxy); } -#endif /* ENABLE_ACLK */ #if defined(OPENSSL_VERSION_NUMBER) && OPENSSL_VERSION_NUMBER < OPENSSL_VERSION_110 static 
EVP_ENCODE_CTX *EVP_ENCODE_CTX_new(void) diff --git a/src/aclk/aclk_util.h b/src/aclk/aclk_util.h index 6c0239cc31a0cc..3ab6f6f2edc380 100644 --- a/src/aclk/aclk_util.h +++ b/src/aclk/aclk_util.h @@ -3,8 +3,6 @@ #define ACLK_UTIL_H #include "libnetdata/libnetdata.h" - -#ifdef ENABLE_ACLK #include "mqtt_websockets/mqtt_wss_client.h" #define CLOUD_EC_MALFORMED_NODE_ID 1 @@ -114,7 +112,6 @@ unsigned long int aclk_tbeb_delay(int reset, int base, unsigned long int min, un #define aclk_tbeb_reset(x) aclk_tbeb_delay(1, 0, 0, 0) void aclk_set_proxy(char **ohost, int *port, char **uname, char **pwd, enum mqtt_wss_proxy_type *type); -#endif /* ENABLE_ACLK */ int base64_encode_helper(unsigned char *out, int *outl, const unsigned char *in, int in_len); diff --git a/src/claim/README.md b/src/claim/README.md index 51e2a9ebe40f08..019e3a7b093743 100644 --- a/src/claim/README.md +++ b/src/claim/README.md @@ -11,8 +11,7 @@ features like centralized monitoring and easier collaboration. There are two places in the UI where you can add/connect your Node: - **Space/Room settings**: Click the cogwheel (the bottom-left corner or next to the Room name at the top) and - select "Nodes." Click the "+" button to add - a new node. + select "Nodes." Click the "+" button to add a new node. - [**Nodes tab**](/docs/dashboards-and-charts/nodes-tab.md): Click on the "Add nodes" button. Netdata Cloud will generate a command that you can execute on your Node to install and claim the Agent. The command is @@ -28,12 +27,13 @@ Once you've chosen your installation method, follow the provided instructions to ### Connect an Existing Agent -There are two methods to connect an already installed Netdata Agent to your Netdata Cloud Space: +There are three methods to connect an already installed Netdata Agent to your Netdata Cloud Space: -- using the Netdata Cloud user interface (UI). -- using the claiming script. 
+- Manually, via the UI +- Automatically, via a provisioning system (or the command line) +- Automatically, via environment variables (e.g. kubernetes, docker, etc) -#### Using the UI (recommended) +#### Manually, via the UI The UI method is the easiest and recommended way to connect your Agent. Here's how: @@ -42,36 +42,54 @@ The UI method is the easiest and recommended way to connect your Agent. Here's h 3. Click the "Connect" button. 4. Follow the on-screen instructions to connect your Agent. -#### Using claiming script +#### Automatically, via a provisioning system or the command line -You can connect an Agent by running -the [netdata-claim.sh](https://github.com/netdata/netdata/blob/master/src/claim/netdata-claim.sh.in) script directly. -You can either run it with root privileges using `sudo` or as the user running the Agent (typically `netdata`). - -The claiming script accepts options that control the connection process. You can specify these options using the -following format: +Netdata Agents can be connected to Netdata Cloud by creating the file `/etc/netdata/claim.conf` +(or `/opt/netdata/etc/netdata/claim.conf` depending on your installation), like this: ```bash -netdata-claim.sh -OPTION=VALUE ... +[global] + url = The Netdata Cloud base URL (optional, defaults to `https://app.netdata.cloud`) + token = The claiming token for your Netdata Cloud Space (required) + rooms = A comma-separated list of Rooms to add the Agent to (optional) + proxy = The URL of a proxy server to use for the connection, or none, or env (optional, defaults to env) + insecure = Either yes or no (optional) ``` -Claiming script options: +- `proxy` can get anything libcurl accepts as proxy, or the keywords `none` and `env`. `none` or just empty disables proxy configuration, while `env` instructs libcurl to use the environment for determining proxy configuration (usually the environment variable `https_proxy`). 
+- `insecure` is a boolean (either `yes`, or `no`) and when set to `yes` it instructs libcurl to disable host verification. -| Option | Description | Required | Default value | -|--------|--------------------------------------------------------------------|:--------:|:------------------------------------------------------| -| token | The claiming token for your Netdata Cloud Space. | yes | | -| rooms | A comma-separated list of Rooms to add the Agent to. | no | The Agent will be added to the "All nodes" Room only. | -| id | The unique identifier of the Agent. | no | The Agent's MACHINE_GUID. | -| proxy | The URL of a proxy server to use for the connection, if necessary. | no | | - -Example: +example: ```bash -netdata-claim.sh -token=MYTOKEN1234567 -rooms=room1,room2 +[global] + url = https://app.netdata.cloud + token = NETDATA_CLOUD_SPACE_TOKEN + rooms = ROOM_KEY1,ROOM_KEY2,ROOM_KEY3 + proxy = http://username:password@myproxy:8080 + insecure = no ``` -This command connects the Agent and adds it to the "room1" and "room2" Rooms using your claiming token -MYTOKEN1234567. +If the agent is already running, you can either run `netdatacli reload-claiming-state` or restart the agent. +Otherwise, the agent will be claimed when it starts. + +If claiming fails for whatever reason, daemon.log will log the reason (search for `CLAIM`), +and also `http://ip:19999/api/v2/info` would also state the reason at the `cloud` section of the response. 
+ +#### Automatically, via environment variables + +Netdata will use the following environment variables: + +- `NETDATA_CLAIM_URL`: The Netdata Cloud base URL (optional, defaults to `https://app.netdata.cloud`) +- `NETDATA_CLAIM_TOKEN`: The claiming token for your Netdata Cloud Space (required) +- `NETDATA_CLAIM_ROOMS`: A comma-separated list of Rooms to add the Agent to (optional) +- `NETDATA_CLAIM_PROXY`: The URL of a proxy server to use for the connection (optional) +- `NETDATA_EXTRA_CLAIM_OPTS`, may contain a space separated list of options. The option `-insecure` is the only currently used. + +The `NETDATA_CLAIM_TOKEN` alone is enough for triggering the claiming process. + +If claiming fails for whatever reason, daemon.log will log the reason (search for `CLAIM`), +and also `http://ip:19999/api/v2/info` would also state the reason at the `cloud` section of the response. ## Reconnect @@ -84,19 +102,12 @@ cd /var/lib/netdata # Replace with your Netdata library directory, if not /var sudo rm -rf cloud.d/ ``` +> IMPORTANT:
+> Keep in mind that the Agent will be **re-claimed automatically** if the environment variables or `claim.conf` exist when the agent is restarted. + This node no longer has access to the credentials it was used when connecting to Netdata Cloud via the ACLK. You will still be able to see this node in your Rooms in an **unreachable** state. -If you want to reconnect this node, you need to: - -1. Ensure that the `/var/lib/netdata/cloud.d` directory doesn't exist. In some installations, the path - is `/opt/netdata/var/lib/netdata/cloud.d` -2. Stop the Agent -3. Ensure that the `uuidgen-runtime` package is installed. Run ```echo "$(uuidgen)"``` and validate you get back a UUID -4. Copy the kickstart.sh command to add a node from your space and add to the end of it `--claim-id "$(uuidgen)"`. Run - the command and look for the message `Node was successfully claimed.` -5. Start the Agent - ### Docker based installations To remove a node from you Space in Netdata Cloud, and connect it to another Space, follow these steps: @@ -113,7 +124,6 @@ To remove a node from you Space in Netdata Cloud, and connect it to another Spac ```bash rm -rf /var/lib/netdata/cloud.d/ - rm /var/lib/netdata/registry/netdata.public.unique.id ``` @@ -123,7 +133,6 @@ To remove a node from you Space in Netdata Cloud, and connect it to another Spac ```bash docker stop CONTAINER_NAME - docker rm CONTAINER_NAME ``` @@ -163,16 +172,9 @@ Only the administrators of a Space in Netdata Cloud can trigger this action. If you're having trouble connecting a node, this may be because the [ACLK](/src/aclk/README.md) cannot connect to Cloud. -With the Netdata Agent running, visit `http://NODE:19999/api/v1/info` in your browser, replacing `NODE` with the IP -address or hostname of your Agent. The returned JSON contains four keys that will be helpful to diagnose any issues you -might be having with the ACLK or connection process. 
- -``` -"cloud-enabled" -"cloud-available" -"agent-claimed" -"aclk-available" -``` +With the Netdata Agent running, visit `http://NODE:19999/api/v2/info` in your browser, replacing `NODE` with the IP +address or hostname of your Agent. The returned JSON contains a section called `cloud` with helpful information to +diagnose any issues you might be having with the ACLK or connection process. > **Note** > @@ -216,28 +218,12 @@ Failed to write new machine GUID. Please make sure you have rights to write to / For a successful execution you will need to run the script with root privileges or run it with the user that is running the Agent. -### bash: netdata-claim.sh: command not found - -If you run the claiming script and see a `command not found` error, you either installed Netdata in a non-standard -location or are using an unsupported package. If you installed Netdata in a non-standard path using -the `--install-prefix` option, you need to update your `$PATH` or run `netdata-claim.sh` using the full path. - -For example, if you installed Netdata to `/opt/netdata`, use `/opt/netdata/bin/netdata-claim.sh` to run the claiming -script. - -> **Note** -> -> If you are using an unsupported package, such as a third-party `.deb`/`.rpm` package provided by your distribution, -> please remove that package and reinstall using -> -our [recommended kickstart script](/packaging/installer/methods/kickstart.md). - ### Connecting on older distributions (Ubuntu 14.04, Debian 8, CentOS 6) If you're running an older Linux distribution or one that has reached EOL, such as Ubuntu 14.04 LTS, Debian 8, or CentOS 6, your Agent may not be able to securely connect to Netdata Cloud due to an outdated version of OpenSSL. These old -versions of OpenSSL cannot perform [hostname validation](https://wiki.openssl.org/index.php/Hostname_validation), which -helps securely encrypt SSL connections. 
+versions of OpenSSL cannot perform [hostname validation](https://wiki.openssl.org/index.php/Hostname_validation), +which helps securely encrypt SSL connections. We recommend you reinstall Netdata with a [static build](/packaging/installer/methods/kickstart.md#static-builds), @@ -246,102 +232,3 @@ which uses an up-to-date version of OpenSSL with hostname validation enabled. If you choose to continue using the outdated version of OpenSSL, your node will still connect to Netdata Cloud, albeit with hostname verification disabled. Without verification, your Netdata Cloud connection could be vulnerable to man-in-the-middle attacks. - -### cloud-enabled is false - -If `cloud-enabled` is `false`, you probably ran the installer with `--disable-cloud` option. - -Additionally, check that the `enabled` setting in `var/lib/netdata/cloud.d/cloud.conf` is set to `true`: - -```conf -[global] - enabled = true -``` - -To fix this issue, reinstall Netdata using -your [preferred method](/packaging/installer/README.md) and do not add -the `--disable-cloud` option. - -### cloud-available is false / ACLK Available: No - -If `cloud-available` is `false` after you verified Cloud is enabled in the previous step, the most likely issue is that -Cloud features failed to build during installation. - -If Cloud features fail to build, the installer continues and finishes the process without Cloud functionality as opposed -to failing the installation altogether. - -We do this to ensure the Agent will always finish installing. - -If you can't see an explicit error in the installer's output, you can run the installer with the `--require-cloud` -option. This option causes the installation to fail if Cloud functionality can't be built and enabled, and the -installer's output should give you more error details. - -You may see one of the following error messages during installation: - -- `Failed to build libmosquitto. 
The install process will continue, but you will not be able to connect this node to Netdata Cloud.` -- `Unable to fetch sources for libmosquitto. The install process will continue, but you will not be able to connect this node to Netdata Cloud.` -- `Failed to build libwebsockets. The install process will continue, but you may not be able to connect this node to Netdata Cloud.` -- `Unable to fetch sources for libwebsockets. The install process will continue, but you may not be able to connect this node to Netdata Cloud.` -- `Could not find cmake, which is required to build libwebsockets. The install process will continue, but you may not be able to connect this node to Netdata Cloud.` -- `Could not find cmake, which is required to build JSON-C. The install process will continue, but Netdata Cloud support will be disabled.` -- `Failed to build JSON-C. Netdata Cloud support will be disabled.` -- `Unable to fetch sources for JSON-C. Netdata Cloud support will be disabled.` - -One common cause of the installer failing to build Cloud features is not having one of the following dependencies on -your system: `cmake`, `json-c` and `OpenSSL`, including corresponding `devel` packages. - -You can also look for error messages in `/var/log/netdata/error.log`. Try one of the following two commands to search -for ACLK-related errors. - -```bash -less /var/log/netdata/error.log -grep -i ACLK /var/log/netdata/error.log -``` - -If the installer's output does not help you enable Cloud features, contact us -by [creating an issue on GitHub](https://github.com/netdata/netdata/issues/new?assignees=&labels=bug%2Cneeds+triage&template=BUG_REPORT.yml&title=The+installer+failed+to+prepare+the+required+dependencies+for+Netdata+Cloud+functionality) -with details about your system and relevant output from `error.log`. - -### agent-claimed is false / Claimed: No - -You must [connect your node](#connect). 
- -### aclk-available is false / Online: No - -If `aclk-available` is `false` and all other keys are `true`, your Agent is having trouble connecting to the Cloud -through the ACLK. Please check your system's firewall. - -If your Agent needs to use a proxy to access the internet, you must set up a proxy for connecting. - -If you are certain firewall and proxy settings are not the issue, you should consult the Agent's `error.log` -at `/var/log/netdata/error.log` and contact us -by [creating an issue on GitHub](https://github.com/netdata/netdata/issues/new?assignees=&labels=bug%2Cneeds+triage&template=BUG_REPORT.yml&title=ACLK-available-is-false) -with details about your system and relevant output from `error.log`. - -## Connecting reference - -In the sections below, you can find reference material for the kickstart script, claiming script, connecting via the -Agent's command line tool, and details about the files found in `cloud.d`. - -### The `cloud.conf` file - -This section defines how and whether your Agent connects to Netdata Cloud using -the [Agent-Cloud link](/src/aclk/README.md)(ACLK). - -| setting | default | info | -|:---------------|:----------------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------| -| enabled | yes | Controls whether the ACLK is active. Set to no to prevent the Agent from connecting to Netdata Cloud. | -| cloud base url | | The URL for the Netdata Cloud web application. Typically, this should not be changed. | -| proxy | env | Specifies the proxy setting for the ACLK. Options: none (no proxy), env (use environment's proxy), or a URL (e.g., `http://proxy.example.com:1080`). | - -### Connection directory - -Netdata stores the Agent's connection-related state in the Netdata library directory under `cloud.d`. For a default -installation, this directory exists at `/var/lib/netdata/cloud.d`. 
The directory and its files should be owned by the -user that runs the Agent, which is typically the `netdata` user. - -The `cloud.d/token` file should contain the claiming-token and the `cloud.d/rooms` file should contain the list of War -Rooms you added that node to. - -The user can also put the Cloud endpoint's full certificate chain in `cloud.d/cloud_fullchain.pem` so that the Agent -can trust the endpoint if necessary. diff --git a/src/claim/claim-with-api.c b/src/claim/claim-with-api.c new file mode 100644 index 00000000000000..284de5b7bbe6d0 --- /dev/null +++ b/src/claim/claim-with-api.c @@ -0,0 +1,491 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "claim.h" + +#include "registry/registry.h" + +#include +#include +#include +#include + +static bool check_and_generate_certificates() { + FILE *fp; + EVP_PKEY *pkey = NULL; + EVP_PKEY_CTX *pctx = NULL; + + CLEAN_CHAR_P *private_key_file = filename_from_path_entry_strdupz(netdata_configured_cloud_dir, "private.pem"); + CLEAN_CHAR_P *public_key_file = filename_from_path_entry_strdupz(netdata_configured_cloud_dir, "public.pem"); + + // Check if private key exists + fp = fopen(public_key_file, "r"); + if (fp) { + fclose(fp); + return true; + } + + // Generate the RSA key + pctx = EVP_PKEY_CTX_new_id(EVP_PKEY_RSA, NULL); + if (!pctx) { + claim_agent_failure_reason_set("Cannot generate RSA key, EVP_PKEY_CTX_new_id() failed"); + return false; + } + + if (EVP_PKEY_keygen_init(pctx) <= 0) { + claim_agent_failure_reason_set("Cannot generate RSA key, EVP_PKEY_keygen_init() failed"); + EVP_PKEY_CTX_free(pctx); + return false; + } + + if (EVP_PKEY_CTX_set_rsa_keygen_bits(pctx, 2048) <= 0) { + claim_agent_failure_reason_set("Cannot generate RSA key, EVP_PKEY_CTX_set_rsa_keygen_bits() failed"); + EVP_PKEY_CTX_free(pctx); + return false; + } + + if (EVP_PKEY_keygen(pctx, &pkey) <= 0) { + claim_agent_failure_reason_set("Cannot generate RSA key, EVP_PKEY_keygen() failed"); + EVP_PKEY_CTX_free(pctx); + return false; + 
} + + EVP_PKEY_CTX_free(pctx); + + // Save private key + fp = fopen(private_key_file, "wb"); + if (!fp || !PEM_write_PrivateKey(fp, pkey, NULL, NULL, 0, NULL, NULL)) { + claim_agent_failure_reason_set("Cannot write private key file: %s", private_key_file); + if (fp) fclose(fp); + EVP_PKEY_free(pkey); + return false; + } + fclose(fp); + + // Save public key + fp = fopen(public_key_file, "wb"); + if (!fp || !PEM_write_PUBKEY(fp, pkey)) { + claim_agent_failure_reason_set("Cannot write public key file: %s", public_key_file); + if (fp) fclose(fp); + EVP_PKEY_free(pkey); + return false; + } + fclose(fp); + + EVP_PKEY_free(pkey); + return true; +} + +static size_t response_write_callback(void *ptr, size_t size, size_t nmemb, void *stream) { + BUFFER *wb = stream; + size_t real_size = size * nmemb; + + buffer_memcat(wb, ptr, real_size); + + return real_size; +} + +static const char *curl_add_json_room(BUFFER *wb, const char *start, const char *end) { + size_t len = end - start; + + // copy the item to an new buffer and terminate it + char buf[len + 1]; + memcpy(buf, start, len); + buf[len] = '\0'; + + // add it to the json array + const char *trimmed = trim(buf); // remove leading and trailing spaces + if(trimmed) + buffer_json_add_array_item_string(wb, trimmed); + + // prepare for the next item + start = end + 1; + + // skip multiple separators or spaces + while(*start == ',' || *start == ' ') start++; + + return start; +} + +void curl_add_rooms_json_array(BUFFER *wb, const char *rooms) { + buffer_json_member_add_array(wb, "rooms"); + if(rooms && *rooms) { + const char *start = rooms, *end = NULL; + + // Skip initial separators or spaces + while (*start == ',' || *start == ' ') + start++; + + // Process each item in the comma-separated list + while ((end = strchr(start, ',')) != NULL) + start = curl_add_json_room(wb, start, end); + + // Process the last item if any + if (*start) + curl_add_json_room(wb, start, &start[strlen(start)]); + } + buffer_json_array_close(wb); +} 
+ +static int debug_callback(CURL *handle, curl_infotype type, char *data, size_t size, void *userptr) { + (void)handle; // Unused + (void)userptr; // Unused + + if (type == CURLINFO_TEXT) + nd_log(NDLS_DAEMON, NDLP_INFO, "CLAIM: Info: %s", data); + else if (type == CURLINFO_HEADER_OUT) + nd_log(NDLS_DAEMON, NDLP_INFO, "CLAIM: Send header: %.*s", (int)size, data); + else if (type == CURLINFO_DATA_OUT) + nd_log(NDLS_DAEMON, NDLP_INFO, "CLAIM: Send data: %.*s", (int)size, data); + else if (type == CURLINFO_SSL_DATA_OUT) + nd_log(NDLS_DAEMON, NDLP_INFO, "CLAIM: Send SSL data: %.*s", (int)size, data); + else if (type == CURLINFO_HEADER_IN) + nd_log(NDLS_DAEMON, NDLP_INFO, "CLAIM: Receive header: %.*s", (int)size, data); + else if (type == CURLINFO_DATA_IN) + nd_log(NDLS_DAEMON, NDLP_INFO, "CLAIM: Receive data: %.*s", (int)size, data); + else if (type == CURLINFO_SSL_DATA_IN) + nd_log(NDLS_DAEMON, NDLP_INFO, "CLAIM: Receive SSL data: %.*s", (int)size, data); + + return 0; +} + +static bool send_curl_request(const char *machine_guid, const char *hostname, const char *token, const char *rooms, const char *url, const char *proxy, int insecure, bool *can_retry) { + CURL *curl; + CURLcode res; + char target_url[2048]; + char public_key[2048] = ""; // Adjust size as needed + FILE *fp; + struct curl_slist *headers = NULL; + + // create a new random claim id + nd_uuid_t claimed_id; + uuid_generate_random(claimed_id); + char claimed_id_str[UUID_STR_LEN]; + uuid_unparse_lower(claimed_id, claimed_id_str); + + // generate the URL to post + snprintf(target_url, sizeof(target_url), "%s%sapi/v1/spaces/nodes/%s", + url, strendswith(url, "/") ? 
"" : "/", claimed_id_str); + + // Read the public key + CLEAN_CHAR_P *public_key_file = filename_from_path_entry_strdupz(netdata_configured_cloud_dir, "public.pem"); + fp = fopen(public_key_file, "r"); + if (!fp || fread(public_key, 1, sizeof(public_key), fp) == 0) { + claim_agent_failure_reason_set("cannot read public key file '%s'", public_key_file); + if (fp) fclose(fp); + *can_retry = false; + return false; + } + fclose(fp); + + // check if we have trusted.pem + // or cloud_fullchain.pem, for backwards compatibility + CLEAN_CHAR_P *trusted_key_file = filename_from_path_entry_strdupz(netdata_configured_cloud_dir, "trusted.pem"); + fp = fopen(trusted_key_file, "r"); + if(fp) + fclose(fp); + else { + freez(trusted_key_file); + trusted_key_file = filename_from_path_entry_strdupz(netdata_configured_cloud_dir, "cloud_fullchain.pem"); + fp = fopen(trusted_key_file, "r"); + if(fp) + fclose(fp); + else { + freez(trusted_key_file); + trusted_key_file = NULL; + } + } + + // generate the JSON request message + CLEAN_BUFFER *wb = buffer_create(0, NULL); + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY); + + buffer_json_member_add_object(wb, "node"); + { + buffer_json_member_add_string(wb, "id", claimed_id_str); + buffer_json_member_add_string(wb, "hostname", hostname); + } + buffer_json_object_close(wb); // node + + buffer_json_member_add_string(wb, "token", token); + curl_add_rooms_json_array(wb, rooms); + buffer_json_member_add_string(wb, "publicKey", public_key); + buffer_json_member_add_string(wb, "mGUID", machine_guid); + buffer_json_finalize(wb); + + // initialize libcurl + curl = curl_easy_init(); + if(!curl) { + claim_agent_failure_reason_set("Cannot initialize request (curl_easy_init() failed)"); + *can_retry = true; + return false; + } + + // curl_easy_setopt(curl, CURLOPT_VERBOSE, 1L); + curl_easy_setopt(curl, CURLOPT_DEBUGFUNCTION, debug_callback); + + // we will receive the response in this + CLEAN_BUFFER *response = buffer_create(0, 
NULL); + + // configure the request + headers = curl_slist_append(headers, "Content-Type: application/json"); + curl_easy_setopt(curl, CURLOPT_URL, target_url); + curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, "PUT"); + curl_easy_setopt(curl, CURLOPT_POSTFIELDS, buffer_tostring(wb)); + curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers); + curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, response_write_callback); + curl_easy_setopt(curl, CURLOPT_WRITEDATA, response); + + if(trusted_key_file) + curl_easy_setopt(curl, CURLOPT_CAINFO, trusted_key_file); + + // Proxy configuration + if (proxy) { + if (!*proxy || strcmp(proxy, "none") == 0) + // disable proxy configuration in libcurl + curl_easy_setopt(curl, CURLOPT_PROXY, ""); + + else if (strcmp(proxy, "env") != 0) + // set the custom proxy for libcurl + curl_easy_setopt(curl, CURLOPT_PROXY, proxy); + + // otherwise, libcurl will use its own proxy environment variables + } + + // Insecure option + if (insecure) { + curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 0L); + curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, 0L); + } + + // Set timeout options + curl_easy_setopt(curl, CURLOPT_TIMEOUT, 10); + curl_easy_setopt(curl, CURLOPT_CONNECTTIMEOUT, 5); + + // execute the request + res = curl_easy_perform(curl); + if (res != CURLE_OK) { + claim_agent_failure_reason_set("Request failed with error: %s", curl_easy_strerror(res)); + curl_easy_cleanup(curl); + *can_retry = true; + return false; + } + + // Get HTTP response code + long http_status_code; + curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &http_status_code); + + bool ret = false; + if(http_status_code == 204) { + if(!cloud_conf_regenerate(claimed_id_str, machine_guid, hostname, token, rooms, url, proxy, insecure)) { + claim_agent_failure_reason_set("Failed to save claiming info to disk"); + } + else { + claim_agent_failure_reason_set(NULL); + ret = true; + } + + *can_retry = false; + } + else if (http_status_code == 422) { + if(buffer_strlen(response)) { + struct 
json_object *parsed_json; + struct json_object *error_key_obj; + const char *error_key = NULL; + + parsed_json = json_tokener_parse(buffer_tostring(response)); + if(parsed_json) { + if (json_object_object_get_ex(parsed_json, "errorMsgKey", &error_key_obj)) + error_key = json_object_get_string(error_key_obj); + + if (strcmp(error_key, "ErrInvalidNodeID") == 0) + claim_agent_failure_reason_set("Failed: the node id is invalid"); + else if (strcmp(error_key, "ErrInvalidNodeName") == 0) + claim_agent_failure_reason_set("Failed: the node name is invalid"); + else if (strcmp(error_key, "ErrInvalidRoomID") == 0) + claim_agent_failure_reason_set("Failed: one or more room ids are invalid"); + else if (strcmp(error_key, "ErrInvalidPublicKey") == 0) + claim_agent_failure_reason_set("Failed: the public key is invalid"); + else + claim_agent_failure_reason_set("Failed with description '%s'", error_key); + + json_object_put(parsed_json); + } + else + claim_agent_failure_reason_set("Failed with a response code %ld", http_status_code); + } + else + claim_agent_failure_reason_set("Failed with an empty response, code %ld", http_status_code); + + *can_retry = false; + } + else if(http_status_code == 102) { + claim_agent_failure_reason_set("Claiming is in progress"); + *can_retry = false; + } + else if(http_status_code == 403) { + claim_agent_failure_reason_set("Failed: token is expired, not found, or invalid"); + *can_retry = false; + } + else if(http_status_code == 409) { + claim_agent_failure_reason_set("Failed: agent is already claimed"); + *can_retry = false; + } + else if(http_status_code == 500) { + claim_agent_failure_reason_set("Failed: received Internal Server Error"); + *can_retry = true; + } + else if(http_status_code == 503) { + claim_agent_failure_reason_set("Failed: Netdata Cloud is unavailable"); + *can_retry = true; + } + else if(http_status_code == 504) { + claim_agent_failure_reason_set("Failed: Gateway Timeout"); + *can_retry = true; + } + else { + 
claim_agent_failure_reason_set("Failed with response code %ld", http_status_code); + *can_retry = true; + } + + curl_easy_cleanup(curl); + return ret; +} + +bool claim_agent(const char *url, const char *token, const char *rooms, const char *proxy, bool insecure) { + static SPINLOCK spinlock = NETDATA_SPINLOCK_INITIALIZER; + spinlock_lock(&spinlock); + + if (!check_and_generate_certificates()) { + spinlock_unlock(&spinlock); + return false; + } + + bool done = false, can_retry = true; + size_t retries = 0; + do { + done = send_curl_request(registry_get_this_machine_guid(), registry_get_this_machine_hostname(), token, rooms, url, proxy, insecure, &can_retry); + if (done) break; + sleep_usec(300 * USEC_PER_MS + 100 * retries * USEC_PER_MS); + retries++; + } while(can_retry && retries < 5); + + spinlock_unlock(&spinlock); + return done; +} + +bool claim_agent_from_environment(void) { + const char *url = getenv("NETDATA_CLAIM_URL"); + if(!url || !*url) { + url = appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "url", DEFAULT_CLOUD_BASE_URL); + if(!url || !*url) return false; + } + + const char *token = getenv("NETDATA_CLAIM_TOKEN"); + if(!token || !*token) + return false; + + const char *rooms = getenv("NETDATA_CLAIM_ROOMS"); + if(!rooms) + rooms = ""; + + const char *proxy = getenv("NETDATA_CLAIM_PROXY"); + if(!proxy || !*proxy) + proxy = ""; + + bool insecure = CONFIG_BOOLEAN_NO; + const char *from_env = getenv("NETDATA_EXTRA_CLAIM_OPTS"); + if(from_env && *from_env && strstr(from_env, "-insecure") == 0) + insecure = CONFIG_BOOLEAN_YES; + + return claim_agent(url, token, rooms, proxy, insecure); +} + +bool claim_agent_from_claim_conf(void) { + static struct config claim_config = { + .first_section = NULL, + .last_section = NULL, + .mutex = NETDATA_MUTEX_INITIALIZER, + .index = { + .avl_tree = { + .root = NULL, + .compar = appconfig_section_compare + }, + .rwlock = AVL_LOCK_INITIALIZER + } + }; + static SPINLOCK spinlock = NETDATA_SPINLOCK_INITIALIZER; + bool ret = 
false; + + spinlock_lock(&spinlock); + + errno_clear(); + char *filename = filename_from_path_entry_strdupz(netdata_configured_user_config_dir, "claim.conf"); + bool loaded = appconfig_load(&claim_config, filename, 1, NULL); + freez(filename); + + if(loaded) { + const char *url = appconfig_get(&claim_config, CONFIG_SECTION_GLOBAL, "url", DEFAULT_CLOUD_BASE_URL); + const char *token = appconfig_get(&claim_config, CONFIG_SECTION_GLOBAL, "token", ""); + const char *rooms = appconfig_get(&claim_config, CONFIG_SECTION_GLOBAL, "rooms", ""); + const char *proxy = appconfig_get(&claim_config, CONFIG_SECTION_GLOBAL, "proxy", ""); + bool insecure = appconfig_get_boolean(&claim_config, CONFIG_SECTION_GLOBAL, "insecure", CONFIG_BOOLEAN_NO); + + if(token && *token && url && *url) + ret = claim_agent(url, token, rooms, proxy, insecure); + } + + spinlock_unlock(&spinlock); + + return ret; +} + +bool claim_agent_from_split_files(void) { + char filename[FILENAME_MAX + 1]; + + snprintfz(filename, sizeof(filename), "%s/token", netdata_configured_cloud_dir); + long token_len = 0; + char *token = read_by_filename(filename, &token_len); + if(!token || !*token) + return false; + + snprintfz(filename, sizeof(filename), "%s/rooms", netdata_configured_cloud_dir); + long rooms_len = 0; + char *rooms = read_by_filename(filename, &rooms_len); + if(!rooms || !*rooms) + rooms = NULL; + + bool ret = claim_agent(cloud_config_url_get(), token, rooms, cloud_config_proxy_get(), cloud_config_insecure_get()); + + if(ret) { + snprintfz(filename, sizeof(filename), "%s/token", netdata_configured_cloud_dir); + unlink(filename); + + snprintfz(filename, sizeof(filename), "%s/rooms", netdata_configured_cloud_dir); + unlink(filename); + } + + return ret; +} + +bool claim_agent_automatically(void) { + // Use /etc/netdata/claim.conf + + if(claim_agent_from_claim_conf()) + return true; + + // Users may set NETDATA_CLAIM_TOKEN and NETDATA_CLAIM_ROOMS + // A good choice for docker container users. 
+ + if(claim_agent_from_environment()) + return true; + + // Users may store token and rooms in /var/lib/netdata/cloud.d + // This was a bad choice, since users may have to create this directory + // which may end up with the wrong permissions, preventing netdata from storing + // the required information there. + + if(claim_agent_from_split_files()) + return true; + + return false; +} diff --git a/src/claim/claim.c b/src/claim/claim.c index 5383aac3709e7f..b3c4f9e7b3bf53 100644 --- a/src/claim/claim.c +++ b/src/claim/claim.c @@ -1,470 +1,209 @@ // SPDX-License-Identifier: GPL-3.0-or-later #include "claim.h" -#include "registry/registry_internals.h" -#include "aclk/aclk.h" -#include "aclk/aclk_proxy.h" - -char *claiming_pending_arguments = NULL; - -static char *claiming_errors[] = { - "Agent claimed successfully", // 0 - "Unknown argument", // 1 - "Problems with claiming working directory", // 2 - "Missing dependencies", // 3 - "Failure to connect to endpoint", // 4 - "The CLI didn't work", // 5 - "Wrong user", // 6 - "Unknown HTTP error message", // 7 - "invalid node id", // 8 - "invalid node name", // 9 - "invalid room id", // 10 - "invalid public key", // 11 - "token expired/token not found/invalid token", // 12 - "already claimed", // 13 - "processing claiming", // 14 - "Internal Server Error", // 15 - "Gateway Timeout", // 16 - "Service Unavailable", // 17 - "Agent Unique Id Not Readable" // 18 -}; - -/* Retrieve the claim id for the agent. - * Caller owns the string. -*/ -char *get_agent_claimid() -{ - char *result; - rrdhost_aclk_state_lock(localhost); - result = (localhost->aclk_state.claimed_id == NULL) ? 
NULL : strdupz(localhost->aclk_state.claimed_id); - rrdhost_aclk_state_unlock(localhost); - return result; -} - -#define CLAIMING_COMMAND_LENGTH 16384 -#define CLAIMING_PROXY_LENGTH (CLAIMING_COMMAND_LENGTH/4) -/* rrd_init() and post_conf_load() must have been called before this function */ -CLAIM_AGENT_RESPONSE claim_agent(const char *claiming_arguments, bool force, const char **msg __maybe_unused) -{ - if (!force || !netdata_cloud_enabled) { - netdata_log_error("Refusing to claim agent -> cloud functionality has been disabled"); - return CLAIM_AGENT_CLOUD_DISABLED; - } +// -------------------------------------------------------------------------------------------------------------------- +// keep track of the last claiming failure reason -#ifndef DISABLE_CLOUD - char command_exec_buffer[CLAIMING_COMMAND_LENGTH + 1]; - char command_line_buffer[CLAIMING_COMMAND_LENGTH + 1]; +static char cloud_claim_failure_reason[4096] = ""; - // This is guaranteed to be set early in main via post_conf_load() - char *cloud_base_url = appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "cloud base url", NULL); - if (cloud_base_url == NULL) { - internal_fatal(true, "Do not move the cloud base url out of post_conf_load!!"); - return CLAIM_AGENT_NO_CLOUD_URL; +void claim_agent_failure_reason_set(const char *format, ...) { + if(!format || !*format) { + cloud_claim_failure_reason[0] = '\0'; + return; } - const char *proxy_str; - ACLK_PROXY_TYPE proxy_type; - char proxy_flag[CLAIMING_PROXY_LENGTH] = "-noproxy"; - - proxy_str = aclk_get_proxy(&proxy_type); - - if (proxy_type == PROXY_TYPE_SOCKS5 || proxy_type == PROXY_TYPE_HTTP) - snprintf(proxy_flag, CLAIMING_PROXY_LENGTH, "-proxy=\"%s\"", proxy_str); - - snprintfz(command_exec_buffer, CLAIMING_COMMAND_LENGTH, - "exec \"%s%snetdata-claim.sh\"", - netdata_exe_path[0] ? netdata_exe_path : "", - netdata_exe_path[0] ? 
"/" : "" - ); - - snprintfz(command_line_buffer, - CLAIMING_COMMAND_LENGTH, - "%s %s -hostname=%s -id=%s -url=%s -noreload %s", - command_exec_buffer, - proxy_flag, - netdata_configured_hostname, - localhost->machine_guid, - cloud_base_url, - claiming_arguments); - - netdata_log_info("Executing agent claiming command: %s", command_exec_buffer); - POPEN_INSTANCE *instance = spawn_popen_run(command_line_buffer); - if(!instance) { - netdata_log_error("Cannot popen(\"%s\").", command_exec_buffer); - return CLAIM_AGENT_CANNOT_EXECUTE_CLAIM_SCRIPT; - } + va_list args; + va_start(args, format); + vsnprintf(cloud_claim_failure_reason, sizeof(cloud_claim_failure_reason), format, args); + va_end(args); + + nd_log(NDLS_DAEMON, NDLP_ERR, + "CLAIM: %s", cloud_claim_failure_reason); +} - netdata_log_info("Waiting for claiming command '%s' to finish.", command_exec_buffer); - char read_buffer[100 + 1]; - while (fgets(read_buffer, 100, instance->child_stdout_fp) != NULL) ; +const char *claim_agent_failure_reason_get(void) { + if(!cloud_claim_failure_reason[0]) + return "Agent is not claimed yet"; + else + return cloud_claim_failure_reason; +} - int exit_code = spawn_popen_wait(instance); +// -------------------------------------------------------------------------------------------------------------------- +// claimed_id load/save - netdata_log_info("Agent claiming command '%s' returned with code %d", command_exec_buffer, exit_code); - if (0 == exit_code) { - load_claiming_state(); - return CLAIM_AGENT_OK; - } - if (exit_code < 0) { - netdata_log_error("Agent claiming command '%s' failed to complete its run", command_exec_buffer); - return CLAIM_AGENT_CLAIM_SCRIPT_FAILED; +bool claimed_id_save_to_file(const char *claimed_id_str) { + bool ret; + const char *filename = filename_from_path_entry_strdupz(netdata_configured_cloud_dir, "claimed_id"); + FILE *fp = fopen(filename, "w"); + if(fp) { + fprintf(fp, "%s", claimed_id_str); + fclose(fp); + ret = true; } - errno_clear(); - 
unsigned maximum_known_exit_code = sizeof(claiming_errors) / sizeof(claiming_errors[0]) - 1; - - if ((unsigned)exit_code > maximum_known_exit_code) { - netdata_log_error("Agent failed to be claimed with an unknown error. Cmd: '%s'", command_exec_buffer); - return CLAIM_AGENT_CLAIM_SCRIPT_RETURNED_INVALID_CODE; + else { + nd_log(NDLS_DAEMON, NDLP_ERR, + "CLAIM: cannot open file '%s' for writing.", filename); + ret = false; } - netdata_log_error("Agent failed to be claimed using the command '%s' with the following error message: %s", - command_exec_buffer, claiming_errors[exit_code]); + freez((void *)filename); + return ret; +} - if(msg) *msg = claiming_errors[exit_code]; +static ND_UUID claimed_id_parse(const char *claimed_id, const char *source) { + ND_UUID uuid; -#else - UNUSED(claiming_arguments); - UNUSED(claiming_errors); -#endif + if(uuid_parse_flexi(claimed_id, uuid.uuid) != 0) { + uuid = UUID_ZERO; + nd_log(NDLS_DAEMON, NDLP_ERR, + "CLAIM: claimed_id '%s' (loaded from '%s'), is not a valid UUID.", + claimed_id, source); + } - return CLAIM_AGENT_FAILED_WITH_MESSAGE; + return uuid; } -/* Change the claimed state of the agent. - * - * This only happens when the user has explicitly requested it: - * - via the cli tool by reloading the claiming state - * - after spawning the claim because of a command-line argument - * If this happens with the ACLK active under an old claim then we MUST KILL THE LINK - */ -void load_claiming_state(void) -{ - // -------------------------------------------------------------------- - // Check if the cloud is enabled -#if defined( DISABLE_CLOUD ) || !defined( ENABLE_ACLK ) - netdata_cloud_enabled = false; -#else - nd_uuid_t uuid; - - // Propagate into aclk and registry. Be kind of atomic... 
- appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "cloud base url", DEFAULT_CLOUD_BASE_URL); - - rrdhost_aclk_state_lock(localhost); - if (localhost->aclk_state.claimed_id) { - if (aclk_connected) - localhost->aclk_state.prev_claimed_id = strdupz(localhost->aclk_state.claimed_id); - freez(localhost->aclk_state.claimed_id); - localhost->aclk_state.claimed_id = NULL; - } - if (aclk_connected) - { - netdata_log_info("Agent was already connected to Cloud - forcing reconnection under new credentials"); - aclk_kill_link = 1; - } - aclk_disable_runtime = 0; - - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s/cloud.d/claimed_id", netdata_configured_varlib_dir); +static ND_UUID claimed_id_load_from_file(void) { + ND_UUID uuid; long bytes_read; + const char *filename = filename_from_path_entry_strdupz(netdata_configured_cloud_dir, "claimed_id"); char *claimed_id = read_by_filename(filename, &bytes_read); - if(claimed_id && uuid_parse(claimed_id, uuid)) { - netdata_log_error("claimed_id \"%s\" doesn't look like valid UUID", claimed_id); - freez(claimed_id); - claimed_id = NULL; - } - - if(claimed_id) { - localhost->aclk_state.claimed_id = mallocz(UUID_STR_LEN); - uuid_unparse_lower(uuid, localhost->aclk_state.claimed_id); - } - - rrdhost_aclk_state_unlock(localhost); - invalidate_node_instances(&localhost->host_uuid, claimed_id ? &uuid : NULL); - metaqueue_store_claim_id(&localhost->host_uuid, claimed_id ? &uuid : NULL); - if (!claimed_id) { - netdata_log_info("Unable to load '%s', setting state to AGENT_UNCLAIMED", filename); - return; - } + if(!claimed_id) + uuid = UUID_ZERO; + else + uuid = claimed_id_parse(claimed_id, filename); freez(claimed_id); - - netdata_log_info("File '%s' was found. 
Setting state to AGENT_CLAIMED.", filename); - netdata_cloud_enabled = appconfig_get_boolean_ondemand(&cloud_config, CONFIG_SECTION_GLOBAL, "enabled", netdata_cloud_enabled); -#endif -} - -struct config cloud_config = { .first_section = NULL, - .last_section = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, - .rwlock = AVL_LOCK_INITIALIZER } }; - -void load_cloud_conf(int silent) -{ - char *nd_disable_cloud = getenv("NETDATA_DISABLE_CLOUD"); - if (nd_disable_cloud && !strncmp(nd_disable_cloud, "1", 1)) - netdata_cloud_enabled = CONFIG_BOOLEAN_NO; - - char *filename; - errno_clear(); - - int ret = 0; - - filename = strdupz_path_subpath(netdata_configured_varlib_dir, "cloud.d/cloud.conf"); - - ret = appconfig_load(&cloud_config, filename, 1, NULL); - if(!ret && !silent) - netdata_log_info("CONFIG: cannot load cloud config '%s'. Running with internal defaults.", filename); - - freez(filename); - - // -------------------------------------------------------------------- - // Check if the cloud is enabled - -#if defined( DISABLE_CLOUD ) || !defined( ENABLE_ACLK ) - netdata_cloud_enabled = CONFIG_BOOLEAN_NO; -#else - netdata_cloud_enabled = appconfig_get_boolean_ondemand(&cloud_config, CONFIG_SECTION_GLOBAL, "enabled", netdata_cloud_enabled); -#endif - - // This must be set before any point in the code that accesses it. Do not move it from this function. 
- appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "cloud base url", DEFAULT_CLOUD_BASE_URL); + freez((void *)filename); + return uuid; } -static char *netdata_random_session_id_filename = NULL; -static nd_uuid_t netdata_random_session_id = { 0 }; - -bool netdata_random_session_id_generate(void) { - static char guid[UUID_STR_LEN] = ""; - - uuid_generate_random(netdata_random_session_id); - uuid_unparse_lower(netdata_random_session_id, guid); - - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s/netdata_random_session_id", netdata_configured_varlib_dir); - - bool ret = true; - - (void)unlink(filename); - - // save it - int fd = open(filename, O_WRONLY|O_CREAT|O_TRUNC|O_CLOEXEC, 640); - if(fd == -1) { - netdata_log_error("Cannot create random session id file '%s'.", filename); - ret = false; +static ND_UUID claimed_id_get_from_cloud_conf(void) { + if(appconfig_exists(&cloud_config, CONFIG_SECTION_GLOBAL, "claimed_id")) { + const char *claimed_id = appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "claimed_id", ""); + if(claimed_id && *claimed_id) + return claimed_id_parse(claimed_id, "cloud.conf"); } - else { - if (write(fd, guid, UUID_STR_LEN - 1) != UUID_STR_LEN - 1) { - netdata_log_error("Cannot write the random session id file '%s'.", filename); - ret = false; - } else { - ssize_t bytes = write(fd, "\n", 1); - UNUSED(bytes); - } - close(fd); - } - - if(ret && (!netdata_random_session_id_filename || strcmp(netdata_random_session_id_filename, filename) != 0)) { - freez(netdata_random_session_id_filename); - netdata_random_session_id_filename = strdupz(filename); - } - - return ret; + return UUID_ZERO; } -const char *netdata_random_session_id_get_filename(void) { - if(!netdata_random_session_id_filename) - netdata_random_session_id_generate(); +static ND_UUID claimed_id_load(void) { + ND_UUID uuid = claimed_id_get_from_cloud_conf(); + if(UUIDiszero(uuid)) + uuid = claimed_id_load_from_file(); - return netdata_random_session_id_filename; + 
return uuid; } -bool netdata_random_session_id_matches(const char *guid) { - if(uuid_is_null(netdata_random_session_id)) - return false; +bool is_agent_claimed(void) { + ND_UUID uuid = claim_id_get_uuid(); + return !UUIDiszero(uuid); +} - nd_uuid_t uuid; +// -------------------------------------------------------------------------------------------------------------------- - if(uuid_parse(guid, uuid)) +bool claim_id_matches(const char *claim_id) { + ND_UUID this_one = UUID_ZERO; + if(uuid_parse_flexi(claim_id, this_one.uuid) != 0 || UUIDiszero(this_one)) return false; - if(uuid_compare(netdata_random_session_id, uuid) == 0) + ND_UUID having = claim_id_get_uuid(); + if(!UUIDiszero(having) && UUIDeq(having, this_one)) return true; return false; } -static bool check_claim_param(const char *s) { - if(!s || !*s) return true; +bool claim_id_matches_any(const char *claim_id) { + ND_UUID this_one = UUID_ZERO; + if(uuid_parse_flexi(claim_id, this_one.uuid) != 0 || UUIDiszero(this_one)) + return false; - do { - if(isalnum((uint8_t)*s) || *s == '.' 
|| *s == ',' || *s == '-' || *s == ':' || *s == '/' || *s == '_') - ; - else - return false; + ND_UUID having = claim_id_get_uuid(); + if(!UUIDiszero(having) && UUIDeq(having, this_one)) + return true; - } while(*++s); + having = localhost->aclk.claim_id_of_parent; + if(!UUIDiszero(having) && UUIDeq(having, this_one)) + return true; - return true; -} + having = localhost->aclk.claim_id_of_origin; + if(!UUIDiszero(having) && UUIDeq(having, this_one)) + return true; -void claim_reload_all(void) { - nd_log_limits_unlimited(); - load_claiming_state(); - registry_update_cloud_base_url(); - rrdpush_send_claimed_id(localhost); - nd_log_limits_reset(); + return false; } -int api_v2_claim(struct web_client *w, char *url) { - char *key = NULL; - char *token = NULL; - char *rooms = NULL; - char *base_url = NULL; - - while (url) { - char *value = strsep_skip_consecutive_separators(&url, "&"); - if (!value || !*value) continue; - - char *name = strsep_skip_consecutive_separators(&value, "="); - if (!name || !*name) continue; - if (!value || !*value) continue; - - if(!strcmp(name, "key")) - key = value; - else if(!strcmp(name, "token")) - token = value; - else if(!strcmp(name, "rooms")) - rooms = value; - else if(!strcmp(name, "url")) - base_url = value; +/* Change the claimed state of the agent. 
+ * + * This only happens when the user has explicitly requested it: + * - via the cli tool by reloading the claiming state + * - after spawning the claim because of a command-line argument + * If this happens with the ACLK active under an old claim then we MUST KILL THE LINK + */ +bool load_claiming_state(void) { + if (aclk_online()) { + nd_log(NDLS_DAEMON, NDLP_ERR, + "CLAIM: agent was already connected to NC - forcing reconnection under new credentials"); + aclk_kill_link = 1; + } + aclk_disable_runtime = 0; + + ND_UUID uuid = claimed_id_load(); + if(UUIDiszero(uuid)) { + // not found + if(claim_agent_automatically()) + uuid = claimed_id_load(); } - BUFFER *wb = w->response.data; - buffer_flush(wb); - buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); - - time_t now_s = now_realtime_sec(); - CLOUD_STATUS status = buffer_json_cloud_status(wb, now_s); - - bool can_be_claimed = false; - switch(status) { - case CLOUD_STATUS_AVAILABLE: - case CLOUD_STATUS_DISABLED: - case CLOUD_STATUS_OFFLINE: - can_be_claimed = true; - break; - - case CLOUD_STATUS_UNAVAILABLE: - case CLOUD_STATUS_BANNED: - case CLOUD_STATUS_ONLINE: - can_be_claimed = false; - break; + bool have_claimed_id = false; + if(!UUIDiszero(uuid)) { + // we go it somehow + claim_id_set(uuid); + have_claimed_id = true; } - buffer_json_member_add_boolean(wb, "can_be_claimed", can_be_claimed); - - if(can_be_claimed && key) { - if(!netdata_random_session_id_matches(key)) { - buffer_reset(wb); - buffer_strcat(wb, "invalid key"); - netdata_random_session_id_generate(); // generate a new key, to avoid an attack to find it - return HTTP_RESP_FORBIDDEN; - } - - if(!token || !base_url || !check_claim_param(token) || !check_claim_param(base_url) || (rooms && !check_claim_param(rooms))) { - buffer_reset(wb); - buffer_strcat(wb, "invalid parameters"); - netdata_random_session_id_generate(); // generate a new key, to avoid an attack to find it - return HTTP_RESP_BAD_REQUEST; - } - - 
netdata_random_session_id_generate(); // generate a new key, to avoid an attack to find it - - netdata_cloud_enabled = CONFIG_BOOLEAN_AUTO; - appconfig_set_boolean(&cloud_config, CONFIG_SECTION_GLOBAL, "enabled", CONFIG_BOOLEAN_AUTO); - appconfig_set(&cloud_config, CONFIG_SECTION_GLOBAL, "cloud base url", base_url); - - nd_uuid_t claimed_id; - uuid_generate_random(claimed_id); - char claimed_id_str[UUID_STR_LEN]; - uuid_unparse_lower(claimed_id, claimed_id_str); - - BUFFER *t = buffer_create(1024, NULL); - if(rooms) - buffer_sprintf(t, "-id=%s -token=%s -rooms=%s", claimed_id_str, token, rooms); - else - buffer_sprintf(t, "-id=%s -token=%s", claimed_id_str, token); - - bool success = false; - const char *msg = NULL; - CLAIM_AGENT_RESPONSE rc = claim_agent(buffer_tostring(t), true, &msg); - switch(rc) { - case CLAIM_AGENT_OK: - msg = "ok"; - success = true; - can_be_claimed = false; - claim_reload_all(); - { - int ms = 0; - do { - status = cloud_status(); - if (status == CLOUD_STATUS_ONLINE && __atomic_load_n(&localhost->node_id, __ATOMIC_RELAXED)) - break; - - sleep_usec(50 * USEC_PER_MS); - ms += 50; - } while (ms < 10000); - } - break; + invalidate_node_instances(&localhost->host_uuid, have_claimed_id ? &uuid.uuid : NULL); + metaqueue_store_claim_id(&localhost->host_uuid, have_claimed_id ? 
&uuid.uuid : NULL); - case CLAIM_AGENT_NO_CLOUD_URL: - msg = "No Netdata Cloud URL."; - break; + errno_clear(); - case CLAIM_AGENT_CLAIM_SCRIPT_FAILED: - msg = "Claiming script failed."; - break; + if (!have_claimed_id) + nd_log(NDLS_DAEMON, NDLP_ERR, + "CLAIM: Unable to find our claimed_id, setting state to AGENT_UNCLAIMED"); + else + nd_log(NDLS_DAEMON, NDLP_INFO, + "CLAIM: Found a valid claimed_id, setting state to AGENT_CLAIMED"); - case CLAIM_AGENT_CLOUD_DISABLED: - msg = "Netdata Cloud is disabled on this agent."; - break; + return have_claimed_id; +} - case CLAIM_AGENT_CANNOT_EXECUTE_CLAIM_SCRIPT: - msg = "Failed to execute claiming script."; - break; +CLOUD_STATUS claim_reload_and_wait_online(void) { + nd_log(NDLS_DAEMON, NDLP_INFO, + "CLAIM: Reloading Agent Claiming configuration."); - case CLAIM_AGENT_CLAIM_SCRIPT_RETURNED_INVALID_CODE: - msg = "Claiming script returned invalid code."; - break; + nd_log_limits_unlimited(); + cloud_conf_load(0); + bool claimed = load_claiming_state(); + registry_update_cloud_base_url(); + rrdpush_sender_send_claimed_id(localhost); + nd_log_limits_reset(); - default: - case CLAIM_AGENT_FAILED_WITH_MESSAGE: - if(!msg) - msg = "Unknown error"; + CLOUD_STATUS status = cloud_status(); + if(claimed) { + int ms = 0; + do { + status = cloud_status(); + if ((status == CLOUD_STATUS_ONLINE || status == CLOUD_STATUS_INDIRECT) && !uuid_is_null(localhost->host_uuid)) break; - } - - // our status may have changed - // refresh the status in our output - buffer_flush(wb); - buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); - now_s = now_realtime_sec(); - buffer_json_cloud_status(wb, now_s); - - // and this is the status of the claiming command we run - buffer_json_member_add_boolean(wb, "success", success); - buffer_json_member_add_string(wb, "message", msg); - } - - if(can_be_claimed) - buffer_json_member_add_string(wb, "key_filename", netdata_random_session_id_get_filename()); - buffer_json_agents_v2(wb, NULL, 
now_s, false, false); - buffer_json_finalize(wb); + sleep_usec(50 * USEC_PER_MS); + ms += 50; + } while (ms < 10000); + } - return HTTP_RESP_OK; + return status; } diff --git a/src/claim/claim.h b/src/claim/claim.h index ccab8aaa168731..073771d1c31c65 100644 --- a/src/claim/claim.h +++ b/src/claim/claim.h @@ -4,29 +4,32 @@ #define NETDATA_CLAIM_H 1 #include "daemon/common.h" +#include "cloud-status.h" +#include "claim_id.h" + +const char *claim_agent_failure_reason_get(void); +void claim_agent_failure_reason_set(const char *format, ...) PRINTFLIKE(1, 2); -extern char *claiming_pending_arguments; extern struct config cloud_config; -typedef enum __attribute__((packed)) { - CLAIM_AGENT_OK, - CLAIM_AGENT_CLOUD_DISABLED, - CLAIM_AGENT_NO_CLOUD_URL, - CLAIM_AGENT_CANNOT_EXECUTE_CLAIM_SCRIPT, - CLAIM_AGENT_CLAIM_SCRIPT_FAILED, - CLAIM_AGENT_CLAIM_SCRIPT_RETURNED_INVALID_CODE, - CLAIM_AGENT_FAILED_WITH_MESSAGE, -} CLAIM_AGENT_RESPONSE; - -CLAIM_AGENT_RESPONSE claim_agent(const char *claiming_arguments, bool force, const char **msg); -char *get_agent_claimid(void); -void load_claiming_state(void); -void load_cloud_conf(int silent); -void claim_reload_all(void); - -bool netdata_random_session_id_generate(void); -const char *netdata_random_session_id_get_filename(void); -bool netdata_random_session_id_matches(const char *guid); -int api_v2_claim(struct web_client *w, char *url); +bool claim_agent(const char *url, const char *token, const char *rooms, const char *proxy, bool insecure); +bool claim_agent_automatically(void); + +bool claimed_id_save_to_file(const char *claimed_id_str); + +bool is_agent_claimed(void); +bool claim_id_matches(const char *claim_id); +bool claim_id_matches_any(const char *claim_id); +bool load_claiming_state(void); +void cloud_conf_load(int silent); +void cloud_conf_init_after_registry(void); +bool cloud_conf_save(void); +bool cloud_conf_regenerate(const char *claimed_id_str, const char *machine_guid, const char *hostname, const char *token, const 
char *rooms, const char *url, const char *proxy, int insecure); +CLOUD_STATUS claim_reload_and_wait_online(void); + +const char *cloud_config_url_get(void); +void cloud_config_url_set(const char *url); +const char *cloud_config_proxy_get(void); +bool cloud_config_insecure_get(void); #endif //NETDATA_CLAIM_H diff --git a/src/claim/claim_id.c b/src/claim/claim_id.c new file mode 100644 index 00000000000000..dd79eb640d5f2a --- /dev/null +++ b/src/claim/claim_id.c @@ -0,0 +1,123 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "claim_id.h" + +static struct { + SPINLOCK spinlock; + ND_UUID claim_uuid; + ND_UUID claim_uuid_saved; +} claim = { + .spinlock = NETDATA_SPINLOCK_INITIALIZER, +}; + +void claim_id_clear_previous_working(void) { + spinlock_lock(&claim.spinlock); + claim.claim_uuid_saved = UUID_ZERO; + spinlock_unlock(&claim.spinlock); +} + +void claim_id_set(ND_UUID new_claim_id) { + spinlock_lock(&claim.spinlock); + + if(!UUIDiszero(claim.claim_uuid)) { + if(aclk_online()) + claim.claim_uuid_saved = claim.claim_uuid; + claim.claim_uuid = UUID_ZERO; + } + + claim.claim_uuid = new_claim_id; + if(localhost) + localhost->aclk.claim_id_of_origin = claim.claim_uuid; + + spinlock_unlock(&claim.spinlock); +} + +// returns true when the supplied str is a valid UUID. +// giving NULL, an empty string, or "NULL" is valid. 
+bool claim_id_set_str(const char *claim_id_str) { + bool rc; + + ND_UUID uuid; + if(!claim_id_str || !*claim_id_str || strcmp(claim_id_str, "NULL") == 0) { + uuid = UUID_ZERO, + rc = true; + } + else + rc = uuid_parse(claim_id_str, uuid.uuid) == 0; + + claim_id_set(uuid); + + return rc; +} + +ND_UUID claim_id_get_uuid(void) { + static ND_UUID uuid; + spinlock_lock(&claim.spinlock); + uuid = claim.claim_uuid; + spinlock_unlock(&claim.spinlock); + return uuid; +} + +void claim_id_get_str(char str[UUID_STR_LEN]) { + ND_UUID uuid = claim_id_get_uuid(); + + if(UUIDiszero(uuid)) + memset(str, 0, UUID_STR_LEN); + else + uuid_unparse_lower(uuid.uuid, str); +} + +const char *claim_id_get_str_mallocz(void) { + char *str = mallocz(UUID_STR_LEN); + claim_id_get_str(str); + return str; +} + +CLAIM_ID claim_id_get(void) { + CLAIM_ID ret = { + .uuid = claim_id_get_uuid(), + }; + + if(claim_id_is_set(ret)) + uuid_unparse_lower(ret.uuid.uuid, ret.str); + else + ret.str[0] = '\0'; + + return ret; +} + +CLAIM_ID claim_id_get_last_working(void) { + CLAIM_ID ret = { 0 }; + + spinlock_lock(&claim.spinlock); + ret.uuid = claim.claim_uuid_saved; + spinlock_unlock(&claim.spinlock); + + if(claim_id_is_set(ret)) + uuid_unparse_lower(ret.uuid.uuid, ret.str); + else + ret.str[0] = '\0'; + + return ret; +} + +CLAIM_ID rrdhost_claim_id_get(RRDHOST *host) { + CLAIM_ID ret = { 0 }; + + if(host == localhost) { + ret.uuid = claim_id_get_uuid(); + if(UUIDiszero(ret.uuid)) + ret.uuid = host->aclk.claim_id_of_parent; + } + else { + if (!UUIDiszero(host->aclk.claim_id_of_origin)) + ret.uuid = host->aclk.claim_id_of_origin; + else + ret.uuid = host->aclk.claim_id_of_parent; + } + + if(claim_id_is_set(ret)) + uuid_unparse_lower(ret.uuid.uuid, ret.str); + + return ret; +} diff --git a/src/claim/claim_id.h b/src/claim/claim_id.h new file mode 100644 index 00000000000000..95958d430dbd9a --- /dev/null +++ b/src/claim/claim_id.h @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef 
NETDATA_CLAIM_ID_H +#define NETDATA_CLAIM_ID_H + +#include "claim.h" + +void claim_id_keep_current(void); + +bool claim_id_set_str(const char *claim_id_str); +void claim_id_set(ND_UUID new_claim_id); +void claim_id_clear_previous_working(void); +ND_UUID claim_id_get_uuid(void); +void claim_id_get_str(char str[UUID_STR_LEN]); +const char *claim_id_get_str_mallocz(void); + +typedef struct { + ND_UUID uuid; + char str[UUID_STR_LEN]; +} CLAIM_ID; + +#define claim_id_is_set(claim_id) (!UUIDiszero(claim_id.uuid)) + +CLAIM_ID claim_id_get(void); +CLAIM_ID claim_id_get_last_working(void); +CLAIM_ID rrdhost_claim_id_get(RRDHOST *host); + +#endif //NETDATA_CLAIM_ID_H diff --git a/src/claim/cloud-conf.c b/src/claim/cloud-conf.c new file mode 100644 index 00000000000000..9ee617130f6018 --- /dev/null +++ b/src/claim/cloud-conf.c @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "claim.h" + +struct config cloud_config = { + .first_section = NULL, + .last_section = NULL, + .mutex = NETDATA_MUTEX_INITIALIZER, + .index = { + .avl_tree = { + .root = NULL, + .compar = appconfig_section_compare + }, + .rwlock = AVL_LOCK_INITIALIZER + } +}; + +const char *cloud_config_url_get(void) { + return appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "url", DEFAULT_CLOUD_BASE_URL); +} + +void cloud_config_url_set(const char *url) { + if(!url || !*url) return; + + const char *existing = cloud_config_url_get(); + if(strcmp(existing, url) != 0) + appconfig_set(&cloud_config, CONFIG_SECTION_GLOBAL, "url", url); +} + +const char *cloud_config_proxy_get(void) { + // load cloud.conf or internal default + const char *proxy = appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "proxy", "env"); + + // backwards compatibility, from when proxy was in netdata.conf + // netdata.conf has bigger priority + if (config_exists(CONFIG_SECTION_CLOUD, "proxy")) { + // get it from netdata.conf + proxy = config_get(CONFIG_SECTION_CLOUD, "proxy", proxy); + + // update cloud.conf + proxy = 
appconfig_set(&cloud_config, CONFIG_SECTION_GLOBAL, "proxy", proxy); + } + else { + // set in netdata.conf the proxy of cloud.conf + config_set(CONFIG_SECTION_CLOUD, "proxy", proxy); + } + + return proxy; +} + +bool cloud_config_insecure_get(void) { + // load it from cloud.conf or use internal default + return appconfig_get_boolean(&cloud_config, CONFIG_SECTION_GLOBAL, "insecure", CONFIG_BOOLEAN_NO); +} + +static void cloud_conf_load_defaults(void) { + appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "url", DEFAULT_CLOUD_BASE_URL); + appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "proxy", "env"); + appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "token", ""); + appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "rooms", ""); + appconfig_get_boolean(&cloud_config, CONFIG_SECTION_GLOBAL, "insecure", CONFIG_BOOLEAN_NO); + appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "machine_guid", ""); + appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "claimed_id", ""); + appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "hostname", ""); +} + +void cloud_conf_load(int silent) { + errno_clear(); + char *filename = filename_from_path_entry_strdupz(netdata_configured_cloud_dir, "cloud.conf"); + int ret = appconfig_load(&cloud_config, filename, 1, NULL); + + if(!ret && !silent) + nd_log(NDLS_DAEMON, NDLP_ERR, + "CLAIM: cannot load cloud config '%s'. 
Running with internal defaults.", filename); + + freez(filename); + + appconfig_move(&cloud_config, + CONFIG_SECTION_GLOBAL, "cloud base url", + CONFIG_SECTION_GLOBAL, "url"); + + cloud_conf_load_defaults(); +} + +void cloud_conf_init_after_registry(void) { + const char *machine_guid = appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "machine_guid", ""); + const char *hostname = appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "hostname", ""); + + // for machine guid and hostname we have to use appconfig_set() for that they will be saved uncommented + if(!machine_guid || !*machine_guid) + appconfig_set(&cloud_config, CONFIG_SECTION_GLOBAL, "machine_guid", registry_get_this_machine_guid()); + + if(!hostname || !*hostname) + appconfig_set(&cloud_config, CONFIG_SECTION_GLOBAL, "hostname", registry_get_this_machine_hostname()); +} + +bool cloud_conf_save(void) { + char filename[FILENAME_MAX + 1]; + + CLEAN_BUFFER *wb = buffer_create(0, NULL); + appconfig_generate(&cloud_config, wb, false, false); + snprintfz(filename, sizeof(filename), "%s/cloud.conf", netdata_configured_cloud_dir); + FILE *fp = fopen(filename, "w"); + if(!fp) { + nd_log(NDLS_DAEMON, NDLP_ERR, "Cannot open file '%s' for writing.", filename); + return false; + } + + fprintf(fp, "%s", buffer_tostring(wb)); + fclose(fp); + return true; +} + +bool cloud_conf_regenerate(const char *claimed_id_str, const char *machine_guid, const char *hostname, const char *token, const char *rooms, const char *url, const char *proxy, int insecure) { + // for backwards compatibility (older agents), save the claimed_id to its file + claimed_id_save_to_file(claimed_id_str); + + appconfig_set(&cloud_config, CONFIG_SECTION_GLOBAL, "url", url); + appconfig_set(&cloud_config, CONFIG_SECTION_GLOBAL, "proxy", proxy ? proxy : ""); + appconfig_set(&cloud_config, CONFIG_SECTION_GLOBAL, "token", token ? token : ""); + appconfig_set(&cloud_config, CONFIG_SECTION_GLOBAL, "rooms", rooms ? 
rooms : ""); + appconfig_set_boolean(&cloud_config, CONFIG_SECTION_GLOBAL, "insecure", insecure); + appconfig_set(&cloud_config, CONFIG_SECTION_GLOBAL, "machine_guid", machine_guid ? machine_guid : ""); + appconfig_set(&cloud_config, CONFIG_SECTION_GLOBAL, "claimed_id", claimed_id_str ? claimed_id_str : ""); + appconfig_set(&cloud_config, CONFIG_SECTION_GLOBAL, "hostname", hostname ? hostname : ""); + + return cloud_conf_save(); +} diff --git a/src/claim/cloud-status.c b/src/claim/cloud-status.c new file mode 100644 index 00000000000000..426c59a68c8aed --- /dev/null +++ b/src/claim/cloud-status.c @@ -0,0 +1,134 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "claim.h" + +const char *cloud_status_to_string(CLOUD_STATUS status) { + switch(status) { + default: + case CLOUD_STATUS_AVAILABLE: + return "available"; + + case CLOUD_STATUS_BANNED: + return "banned"; + + case CLOUD_STATUS_OFFLINE: + return "offline"; + + case CLOUD_STATUS_ONLINE: + return "online"; + + case CLOUD_STATUS_INDIRECT: + return "indirect"; + } +} + +CLOUD_STATUS cloud_status(void) { + if(unlikely(aclk_disable_runtime)) + return CLOUD_STATUS_BANNED; + + if(likely(aclk_online())) + return CLOUD_STATUS_ONLINE; + + if(localhost->sender && + rrdhost_flag_check(localhost, RRDHOST_FLAG_RRDPUSH_SENDER_READY_4_METRICS) && + stream_has_capability(localhost->sender, STREAM_CAP_NODE_ID) && + !uuid_is_null(localhost->node_id) && + !UUIDiszero(localhost->aclk.claim_id_of_parent)) + return CLOUD_STATUS_INDIRECT; + + if(is_agent_claimed()) + return CLOUD_STATUS_OFFLINE; + + return CLOUD_STATUS_AVAILABLE; +} + +time_t cloud_last_change(void) { + time_t ret = MAX(last_conn_time_mqtt, last_disconnect_time); + if(!ret) ret = netdata_start_time; + return ret; +} + +time_t cloud_next_connection_attempt(void) { + return next_connection_attempt; +} + +size_t cloud_connection_id(void) { + return aclk_connection_counter; +} + +const char *cloud_status_aclk_offline_reason() { + if(aclk_disable_runtime) + return 
"banned"; + + return aclk_status_to_string(); +} + +const char *cloud_status_aclk_base_url() { + return aclk_cloud_base_url; +} + +CLOUD_STATUS buffer_json_cloud_status(BUFFER *wb, time_t now_s) { + CLOUD_STATUS status = cloud_status(); + + buffer_json_member_add_object(wb, "cloud"); + { + size_t id = cloud_connection_id(); + time_t last_change = cloud_last_change(); + time_t next_connect = cloud_next_connection_attempt(); + buffer_json_member_add_uint64(wb, "id", id); + buffer_json_member_add_string(wb, "status", cloud_status_to_string(status)); + buffer_json_member_add_time_t(wb, "since", last_change); + buffer_json_member_add_time_t(wb, "age", now_s - last_change); + + switch(status) { + default: + case CLOUD_STATUS_AVAILABLE: + // the agent is not claimed + buffer_json_member_add_string(wb, "url", cloud_config_url_get()); + buffer_json_member_add_string(wb, "reason", claim_agent_failure_reason_get()); + break; + + case CLOUD_STATUS_BANNED: { + // the agent is claimed, but has been banned from NC + CLAIM_ID claim_id = claim_id_get(); + buffer_json_member_add_string(wb, "claim_id", claim_id.str); + buffer_json_member_add_string(wb, "url", cloud_status_aclk_base_url()); + buffer_json_member_add_string(wb, "reason", "Agent is banned from Netdata Cloud"); + buffer_json_member_add_string(wb, "url", cloud_config_url_get()); + break; + } + + case CLOUD_STATUS_OFFLINE: { + // the agent is claimed, but cannot get online + CLAIM_ID claim_id = claim_id_get(); + buffer_json_member_add_string(wb, "claim_id", claim_id.str); + buffer_json_member_add_string(wb, "url", cloud_status_aclk_base_url()); + buffer_json_member_add_string(wb, "reason", cloud_status_aclk_offline_reason()); + if (next_connect > now_s) { + buffer_json_member_add_time_t(wb, "next_check", next_connect); + buffer_json_member_add_time_t(wb, "next_in", next_connect - now_s); + } + break; + } + + case CLOUD_STATUS_ONLINE: { + // the agent is claimed and online + CLAIM_ID claim_id = claim_id_get(); + 
buffer_json_member_add_string(wb, "claim_id", claim_id.str); + buffer_json_member_add_string(wb, "url", cloud_status_aclk_base_url()); + buffer_json_member_add_string(wb, "reason", ""); + break; + } + + case CLOUD_STATUS_INDIRECT: { + CLAIM_ID claim_id = rrdhost_claim_id_get(localhost); + buffer_json_member_add_string(wb, "claim_id", claim_id.str); + buffer_json_member_add_string(wb, "url", cloud_config_url_get()); + break; + } + } + } + buffer_json_object_close(wb); // cloud + + return status; +} diff --git a/src/claim/cloud-status.h b/src/claim/cloud-status.h new file mode 100644 index 00000000000000..648c114f9a307e --- /dev/null +++ b/src/claim/cloud-status.h @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_CLOUD_STATUS_H +#define NETDATA_CLOUD_STATUS_H + +#include "daemon/common.h" + +typedef enum __attribute__((packed)) { + CLOUD_STATUS_AVAILABLE = 1, // cloud and aclk functionality is available, but the agent is not claimed + CLOUD_STATUS_BANNED, // the agent has been banned from cloud + CLOUD_STATUS_OFFLINE, // the agent tries to connect to cloud, but cannot do it + CLOUD_STATUS_INDIRECT, // the agent is connected to cloud via a parent + CLOUD_STATUS_ONLINE, // the agent is connected to cloud +} CLOUD_STATUS; + +const char *cloud_status_to_string(CLOUD_STATUS status); +CLOUD_STATUS cloud_status(void); + +time_t cloud_last_change(void); +time_t cloud_next_connection_attempt(void); +size_t cloud_connection_id(void); +const char *cloud_status_aclk_offline_reason(void); +const char *cloud_status_aclk_base_url(void); +CLOUD_STATUS buffer_json_cloud_status(BUFFER *wb, time_t now_s); + +#endif //NETDATA_CLOUD_STATUS_H diff --git a/src/claim/netdata-claim.sh.in b/src/claim/netdata-claim.sh.in index f4fa382b694dbe..75809292e7bc20 100755 --- a/src/claim/netdata-claim.sh.in +++ b/src/claim/netdata-claim.sh.in @@ -1,451 +1,111 @@ -#!/usr/bin/env bash -# netdata -# real-time performance and health monitoring, done right! 
-# (C) 2023 Netdata Inc. -# SPDX-License-Identifier: GPL-3.0-or-later - -# Exit code: 0 - Success -# Exit code: 1 - Unknown argument -# Exit code: 2 - Problems with claiming working directory -# Exit code: 3 - Missing dependencies -# Exit code: 4 - Failure to connect to endpoint -# Exit code: 5 - The CLI didn't work -# Exit code: 6 - Wrong user -# Exit code: 7 - Unknown HTTP error message -# -# OK: Agent claimed successfully -# HTTP Status code: 204 -# Exit code: 0 +#!/bin/sh # -# Unknown HTTP error message -# HTTP Status code: 422 -# Exit code: 7 -ERROR_KEYS[7]="None" -ERROR_MESSAGES[7]="Unknown HTTP error message" - -# Error: The agent id is invalid; it does not fulfill the constraints -# HTTP Status code: 422 -# Exit code: 8 -ERROR_KEYS[8]="ErrInvalidNodeID" -ERROR_MESSAGES[8]="invalid node id" - -# Error: The agent hostname is invalid; it does not fulfill the constraints -# HTTP Status code: 422 -# Exit code: 9 -ERROR_KEYS[9]="ErrInvalidNodeName" -ERROR_MESSAGES[9]="invalid node name" - -# Error: At least one of the given rooms ids is invalid; it does not fulfill the constraints -# HTTP Status code: 422 -# Exit code: 10 -ERROR_KEYS[10]="ErrInvalidRoomID" -ERROR_MESSAGES[10]="invalid room id" - -# Error: Invalid public key; the public key is empty or not present -# HTTP Status code: 422 -# Exit code: 11 -ERROR_KEYS[11]="ErrInvalidPublicKey" -ERROR_MESSAGES[11]="invalid public key" +# Copyright (c) 2024 Netdata Inc. +# SPDX-License-Identifier: GPL-3.0-or-later # -# Error: Expired, missing or invalid token -# HTTP Status code: 403 -# Exit code: 12 -ERROR_KEYS[12]="ErrForbidden" -ERROR_MESSAGES[12]="token expired/token not found/invalid token" - -# Error: Duplicate agent id; an agent with the same id is already registered in the cloud -# HTTP Status code: 409 -# Exit code: 13 -ERROR_KEYS[13]="ErrAlreadyClaimed" -ERROR_MESSAGES[13]="already claimed" - -# Error: The node claiming process is still in progress. 
-# HTTP Status code: 102 -# Exit code: 14 -ERROR_KEYS[14]="ErrProcessingClaim" -ERROR_MESSAGES[14]="processing claiming" +# %%NEW_CLAIMING_METHOD%% -# Error: Internal server error. Any other unexpected error (DB problems, etc.) -# HTTP Status code: 500 -# Exit code: 15 -ERROR_KEYS[15]="ErrInternalServerError" -ERROR_MESSAGES[15]="Internal Server Error" +set -e -# Error: There was a timeout processing the claim. -# HTTP Status code: 504 -# Exit code: 16 -ERROR_KEYS[16]="ErrGatewayTimeout" -ERROR_MESSAGES[16]="Gateway Timeout" - -# Error: The service cannot handle the claiming request at this time. -# HTTP Status code: 503 -# Exit code: 17 -ERROR_KEYS[17]="ErrServiceUnavailable" -ERROR_MESSAGES[17]="Service Unavailable" - -# Exit code: 18 - Agent unique id is not generated yet. - -NETDATA_RUNNING=1 - -get_config_value() { - conf_file="${1}" - section="${2}" - key_name="${3}" - if [ "${NETDATA_RUNNING}" -eq 1 ]; then - config_result=$(@sbindir_POST@/netdatacli 2>/dev/null read-config "$conf_file|$section|$key_name"; exit $?) - result="$?" - if [ "${result}" -ne 0 ]; then - echo >&2 "Unable to communicate with Netdata daemon, querying config from disk instead." - NETDATA_RUNNING=0 - fi - fi - if [ "${NETDATA_RUNNING}" -eq 0 ]; then - config_result=$(@sbindir_POST@/netdata 2>/dev/null -W get2 "$conf_file" "$section" "$key_name" unknown_default) - fi - echo "$config_result" +warning() { + printf "WARNING: %s\n" "${1}" 1>&2 } -if command -v curl >/dev/null 2>&1 ; then - URLTOOL="curl" -elif command -v wget >/dev/null 2>&1 ; then - URLTOOL="wget" -else - echo >&2 "I need curl or wget to proceed, but neither is available on this system." - exit 3 -fi -if ! command -v openssl >/dev/null 2>&1 ; then - echo >&2 "I need openssl to proceed, but it is not available on this system." 
- exit 3 -fi - -# shellcheck disable=SC2050 -if [ "@enable_cloud_POST@" = "no" ]; then - echo >&2 "This agent was built with --disable-cloud and cannot be claimed" - exit 3 -fi -# shellcheck disable=SC2050 -if [ "@enable_aclk_POST@" != "yes" ]; then - echo >&2 "This agent was built without the dependencies for Cloud and cannot be claimed" - exit 3 -fi - -# ----------------------------------------------------------------------------- -# defaults to allow running this script by hand - -[ -z "${NETDATA_VARLIB_DIR}" ] && NETDATA_VARLIB_DIR="@varlibdir_POST@" -MACHINE_GUID_FILE="@registrydir_POST@/netdata.public.unique.id" -CLAIMING_DIR="${NETDATA_VARLIB_DIR}/cloud.d" -TOKEN="unknown" -URL_BASE=$(get_config_value cloud global "cloud base url") -[ -z "$URL_BASE" ] && URL_BASE="https://app.netdata.cloud" # Cover post-install with --dont-start -ID="unknown" -ROOMS="" -[ -z "$HOSTNAME" ] && HOSTNAME=$(hostname) -CLOUD_CERTIFICATE_FILE="${CLAIMING_DIR}/cloud_fullchain.pem" -VERBOSE=0 -INSECURE=0 -RELOAD=1 -NETDATA_USER=$(get_config_value netdata global "run as user") -[ -z "$EUID" ] && EUID="$(id -u)" +error() { + printf "ERROR: %s\n" "${1}" 1>&2 + exit "${2}" +} -gen_id() { - local id - - if command -v uuidgen > /dev/null 2>&1; then - id="$(uuidgen | tr '[:upper:]' '[:lower:]')" - elif [ -r /proc/sys/kernel/random/uuid ]; then - id="$(cat /proc/sys/kernel/random/uuid)" - else - echo >&2 "Unable to generate machine ID." 
- exit 18 - fi - - if [ "${id}" = "8a795b0c-2311-11e6-8563-000c295076a6" ] || [ "${id}" = "4aed1458-1c3e-11e6-a53f-000c290fc8f5" ]; then - gen_id +get_templated_value() { + value="$1" + default="$2" + override="$3" + + if [ -n "${override}" ]; then + echo "${override}" + elif [ -z "${value}" ]; then + error "Expected templated value not present" + elif (echo "${value}" | grep -q '@'); then + echo "${default}" else - echo "${id}" + echo "${value}" fi } -# get the MACHINE_GUID by default -if [ -r "${MACHINE_GUID_FILE}" ]; then - ID="$(cat "${MACHINE_GUID_FILE}")" - MGUID=$ID -elif [ -f "${MACHINE_GUID_FILE}" ]; then - echo >&2 "netdata.public.unique.id is not readable. Please make sure you have rights to read it (Filename: ${MACHINE_GUID_FILE})." - exit 18 -else - if mkdir -p "${MACHINE_GUID_FILE%/*}" && echo -n "$(gen_id)" > "${MACHINE_GUID_FILE}"; then - ID="$(cat "${MACHINE_GUID_FILE}")" - MGUID=$ID - else - echo >&2 "Failed to write new machine GUID. Please make sure you have rights to write to ${MACHINE_GUID_FILE}." 
- exit 18 - fi -fi - -# get token from file -if [ -r "${CLAIMING_DIR}/token" ]; then - TOKEN="$(cat "${CLAIMING_DIR}/token")" -fi - -# get rooms from file -if [ -r "${CLAIMING_DIR}/rooms" ]; then - ROOMS="$(cat "${CLAIMING_DIR}/rooms")" -fi - -variable_to_set= -for arg in "$@" -do - if [ -z "$variable_to_set" ]; then - case $arg in - --claim-token) variable_to_set="TOKEN" ;; - --claim-rooms) variable_to_set="ROOMS" ;; - --claim-url) variable_to_set="URL_BASE" ;; - -token=*) TOKEN=${arg:7} ;; - -url=*) [ -n "${arg:5}" ] && URL_BASE=${arg:5} ;; - -id=*) ID=$(echo "${arg:4}" | tr '[:upper:]' '[:lower:]');; - -rooms=*) ROOMS=${arg:7} ;; - -hostname=*) HOSTNAME=${arg:10} ;; - -verbose) VERBOSE=1 ;; - -insecure) INSECURE=1 ;; - -proxy=*) PROXY=${arg:7} ;; - -noproxy) NOPROXY=yes ;; - -noreload) RELOAD=0 ;; - -user=*) NETDATA_USER=${arg:6} ;; - -daemon-not-running) NETDATA_RUNNING=0 ;; - *) echo >&2 "Unknown argument ${arg}" - exit 1 ;; - esac - else - case "$variable_to_set" in - TOKEN) TOKEN="$arg" ;; - ROOMS) ROOMS="$arg" ;; - URL_BASE) URL_BASE="$arg" ;; - esac - variable_to_set= - fi - shift 1 -done - -if [ "$EUID" != "0" ] && [ "$(whoami)" != "$NETDATA_USER" ]; then - echo >&2 "This script must be run by the $NETDATA_USER user account" - exit 6 -fi - -# if curl not installed give warning SOCKS can't be used -if [[ "${URLTOOL}" != "curl" && "${PROXY:0:5}" = socks ]] ; then - echo >&2 "wget doesn't support SOCKS. Please install curl or disable SOCKS proxy." - exit 1 -fi - -echo >&2 "Token: ****************" -echo >&2 "Base URL: $URL_BASE" -echo >&2 "Id: $ID" -echo >&2 "Rooms: $ROOMS" -echo >&2 "Hostname: $HOSTNAME" -echo >&2 "Proxy: $PROXY" -echo >&2 "Netdata user: $NETDATA_USER" - -# create the claiming directory for this user -if [ ! -d "${CLAIMING_DIR}" ] ; then - mkdir -p "${CLAIMING_DIR}" && chmod 0770 "${CLAIMING_DIR}" -# shellcheck disable=SC2181 - if [ $? 
-ne 0 ] ; then - echo >&2 "Failed to create claiming working directory ${CLAIMING_DIR}" - exit 2 - fi -fi -if [ ! -w "${CLAIMING_DIR}" ] ; then - echo >&2 "No write permission in claiming working directory ${CLAIMING_DIR}" - exit 2 -fi - -if [ ! -f "${CLAIMING_DIR}/private.pem" ] ; then - echo >&2 "Generating private/public key for the first time." - if ! openssl genrsa -out "${CLAIMING_DIR}/private.pem" 2048 ; then - echo >&2 "Failed to generate private/public key pair." - exit 2 - fi -fi -if [ ! -f "${CLAIMING_DIR}/public.pem" ] ; then - echo >&2 "Extracting public key from private key." - if ! openssl rsa -in "${CLAIMING_DIR}/private.pem" -outform PEM -pubout -out "${CLAIMING_DIR}/public.pem" ; then - echo >&2 "Failed to extract public key." - exit 2 - fi -fi - -TARGET_URL="${URL_BASE%/}/api/v1/spaces/nodes/${ID}" -# shellcheck disable=SC2002 -KEY=$(cat "${CLAIMING_DIR}/public.pem" | tr '\n' '!' | sed -e 's/!/\\n/g') -# shellcheck disable=SC2001 -[ -n "$ROOMS" ] && ROOMS=\"$(echo "$ROOMS" | sed s'/,/", "/g')\" +config_dir="$(get_templated_value "@configdir_POST@" "/etc/netdata" "${NETDATA_CLAIM_CONFIG_DIR}")" +claim_config="${config_dir}/claim.conf" +netdatacli="$(get_templated_value "@sbindir_POST@/netdatacli" "$(command -v netdatacli 2>/dev/null)" "${NETDATA_CLAIM_NETDATACLI_PATH}")" +netdata_group="$(get_templated_value "@netdata_group_POST@" "netdata" "${NETDATA_CLAIM_CONFIG_GROUP}")" + +write_config() { + config="[global]" + config="${config}\n url = ${NETDATA_CLAIM_URL}" + config="${config}\n token = ${NETDATA_CLAIM_TOKEN}" + if [ -n "${NETDATA_CLAIM_ROOMS}" ]; then + config="${config}\n rooms = ${NETDATA_CLAIM_ROOMS}" + fi + if [ -n "${NETDATA_CLAIM_PROXY}" ]; then + config="${config}\n proxy = ${NETDATA_CLAIM_PROXY}" + fi + if [ -n "${NETDATA_CLAIM_INSECURE}" ]; then + config="${config}\n insecure = ${NETDATA_CLAIM_INSECURE}" + fi -cat > "${CLAIMING_DIR}/tmpin.txt" < "${claim_config}.tmp" + chmod 0640 "${claim_config}.tmp" + mv -f "${claim_config}.tmp" 
"${claim_config}" } -EMBED_JSON - -if [ "${VERBOSE}" == 1 ] ; then - echo "Request to server:" - cat "${CLAIMING_DIR}/tmpin.txt" -fi - -if [ "${URLTOOL}" = "curl" ] ; then - URLCOMMAND="curl --connect-timeout 30 --retry 0 -s -i -X PUT -d \"@${CLAIMING_DIR}/tmpin.txt\"" - if [ "${NOPROXY}" = "yes" ] ; then - URLCOMMAND="${URLCOMMAND} -x \"\"" - elif [ -n "${PROXY}" ] ; then - URLCOMMAND="${URLCOMMAND} -x \"${PROXY}\"" - fi -else - URLCOMMAND="wget -T 15 -O - -q --server-response --content-on-error=on --method=PUT \ - --body-file=\"${CLAIMING_DIR}/tmpin.txt\"" - if [ "${NOPROXY}" = "yes" ] ; then - URLCOMMAND="${URLCOMMAND} --no-proxy" - elif [ "${PROXY:0:4}" = http ] ; then - URLCOMMAND="export http_proxy=${PROXY}; ${URLCOMMAND}" - fi -fi - -if [ "${INSECURE}" == 1 ] ; then - if [ "${URLTOOL}" = "curl" ] ; then - URLCOMMAND="${URLCOMMAND} --insecure" - else - URLCOMMAND="${URLCOMMAND} --no-check-certificate" +reload_claiming() { + if [ -z "${NORELOAD}" ]; then + "${netdatacli}" reload-claiming-state fi -fi - -if [ -r "${CLOUD_CERTIFICATE_FILE}" ] ; then - if [ "${URLTOOL}" = "curl" ] ; then - URLCOMMAND="${URLCOMMAND} --cacert \"${CLOUD_CERTIFICATE_FILE}\"" - else - URLCOMMAND="${URLCOMMAND} --ca-certificate \"${CLOUD_CERTIFICATE_FILE}\"" - fi -fi - -if [ "${VERBOSE}" == 1 ]; then - echo "${URLCOMMAND} \"${TARGET_URL}\"" -fi - -attempt_contact () { - if [ "${URLTOOL}" = "curl" ] ; then - eval "${URLCOMMAND} \"${TARGET_URL}\"" >"${CLAIMING_DIR}/tmpout.txt" - else - eval "${URLCOMMAND} \"${TARGET_URL}\"" >"${CLAIMING_DIR}/tmpout.txt" 2>&1 - fi - URLCOMMAND_EXIT_CODE=$? 
- if [ "${URLTOOL}" = "wget" ] && [ "${URLCOMMAND_EXIT_CODE}" -eq 8 ] ; then - # We consider the server issuing an error response a successful attempt at communicating - URLCOMMAND_EXIT_CODE=0 - fi - - # Check if URLCOMMAND connected and received reply - if [ "${URLCOMMAND_EXIT_CODE}" -ne 0 ] ; then - echo >&2 "Failed to connect to ${URL_BASE}, return code ${URLCOMMAND_EXIT_CODE}" - rm -f "${CLAIMING_DIR}/tmpout.txt" - return 4 - fi - - if [ "${VERBOSE}" == 1 ] ; then - echo "Response from server:" - cat "${CLAIMING_DIR}/tmpout.txt" - fi - - return 0 } -for i in {1..3} -do - if attempt_contact ; then - echo "Connection attempt $i successful" - break - fi - echo "Connection attempt $i failed. Retry in ${i}s." - if [ "$i" -eq 5 ] ; then - rm -f "${CLAIMING_DIR}/tmpin.txt" - exit 4 - fi - sleep "$i" -done +parse_args() { + while [ -n "${1}" ]; do + case "${1}" in + --claim-token) NETDATA_CLAIM_TOKEN="${2}"; shift 1 ;; + -token=*) NETDATA_CLAIM_TOKEN="$(echo "${1}" | sed 's/^-token=//')" ;; + --claim-rooms) NETDATA_CLAIM_ROOMS="${2}"; shift 1 ;; + -rooms=*) NETDATA_CLAIM_ROOMS="$(echo "${1}" | sed 's/^-rooms=//')" ;; + --claim-url) NETDATA_CLAIM_URL="${2}"; shift 1 ;; + -url=*) NETDATA_CLAIM_URL="$(echo "${1}" | sed 's/^-url=//')" ;; + --claim-proxy) NETDATA_CLAIM_PROXY="${2}"; shift 1 ;; + -proxy=*) NETDATA_CLAIM_PROXY="$(echo "${1}" | sed 's/^-proxy=//')" ;; + -noproxy|--noproxy) NETDATA_CLAIM_PROXY="none" ;; + -noreload|--noreload) NORELOAD=1 ;; + -insecure|--insecure) NETDATA_CLAIM_INSECURE=yes ;; + -verbose) true ;; + -daemon-not-running) true ;; + -id=*) warning "-id option is no longer supported. Remove the node ID file instead." ;; + -hostname=*) warning "-hostname option is no longer supported. Update the main netdata configuration manually instead." ;; + -user=*) warning "-user option is no longer supported." 
;; + *) warning "Ignoring unrecognized option ${1}";; + esac -rm -f "${CLAIMING_DIR}/tmpin.txt" - -ERROR_KEY=$(grep "\"errorMsgKey\":" "${CLAIMING_DIR}/tmpout.txt" | awk -F "errorMsgKey\":\"" '{print $2}' | awk -F "\"" '{print $1}') -case ${ERROR_KEY} in - "ErrInvalidNodeID") EXIT_CODE=8 ;; - "ErrInvalidNodeName") EXIT_CODE=9 ;; - "ErrInvalidRoomID") EXIT_CODE=10 ;; - "ErrInvalidPublicKey") EXIT_CODE=11 ;; - "ErrForbidden") EXIT_CODE=12 ;; - "ErrAlreadyClaimed") EXIT_CODE=13 ;; - "ErrProcessingClaim") EXIT_CODE=14 ;; - "ErrInternalServerError") EXIT_CODE=15 ;; - "ErrGatewayTimeout") EXIT_CODE=16 ;; - "ErrServiceUnavailable") EXIT_CODE=17 ;; - *) EXIT_CODE=7 ;; -esac - -HTTP_STATUS_CODE=$(grep "HTTP" "${CLAIMING_DIR}/tmpout.txt" | tail -1 | awk -F " " '{print $2}') -if [ "${HTTP_STATUS_CODE}" = "204" ] ; then - EXIT_CODE=0 -fi + shift 1 + done -if [ "${HTTP_STATUS_CODE}" = "204" ] || [ "${ERROR_KEY}" = "ErrAlreadyClaimed" ] ; then - rm -f "${CLAIMING_DIR}/tmpout.txt" - if [ "${HTTP_STATUS_CODE}" = "204" ] ; then - echo -n "${ID}" >"${CLAIMING_DIR}/claimed_id" || (echo >&2 "Claiming failed"; set -e; exit 2) - fi - rm -f "${CLAIMING_DIR}/token" || (echo >&2 "Claiming failed"; set -e; exit 2) + if [ -z "${NETDATA_CLAIM_TOKEN}" ]; then + error "Claim token must be specified" 1 + fi - # Rewrite the cloud.conf on the disk - cat > "$CLAIMING_DIR/cloud.conf" <&2 "Claiming failed"; set -e; exit 2) - fi - if [ "${RELOAD}" == "0" ] ; then - exit $EXIT_CODE - fi + if [ -z "${NETDATA_CLAIM_URL}" ]; then + NETDATA_CLAIM_URL="https://app.netdata.cloud/" + fi +} - # Update cloud.conf in the agent memory - @sbindir_POST@/netdatacli write-config 'cloud|global|enabled|yes' && \ - @sbindir_POST@/netdatacli write-config "cloud|global|cloud base url|$URL_BASE" && \ - @sbindir_POST@/netdatacli reload-claiming-state && \ - if [ "${HTTP_STATUS_CODE}" = "204" ] ; then - echo >&2 "Node was successfully claimed." - else - echo >&2 "The agent cloud base url is set to the url provided." 
- echo >&2 "The cloud may have different credentials already registered for this agent ID and it cannot be reclaimed under different credentials for security reasons. If you are unable to connect use -id=\$(uuidgen) to overwrite this agent ID with a fresh value if the original credentials cannot be restored." - echo >&2 "Failed to claim node with the following error message:\"${ERROR_MESSAGES[$EXIT_CODE]}\"" - fi && exit $EXIT_CODE - if [ "${ERROR_KEY}" = "ErrAlreadyClaimed" ] ; then - echo >&2 "The cloud may have different credentials already registered for this agent ID and it cannot be reclaimed under different credentials for security reasons. If you are unable to connect use -id=\$(uuidgen) to overwrite this agent ID with a fresh value if the original credentials cannot be restored." - echo >&2 "Failed to claim node with the following error message:\"${ERROR_MESSAGES[$EXIT_CODE]}\"" - exit $EXIT_CODE - fi - echo >&2 "The claim was successful but the agent could not be notified ($?)- it requires a restart to connect to the cloud." - [ "$NETDATA_RUNNING" -eq 0 ] && exit 0 || exit 5 +[ -z "$EUID" ] && EUID="$(id -u)" +if [ "${EUID}" != "0" ] && [ ! -w "${config_dir}" ]; then + error "Script must be run by a user with write access to ${config_dir}." 32 fi -echo >&2 "Failed to claim node with the following error message:\"${ERROR_MESSAGES[$EXIT_CODE]}\"" -if [ "${VERBOSE}" == 1 ]; then - echo >&2 "Error key was:\"${ERROR_KEYS[$EXIT_CODE]}\"" -fi -rm -f "${CLAIMING_DIR}/tmpout.txt" -exit $EXIT_CODE +warning "This script is deprecated and will be officially unsupported in the near future. Please either use the kickstart script with the appropriate '--claim-*' options, or directly write out the claiming configuration instead." 
+parse_args "${@}" +write_config +reload_claiming diff --git a/src/collectors/apps.plugin/README.md b/src/collectors/apps.plugin/README.md index ced91d8ae9c2db..8bd1d3c7e442c1 100644 --- a/src/collectors/apps.plugin/README.md +++ b/src/collectors/apps.plugin/README.md @@ -237,7 +237,7 @@ Examples below for process group `sql`: - Open Pipes ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.fds_pipes&dimensions=sql&value_color=green=0%7Cred) - Open Sockets ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.fds_sockets&dimensions=sql&value_color=green%3E=3%7Cred) -For more information about badges check [Generating Badges](/src/web/api/badges/README.md) +For more information about badges check [Generating Badges](/src/web/api/v2/api_v3_badge/README.md) ## Comparison with console tools diff --git a/src/collectors/cgroups.plugin/cgroup-discovery.c b/src/collectors/cgroups.plugin/cgroup-discovery.c index d880f8a711b44a..315b0c042394ef 100644 --- a/src/collectors/cgroups.plugin/cgroup-discovery.c +++ b/src/collectors/cgroups.plugin/cgroup-discovery.c @@ -188,7 +188,7 @@ static inline void discovery_rename_cgroup(struct cgroup *cg) { } char buffer[CGROUP_CHARTID_LINE_MAX + 1]; - char *new_name = fgets(buffer, CGROUP_CHARTID_LINE_MAX, instance->child_stdout_fp); + char *new_name = fgets(buffer, CGROUP_CHARTID_LINE_MAX, spawn_popen_stdout(instance)); int exit_code = spawn_popen_wait(instance); switch (exit_code) { @@ -1101,7 +1101,7 @@ static inline void read_cgroup_network_interfaces(struct cgroup *cg) { char *s; char buffer[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1]; - while((s = fgets(buffer, CGROUP_NETWORK_INTERFACE_MAX_LINE, instance->child_stdout_fp))) { + while((s = fgets(buffer, CGROUP_NETWORK_INTERFACE_MAX_LINE, spawn_popen_stdout(instance)))) { trim(s); if(*s && *s != '\n') { diff --git a/src/collectors/cgroups.plugin/cgroup-internals.h b/src/collectors/cgroups.plugin/cgroup-internals.h index e0d53dc93e98f6..d0b6641e247399 100644 --- 
a/src/collectors/cgroups.plugin/cgroup-internals.h +++ b/src/collectors/cgroups.plugin/cgroup-internals.h @@ -394,8 +394,8 @@ static inline char *cgroup_chart_type(char *buffer, struct cgroup *cg) { #define RRDFUNCTIONS_CGTOP_HELP "View running containers" #define RRDFUNCTIONS_SYSTEMD_SERVICES_HELP "View systemd services" -int cgroup_function_cgroup_top(BUFFER *wb, const char *function); -int cgroup_function_systemd_top(BUFFER *wb, const char *function); +int cgroup_function_cgroup_top(BUFFER *wb, const char *function, BUFFER *payload, const char *source); +int cgroup_function_systemd_top(BUFFER *wb, const char *function, BUFFER *payload, const char *source); void cgroup_netdev_link_init(void); const DICTIONARY_ITEM *cgroup_netdev_get(struct cgroup *cg); diff --git a/src/collectors/cgroups.plugin/cgroup-network.c b/src/collectors/cgroups.plugin/cgroup-network.c index 4cb5cbabe948de..2d9b57db4eadc5 100644 --- a/src/collectors/cgroups.plugin/cgroup-network.c +++ b/src/collectors/cgroups.plugin/cgroup-network.c @@ -518,7 +518,7 @@ void call_the_helper(pid_t pid, const char *cgroup) { if(pi) { char buffer[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1]; char *s; - while((s = fgets(buffer, CGROUP_NETWORK_INTERFACE_MAX_LINE, pi->child_stdout_fp))) { + while((s = fgets(buffer, CGROUP_NETWORK_INTERFACE_MAX_LINE, spawn_popen_stdout(pi)))) { trim(s); if(*s && *s != '\n') { diff --git a/src/collectors/cgroups.plugin/cgroup-top.c b/src/collectors/cgroups.plugin/cgroup-top.c index aa413dad11dd43..7b98502b5c5af5 100644 --- a/src/collectors/cgroups.plugin/cgroup-top.c +++ b/src/collectors/cgroups.plugin/cgroup-top.c @@ -98,7 +98,7 @@ void cgroup_netdev_get_bandwidth(struct cgroup *cg, NETDATA_DOUBLE *received, NE *sent = t->sent[slot]; } -int cgroup_function_cgroup_top(BUFFER *wb, const char *function __maybe_unused) { +int cgroup_function_cgroup_top(BUFFER *wb, const char *function __maybe_unused, BUFFER *payload __maybe_unused, const char *source __maybe_unused) { buffer_flush(wb); 
wb->content_type = CT_APPLICATION_JSON; buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); @@ -341,7 +341,7 @@ int cgroup_function_cgroup_top(BUFFER *wb, const char *function __maybe_unused) return HTTP_RESP_OK; } -int cgroup_function_systemd_top(BUFFER *wb, const char *function __maybe_unused) { +int cgroup_function_systemd_top(BUFFER *wb, const char *function __maybe_unused, BUFFER *payload __maybe_unused, const char *source __maybe_unused) { buffer_flush(wb); wb->content_type = CT_APPLICATION_JSON; buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); diff --git a/src/collectors/cgroups.plugin/sys_fs_cgroup.c b/src/collectors/cgroups.plugin/sys_fs_cgroup.c index 5fdefa863caf6a..c970119eaf4239 100644 --- a/src/collectors/cgroups.plugin/sys_fs_cgroup.c +++ b/src/collectors/cgroups.plugin/sys_fs_cgroup.c @@ -82,7 +82,7 @@ static enum cgroups_systemd_setting cgroups_detect_systemd(const char *exec) return retval; struct pollfd pfd; - pfd.fd = spawn_server_instance_read_fd(pi->si); + pfd.fd = spawn_popen_read_fd(pi); pfd.events = POLLIN; int timeout = 3000; // milliseconds @@ -93,7 +93,7 @@ static enum cgroups_systemd_setting cgroups_detect_systemd(const char *exec) } else if (ret == 0) { collector_info("Cannot get the output of \"%s\" within timeout (%d ms)", exec, timeout); } else { - while (fgets(buf, MAXSIZE_PROC_CMDLINE, pi->child_stdout_fp) != NULL) { + while (fgets(buf, MAXSIZE_PROC_CMDLINE, spawn_popen_stdout(pi)) != NULL) { if ((begin = strstr(buf, SYSTEMD_HIERARCHY_STRING))) { end = begin = begin + strlen(SYSTEMD_HIERARCHY_STRING); if (!*begin) @@ -153,18 +153,18 @@ static enum cgroups_type cgroups_try_detect_version() int cgroups2_available = 0; // 1. 
check if cgroups2 available on system at all - POPEN_INSTANCE *instance = spawn_popen_run("grep cgroup /proc/filesystems"); - if(!instance) { + POPEN_INSTANCE *pi = spawn_popen_run("grep cgroup /proc/filesystems"); + if(!pi) { collector_error("cannot run 'grep cgroup /proc/filesystems'"); return CGROUPS_AUTODETECT_FAIL; } - while (fgets(buf, MAXSIZE_PROC_CMDLINE, instance->child_stdout_fp) != NULL) { + while (fgets(buf, MAXSIZE_PROC_CMDLINE, spawn_popen_stdout(pi)) != NULL) { if (strstr(buf, "cgroup2")) { cgroups2_available = 1; break; } } - if(spawn_popen_wait(instance) != 0) + if(spawn_popen_wait(pi) != 0) return CGROUPS_AUTODETECT_FAIL; if(!cgroups2_available) diff --git a/src/collectors/diskspace.plugin/plugin_diskspace.c b/src/collectors/diskspace.plugin/plugin_diskspace.c index f1d8909b2c8439..920db1861bfcab 100644 --- a/src/collectors/diskspace.plugin/plugin_diskspace.c +++ b/src/collectors/diskspace.plugin/plugin_diskspace.c @@ -629,7 +629,7 @@ static void diskspace_main_cleanup(void *pptr) { #error WORKER_UTILIZATION_MAX_JOB_TYPES has to be at least 3 #endif -int diskspace_function_mount_points(BUFFER *wb, const char *function __maybe_unused) { +static int diskspace_function_mount_points(BUFFER *wb, const char *function __maybe_unused, BUFFER *payload __maybe_unused, const char *source __maybe_unused) { netdata_mutex_lock(&slow_mountinfo_mutex); buffer_flush(wb); diff --git a/src/collectors/freeipmi.plugin/freeipmi_plugin.c b/src/collectors/freeipmi.plugin/freeipmi_plugin.c index 38fb1d19b9a086..2af333a1d43b95 100644 --- a/src/collectors/freeipmi.plugin/freeipmi_plugin.c +++ b/src/collectors/freeipmi.plugin/freeipmi_plugin.c @@ -1637,7 +1637,8 @@ static void freeimi_function_sensors(const char *transaction, char *function __m // ---------------------------------------------------------------------------- // main, command line arguments parsing -static NORETURN void plugin_exit(int code) { +static void plugin_exit(int code) NORETURN; +static void 
plugin_exit(int code) { fflush(stdout); function_plugin_should_exit = true; exit(code); diff --git a/src/collectors/plugins.d/plugins_d.c b/src/collectors/plugins.d/plugins_d.c index 85f1563c3de069..6ae71c76951f84 100644 --- a/src/collectors/plugins.d/plugins_d.c +++ b/src/collectors/plugins.d/plugins_d.c @@ -158,7 +158,7 @@ static void *pluginsd_worker_thread(void *arg) { rrdhost_hostname(cd->host), cd->cmd); break; } - cd->unsafe.pid = spawn_server_instance_pid(cd->unsafe.pi->si); + cd->unsafe.pid = spawn_popen_pid(cd->unsafe.pi); nd_log(NDLS_DAEMON, NDLP_DEBUG, "PLUGINSD: 'host:%s' connected to '%s' running on pid %d", @@ -181,7 +181,10 @@ static void *pluginsd_worker_thread(void *arg) { }; ND_LOG_STACK_PUSH(lgs); - count = pluginsd_process(cd->host, cd, cd->unsafe.pi->child_stdin_fp, cd->unsafe.pi->child_stdout_fp, 0); + count = pluginsd_process(cd->host, cd, + spawn_popen_read_fd(cd->unsafe.pi), + spawn_popen_write_fd(cd->unsafe.pi), + 0); nd_log(NDLS_DAEMON, NDLP_DEBUG, "PLUGINSD: 'host:%s', '%s' (pid %d) disconnected after %zu successful data collections (ENDs).", diff --git a/src/collectors/plugins.d/plugins_d.h b/src/collectors/plugins.d/plugins_d.h index 51efa5a72efa2b..5b3a233a07cb65 100644 --- a/src/collectors/plugins.d/plugins_d.h +++ b/src/collectors/plugins.d/plugins_d.h @@ -46,7 +46,10 @@ struct plugind { extern struct plugind *pluginsd_root; -size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp_plugin_input, FILE *fp_plugin_output, int trust_durations); +size_t pluginsd_process(RRDHOST *host, struct plugind *cd, int fd_input, int fd_output, int trust_durations); + +struct parser; +void pluginsd_process_cleanup(struct parser *parser); void pluginsd_process_thread_cleanup(void *pptr); size_t pluginsd_initialize_plugin_directories(); diff --git a/src/collectors/plugins.d/pluginsd_internals.c b/src/collectors/plugins.d/pluginsd_internals.c index 31f0f75393a366..c57362506155f2 100644 --- a/src/collectors/plugins.d/pluginsd_internals.c +++ 
b/src/collectors/plugins.d/pluginsd_internals.c @@ -2,10 +2,8 @@ #include "pluginsd_internals.h" -ssize_t send_to_plugin(const char *txt, void *data) { - PARSER *parser = data; - - if(!txt || !*txt) +ssize_t send_to_plugin(const char *txt, PARSER *parser) { + if(!txt || !*txt || !parser) return 0; #ifdef ENABLE_H2O @@ -17,7 +15,6 @@ ssize_t send_to_plugin(const char *txt, void *data) { spinlock_lock(&parser->writer.spinlock); ssize_t bytes = -1; -#ifdef ENABLE_HTTPS NETDATA_SSL *ssl = parser->ssl_output; if(ssl) { @@ -30,29 +27,14 @@ ssize_t send_to_plugin(const char *txt, void *data) { spinlock_unlock(&parser->writer.spinlock); return bytes; } -#endif - - if(parser->fp_output) { - - bytes = fprintf(parser->fp_output, "%s", txt); - if(bytes <= 0) { - netdata_log_error("PLUGINSD: cannot send command (FILE)"); - bytes = -2; - } - else - fflush(parser->fp_output); - spinlock_unlock(&parser->writer.spinlock); - return bytes; - } - - if(parser->fd != -1) { + if(parser->fd_output != -1) { bytes = 0; ssize_t total = (ssize_t)strlen(txt); ssize_t sent; do { - sent = write(parser->fd, &txt[bytes], total - bytes); + sent = write(parser->fd_output, &txt[bytes], total - bytes); if(sent <= 0) { netdata_log_error("PLUGINSD: cannot send command (fd)"); spinlock_unlock(&parser->writer.spinlock); @@ -100,19 +82,16 @@ void parser_destroy(PARSER *parser) { } -PARSER *parser_init(struct parser_user_object *user, FILE *fp_input, FILE *fp_output, int fd, +PARSER *parser_init(struct parser_user_object *user, int fd_input, int fd_output, PARSER_INPUT_TYPE flags, void *ssl __maybe_unused) { PARSER *parser; parser = callocz(1, sizeof(*parser)); if(user) parser->user = *user; - parser->fd = fd; - parser->fp_input = fp_input; - parser->fp_output = fp_output; -#ifdef ENABLE_HTTPS + parser->fd_input = fd_input; + parser->fd_output = fd_output; parser->ssl_output = ssl; -#endif parser->flags = flags; spinlock_init(&parser->writer.spinlock); diff --git 
a/src/collectors/plugins.d/pluginsd_internals.h b/src/collectors/plugins.d/pluginsd_internals.h index ae7e994277dea7..ed0714dd2e5982 100644 --- a/src/collectors/plugins.d/pluginsd_internals.h +++ b/src/collectors/plugins.d/pluginsd_internals.h @@ -13,7 +13,7 @@ PARSER_RC PLUGINSD_DISABLE_PLUGIN(PARSER *parser, const char *keyword, const char *msg); -ssize_t send_to_plugin(const char *txt, void *data); +ssize_t send_to_plugin(const char *txt, PARSER *parser); static inline RRDHOST *pluginsd_require_scope_host(PARSER *parser, const char *cmd) { RRDHOST *host = parser->user.host; diff --git a/src/collectors/plugins.d/pluginsd_parser.c b/src/collectors/plugins.d/pluginsd_parser.c index d15ecbe94e8ef7..a142c6ccccd0a7 100644 --- a/src/collectors/plugins.d/pluginsd_parser.c +++ b/src/collectors/plugins.d/pluginsd_parser.c @@ -1081,52 +1081,7 @@ static inline PARSER_RC pluginsd_exit(char **words __maybe_unused, size_t num_wo return PARSER_RC_STOP; } -static inline PARSER_RC streaming_claimed_id(char **words, size_t num_words, PARSER *parser) -{ - const char *host_uuid_str = get_word(words, num_words, 1); - const char *claim_id_str = get_word(words, num_words, 2); - - if (!host_uuid_str || !claim_id_str) { - netdata_log_error("Command CLAIMED_ID came malformed, uuid = '%s', claim_id = '%s'", - host_uuid_str ? host_uuid_str : "[unset]", - claim_id_str ? claim_id_str : "[unset]"); - return PARSER_RC_ERROR; - } - - nd_uuid_t uuid; - RRDHOST *host = parser->user.host; - - // We don't need the parsed UUID - // just do it to check the format - if(uuid_parse(host_uuid_str, uuid)) { - netdata_log_error("1st parameter (host GUID) to CLAIMED_ID command is not valid GUID. Received: \"%s\".", host_uuid_str); - return PARSER_RC_ERROR; - } - if(uuid_parse(claim_id_str, uuid) && strcmp(claim_id_str, "NULL") != 0) { - netdata_log_error("2nd parameter (Claim ID) to CLAIMED_ID command is not valid GUID. 
Received: \"%s\".", claim_id_str); - return PARSER_RC_ERROR; - } - - if(strcmp(host_uuid_str, host->machine_guid) != 0) { - netdata_log_error("Claim ID is for host \"%s\" but it came over connection for \"%s\"", host_uuid_str, host->machine_guid); - return PARSER_RC_OK; //the message is OK problem must be somewhere else - } - - rrdhost_aclk_state_lock(host); - - if (host->aclk_state.claimed_id) - freez(host->aclk_state.claimed_id); - - host->aclk_state.claimed_id = strcmp(claim_id_str, "NULL") ? strdupz(claim_id_str) : NULL; - - rrdhost_aclk_state_unlock(host); - - rrdhost_flag_set(host, RRDHOST_FLAG_METADATA_CLAIMID |RRDHOST_FLAG_METADATA_UPDATE); - - rrdpush_send_claimed_id(host); - - return PARSER_RC_OK; -} +PARSER_RC rrdpush_receiver_pluginsd_claimed_id(char **words, size_t num_words, PARSER *parser); // ---------------------------------------------------------------------------- @@ -1135,8 +1090,7 @@ void pluginsd_cleanup_v2(PARSER *parser) { pluginsd_clear_scope_chart(parser, "THREAD CLEANUP"); } -void pluginsd_process_thread_cleanup(void *pptr) { - PARSER *parser = CLEANUP_FUNCTION_GET_PTR(pptr); +void pluginsd_process_cleanup(PARSER *parser) { if(!parser) return; pluginsd_cleanup_v2(parser); @@ -1154,6 +1108,11 @@ void pluginsd_process_thread_cleanup(void *pptr) { parser_destroy(parser); } +void pluginsd_process_thread_cleanup(void *pptr) { + PARSER *parser = CLEANUP_FUNCTION_GET_PTR(pptr); + pluginsd_process_cleanup(parser); +} + bool parser_reconstruct_node(BUFFER *wb, void *ptr) { PARSER *parser = ptr; if(!parser || !parser->user.host) @@ -1181,30 +1140,15 @@ bool parser_reconstruct_context(BUFFER *wb, void *ptr) { return true; } -inline size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp_plugin_input, FILE *fp_plugin_output, int trust_durations) +inline size_t pluginsd_process(RRDHOST *host, struct plugind *cd, int fd_input, int fd_output, int trust_durations) { int enabled = cd->unsafe.enabled; - if (!fp_plugin_input || 
!fp_plugin_output || !enabled) { + if (fd_input == -1 || fd_output == -1 || !enabled) { cd->unsafe.enabled = 0; return 0; } - if (unlikely(fileno(fp_plugin_input) == -1)) { - netdata_log_error("input file descriptor given is not a valid stream"); - cd->serial_failures++; - return 0; - } - - if (unlikely(fileno(fp_plugin_output) == -1)) { - netdata_log_error("output file descriptor given is not a valid stream"); - cd->serial_failures++; - return 0; - } - - clearerr(fp_plugin_input); - clearerr(fp_plugin_output); - PARSER *parser; { PARSER_USER_OBJECT user = { @@ -1214,8 +1158,7 @@ inline size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp_plugi .trust_durations = trust_durations }; - // fp_plugin_output = our input; fp_plugin_input = our output - parser = parser_init(&user, fp_plugin_output, fp_plugin_input, -1, PARSER_INPUT_SPLIT, NULL); + parser = parser_init(&user, fd_input, fd_output, PARSER_INPUT_SPLIT, NULL); } pluginsd_keywords_init(parser, PARSER_INIT_PLUGINSD); @@ -1240,10 +1183,8 @@ inline size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp_plugi if(unlikely(!buffered_reader_next_line(&parser->reader, buffer))) { buffered_reader_ret_t ret = buffered_reader_read_timeout( - &parser->reader, - fileno((FILE *) parser->fp_input), - 2 * 60 * MSEC_PER_SEC, true - ); + &parser->reader, parser->fd_input, + 2 * 60 * MSEC_PER_SEC, true); if(unlikely(ret != BUFFERED_READER_READ_OK)) break; @@ -1320,7 +1261,7 @@ PARSER_RC parser_execute(PARSER *parser, const PARSER_KEYWORD *keyword, char **w case PLUGINSD_KEYWORD_ID_VARIABLE: return pluginsd_variable(words, num_words, parser); case PLUGINSD_KEYWORD_ID_CLAIMED_ID: - return streaming_claimed_id(words, num_words, parser); + return rrdpush_receiver_pluginsd_claimed_id(words, num_words, parser); case PLUGINSD_KEYWORD_ID_HOST: return pluginsd_host(words, num_words, parser); case PLUGINSD_KEYWORD_ID_HOST_DEFINE: @@ -1362,7 +1303,7 @@ void parser_init_repertoire(PARSER *parser, PARSER_REPERTOIRE 
repertoire) { } int pluginsd_parser_unittest(void) { - PARSER *p = parser_init(NULL, NULL, NULL, -1, PARSER_INPUT_SPLIT, NULL); + PARSER *p = parser_init(NULL, -1, -1, PARSER_INPUT_SPLIT, NULL); pluginsd_keywords_init(p, PARSER_INIT_PLUGINSD | PARSER_INIT_STREAMING); char *lines[] = { diff --git a/src/collectors/plugins.d/pluginsd_parser.h b/src/collectors/plugins.d/pluginsd_parser.h index 6c126964bb84cf..baf66df291c4ba 100644 --- a/src/collectors/plugins.d/pluginsd_parser.h +++ b/src/collectors/plugins.d/pluginsd_parser.h @@ -93,17 +93,15 @@ typedef struct parser_user_object { } v2; } PARSER_USER_OBJECT; -typedef struct parser { +struct parser { uint8_t version; // Parser version PARSER_REPERTOIRE repertoire; uint32_t flags; - int fd; // Socket - FILE *fp_input; // Input source e.g. stream - FILE *fp_output; // Stream to send commands to plugin + int fd_input; + int fd_output; -#ifdef ENABLE_HTTPS NETDATA_SSL *ssl_output; -#endif + #ifdef ENABLE_H2O void *h2o_ctx; // if set we use h2o_stream functions to send data #endif @@ -129,10 +127,11 @@ typedef struct parser { struct { SPINLOCK spinlock; } writer; +}; -} PARSER; +typedef struct parser PARSER; -PARSER *parser_init(struct parser_user_object *user, FILE *fp_input, FILE *fp_output, int fd, PARSER_INPUT_TYPE flags, void *ssl); +PARSER *parser_init(struct parser_user_object *user, int fd_input, int fd_output, PARSER_INPUT_TYPE flags, void *ssl); void parser_init_repertoire(PARSER *parser, PARSER_REPERTOIRE repertoire); void parser_destroy(PARSER *working_parser); void pluginsd_cleanup_v2(PARSER *parser); diff --git a/src/collectors/proc.plugin/proc_diskstats.c b/src/collectors/proc.plugin/proc_diskstats.c index 015a985cc4cf57..4cbd618f666149 100644 --- a/src/collectors/proc.plugin/proc_diskstats.c +++ b/src/collectors/proc.plugin/proc_diskstats.c @@ -998,7 +998,7 @@ static void disk_labels_cb(RRDSET *st, void *data) { add_labels_to_disk(data, st); } -static int diskstats_function_block_devices(BUFFER *wb, const 
char *function __maybe_unused) { +static int diskstats_function_block_devices(BUFFER *wb, const char *function __maybe_unused, BUFFER *payload __maybe_unused, const char *source __maybe_unused) { buffer_flush(wb); wb->content_type = CT_APPLICATION_JSON; buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); diff --git a/src/collectors/proc.plugin/proc_net_dev.c b/src/collectors/proc.plugin/proc_net_dev.c index 40702c38761163..213c9e45857ecf 100644 --- a/src/collectors/proc.plugin/proc_net_dev.c +++ b/src/collectors/proc.plugin/proc_net_dev.c @@ -473,7 +473,7 @@ static void netdev_rename_this_device(struct netdev *d) { // ---------------------------------------------------------------------------- -int netdev_function_net_interfaces(BUFFER *wb, const char *function __maybe_unused) { +static int netdev_function_net_interfaces(BUFFER *wb, const char *function __maybe_unused, BUFFER *payload __maybe_unused, const char *source __maybe_unused) { buffer_flush(wb); wb->content_type = CT_APPLICATION_JSON; buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); diff --git a/src/collectors/statsd.plugin/statsd.c b/src/collectors/statsd.plugin/statsd.c index f83818059fe278..42c5ae0c720ac4 100644 --- a/src/collectors/statsd.plugin/statsd.c +++ b/src/collectors/statsd.plugin/statsd.c @@ -1283,7 +1283,7 @@ static int statsd_readfile(const char *filename, STATSD_APP *app, STATSD_APP_CHA // find the directory name from the file we already read char *filename2 = strdupz(filename); // copy filename, since dirname() will change it char *dir = dirname(filename2); // find the directory part of the filename - tmp = strdupz_path_subpath(dir, s); // compose the new filename to read; + tmp = filename_from_path_entry_strdupz(dir, s); // compose the new filename to read; freez(filename2); // free the filename we copied } statsd_readfile(tmp, app, chart, dict); diff --git a/src/collectors/systemd-journal.plugin/systemd-journal-dyncfg.c 
b/src/collectors/systemd-journal.plugin/systemd-journal-dyncfg.c index 469f9d2cf8d5ff..d8098bd6c93cb7 100644 --- a/src/collectors/systemd-journal.plugin/systemd-journal-dyncfg.c +++ b/src/collectors/systemd-journal.plugin/systemd-journal-dyncfg.c @@ -58,6 +58,10 @@ static int systemd_journal_directories_dyncfg_update(BUFFER *result, BUFFER *pay struct json_object *journalDirectories; json_object_object_get_ex(jobj, JOURNAL_DIRECTORIES_JSON_NODE, &journalDirectories); + if (json_object_get_type(journalDirectories) != json_type_array) + return dyncfg_default_response(result, HTTP_RESP_BAD_REQUEST, + "member " JOURNAL_DIRECTORIES_JSON_NODE " is not an array"); + size_t n_directories = json_object_array_length(journalDirectories); if(n_directories > MAX_JOURNAL_DIRECTORIES) return dyncfg_default_response(result, HTTP_RESP_BAD_REQUEST, "too many directories configured"); diff --git a/src/collectors/systemd-journal.plugin/systemd-journal.c b/src/collectors/systemd-journal.plugin/systemd-journal.c index 6da9c687e02b71..197bfd5ce1d770 100644 --- a/src/collectors/systemd-journal.plugin/systemd-journal.c +++ b/src/collectors/systemd-journal.plugin/systemd-journal.c @@ -1413,7 +1413,7 @@ static int netdata_systemd_journal_query(BUFFER *wb, FACETS *facets, FUNCTION_QU } static void netdata_systemd_journal_function_help(const char *transaction) { - BUFFER *wb = buffer_create(0, NULL); + CLEAN_BUFFER *wb = buffer_create(0, NULL); buffer_sprintf(wb, "%s / %s\n" "\n" @@ -1517,162 +1517,225 @@ static void netdata_systemd_journal_function_help(const char *transaction) { netdata_mutex_lock(&stdout_mutex); pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "text/plain", now_realtime_sec() + 3600, wb); netdata_mutex_unlock(&stdout_mutex); +} - buffer_free(wb); +typedef struct { + FACET_KEY_OPTIONS default_facet; + bool info; + bool data_only; + bool slice; + bool delta; + bool tail; + time_t after_s; + time_t before_s; + usec_t anchor; + usec_t if_modified_since; + size_t 
last; + FACETS_ANCHOR_DIRECTION direction; + const char *query; + const char *chart; + SIMPLE_PATTERN *sources; + SD_JOURNAL_FILE_SOURCE_TYPE source_type; + size_t filters; + size_t sampling; +} JOURNAL_QUERY; + +static SD_JOURNAL_FILE_SOURCE_TYPE get_internal_source_type(const char *value) { + if(strcmp(value, SDJF_SOURCE_ALL_NAME) == 0) + return SDJF_ALL; + else if(strcmp(value, SDJF_SOURCE_LOCAL_NAME) == 0) + return SDJF_LOCAL_ALL; + else if(strcmp(value, SDJF_SOURCE_REMOTES_NAME) == 0) + return SDJF_REMOTE_ALL; + else if(strcmp(value, SDJF_SOURCE_NAMESPACES_NAME) == 0) + return SDJF_LOCAL_NAMESPACE; + else if(strcmp(value, SDJF_SOURCE_LOCAL_SYSTEM_NAME) == 0) + return SDJF_LOCAL_SYSTEM; + else if(strcmp(value, SDJF_SOURCE_LOCAL_USERS_NAME) == 0) + return SDJF_LOCAL_USER; + else if(strcmp(value, SDJF_SOURCE_LOCAL_OTHER_NAME) == 0) + return SDJF_LOCAL_OTHER; + + return SDJF_NONE; } -void function_systemd_journal(const char *transaction, char *function, usec_t *stop_monotonic_ut, bool *cancelled, - BUFFER *payload __maybe_unused, HTTP_ACCESS access __maybe_unused, - const char *source __maybe_unused, void *data __maybe_unused) { - fstat_thread_calls = 0; - fstat_thread_cached_responses = 0; +static FACETS_ANCHOR_DIRECTION get_direction(const char *value) { + return strcasecmp(value, "forward") == 0 ? 
FACETS_ANCHOR_DIRECTION_FORWARD : FACETS_ANCHOR_DIRECTION_BACKWARD; +} - BUFFER *wb = buffer_create(0, NULL); - buffer_flush(wb); - buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY); +struct post_query_data { + const char *transaction; + FACETS *facets; + JOURNAL_QUERY *q; + BUFFER *wb; +}; + +static bool parse_json_payload(json_object *jobj, const char *path, void *data, BUFFER *error) { + struct post_query_data *qd = data; + JOURNAL_QUERY *q = qd->q; + BUFFER *wb = qd->wb; + FACETS *facets = qd->facets; + // const char *transaction = qd->transaction; + + buffer_flush(error); + + JSONC_PARSE_BOOL_OR_ERROR_AND_RETURN(jobj, path, JOURNAL_PARAMETER_INFO, q->info, error, false); + JSONC_PARSE_BOOL_OR_ERROR_AND_RETURN(jobj, path, JOURNAL_PARAMETER_DELTA, q->delta, error, false); + JSONC_PARSE_BOOL_OR_ERROR_AND_RETURN(jobj, path, JOURNAL_PARAMETER_TAIL, q->tail, error, false); + JSONC_PARSE_BOOL_OR_ERROR_AND_RETURN(jobj, path, JOURNAL_PARAMETER_SLICE, q->slice, error, false); + JSONC_PARSE_BOOL_OR_ERROR_AND_RETURN(jobj, path, JOURNAL_PARAMETER_DATA_ONLY, q->data_only, error, false); + JSONC_PARSE_UINT64_OR_ERROR_AND_RETURN(jobj, path, JOURNAL_PARAMETER_SAMPLING, q->sampling, error, false); + JSONC_PARSE_INT64_OR_ERROR_AND_RETURN(jobj, path, JOURNAL_PARAMETER_AFTER, q->after_s, error, false); + JSONC_PARSE_INT64_OR_ERROR_AND_RETURN(jobj, path, JOURNAL_PARAMETER_BEFORE, q->before_s, error, false); + JSONC_PARSE_UINT64_OR_ERROR_AND_RETURN(jobj, path, JOURNAL_PARAMETER_IF_MODIFIED_SINCE, q->if_modified_since, error, false); + JSONC_PARSE_UINT64_OR_ERROR_AND_RETURN(jobj, path, JOURNAL_PARAMETER_ANCHOR, q->anchor, error, false); + JSONC_PARSE_UINT64_OR_ERROR_AND_RETURN(jobj, path, JOURNAL_PARAMETER_LAST, q->last, error, false); + JSONC_PARSE_TXT2ENUM_OR_ERROR_AND_RETURN(jobj, path, JOURNAL_PARAMETER_DIRECTION, get_direction, q->direction, error, false); + JSONC_PARSE_TXT2STRDUPZ_OR_ERROR_AND_RETURN(jobj, path, JOURNAL_PARAMETER_QUERY, q->query, 
error, false); + JSONC_PARSE_TXT2STRDUPZ_OR_ERROR_AND_RETURN(jobj, path, JOURNAL_PARAMETER_HISTOGRAM, q->chart, error, false); + + json_object *sources; + if (json_object_object_get_ex(jobj, JOURNAL_PARAMETER_SOURCE, &sources)) { + if (json_object_get_type(sources) != json_type_array) { + buffer_sprintf(error, "member '%s' is not an array", JOURNAL_PARAMETER_SOURCE); + return false; + } - FUNCTION_QUERY_STATUS tmp_fqs = { - .cancelled = cancelled, - .stop_monotonic_ut = stop_monotonic_ut, - }; - FUNCTION_QUERY_STATUS *fqs = NULL; + buffer_json_member_add_array(wb, JOURNAL_PARAMETER_SOURCE); - FACETS *facets = facets_create(50, FACETS_OPTION_ALL_KEYS_FTS, - SYSTEMD_ALWAYS_VISIBLE_KEYS, - SYSTEMD_KEYS_INCLUDED_IN_FACETS, - SYSTEMD_KEYS_EXCLUDED_FROM_FACETS); + CLEAN_BUFFER *sources_list = buffer_create(0, NULL); - facets_accepted_param(facets, JOURNAL_PARAMETER_INFO); - facets_accepted_param(facets, JOURNAL_PARAMETER_SOURCE); - facets_accepted_param(facets, JOURNAL_PARAMETER_AFTER); - facets_accepted_param(facets, JOURNAL_PARAMETER_BEFORE); - facets_accepted_param(facets, JOURNAL_PARAMETER_ANCHOR); - facets_accepted_param(facets, JOURNAL_PARAMETER_DIRECTION); - facets_accepted_param(facets, JOURNAL_PARAMETER_LAST); - facets_accepted_param(facets, JOURNAL_PARAMETER_QUERY); - facets_accepted_param(facets, JOURNAL_PARAMETER_FACETS); - facets_accepted_param(facets, JOURNAL_PARAMETER_HISTOGRAM); - facets_accepted_param(facets, JOURNAL_PARAMETER_IF_MODIFIED_SINCE); - facets_accepted_param(facets, JOURNAL_PARAMETER_DATA_ONLY); - facets_accepted_param(facets, JOURNAL_PARAMETER_DELTA); - facets_accepted_param(facets, JOURNAL_PARAMETER_TAIL); - facets_accepted_param(facets, JOURNAL_PARAMETER_SAMPLING); + q->source_type = SDJF_NONE; -#ifdef HAVE_SD_JOURNAL_RESTART_FIELDS - facets_accepted_param(facets, JOURNAL_PARAMETER_SLICE); -#endif // HAVE_SD_JOURNAL_RESTART_FIELDS + size_t sources_len = json_object_array_length(sources); + for (size_t i = 0; i < sources_len; i++) { + 
json_object *src = json_object_array_get_idx(sources, i); - // register the fields in the order you want them on the dashboard + if (json_object_get_type(src) != json_type_string) { + buffer_sprintf(error, "sources array item %zu is not a string", i); + return false; + } - facets_register_row_severity(facets, syslog_priority_to_facet_severity, NULL); + const char *value = json_object_get_string(src); + buffer_json_add_array_item_string(wb, value); - facets_register_key_name(facets, "_HOSTNAME", - FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_VISIBLE); + SD_JOURNAL_FILE_SOURCE_TYPE t = get_internal_source_type(value); + if(t != SDJF_NONE) { + q->source_type |= t; + value = NULL; + } + else { + // else, match the source, whatever it is + if(buffer_strlen(sources_list)) + buffer_putc(sources_list, '|'); - facets_register_dynamic_key_name(facets, JOURNAL_KEY_ND_JOURNAL_PROCESS, - FACET_KEY_OPTION_NEVER_FACET | FACET_KEY_OPTION_VISIBLE, - netdata_systemd_journal_dynamic_row_id, NULL); + buffer_strcat(sources_list, value); + } + } - facets_register_key_name(facets, "MESSAGE", - FACET_KEY_OPTION_NEVER_FACET | FACET_KEY_OPTION_MAIN_TEXT | - FACET_KEY_OPTION_VISIBLE | FACET_KEY_OPTION_FTS); + if(buffer_strlen(sources_list)) { + simple_pattern_free(q->sources); + q->sources = simple_pattern_create(buffer_tostring(sources_list), "|", SIMPLE_PATTERN_EXACT, false); + } -// facets_register_dynamic_key_name(facets, "MESSAGE", -// FACET_KEY_OPTION_NEVER_FACET | FACET_KEY_OPTION_MAIN_TEXT | FACET_KEY_OPTION_RICH_TEXT | -// FACET_KEY_OPTION_VISIBLE | FACET_KEY_OPTION_FTS, -// netdata_systemd_journal_rich_message, NULL); + buffer_json_array_close(wb); // source + } - facets_register_key_name_transformation(facets, "PRIORITY", - FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW | - FACET_KEY_OPTION_EXPANDED_FILTER, - netdata_systemd_journal_transform_priority, NULL); + json_object *fcts; + if (json_object_object_get_ex(jobj, JOURNAL_PARAMETER_FACETS, &fcts)) { + if 
(json_object_get_type(fcts) != json_type_array) { + buffer_sprintf(error, "member '%s' is not an array", JOURNAL_PARAMETER_FACETS); + return false; + } - facets_register_key_name_transformation(facets, "SYSLOG_FACILITY", - FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW | - FACET_KEY_OPTION_EXPANDED_FILTER, - netdata_systemd_journal_transform_syslog_facility, NULL); + q->default_facet = FACET_KEY_OPTION_NONE; + facets_reset_and_disable_all_facets(facets); - facets_register_key_name_transformation(facets, "ERRNO", - FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW, - netdata_systemd_journal_transform_errno, NULL); + buffer_json_member_add_array(wb, JOURNAL_PARAMETER_FACETS); - facets_register_key_name(facets, JOURNAL_KEY_ND_JOURNAL_FILE, - FACET_KEY_OPTION_NEVER_FACET); + size_t facets_len = json_object_array_length(fcts); + for (size_t i = 0; i < facets_len; i++) { + json_object *fct = json_object_array_get_idx(fcts, i); - facets_register_key_name(facets, "SYSLOG_IDENTIFIER", - FACET_KEY_OPTION_FACET); + if (json_object_get_type(fct) != json_type_string) { + buffer_sprintf(error, "facets array item %zu is not a string", i); + return false; + } - facets_register_key_name(facets, "UNIT", - FACET_KEY_OPTION_FACET); + const char *value = json_object_get_string(fct); + facets_register_facet(facets, value, FACET_KEY_OPTION_FACET|FACET_KEY_OPTION_FTS|FACET_KEY_OPTION_REORDER); + buffer_json_add_array_item_string(wb, value); + } - facets_register_key_name(facets, "USER_UNIT", - FACET_KEY_OPTION_FACET); + buffer_json_array_close(wb); // facets + } - facets_register_key_name_transformation(facets, "MESSAGE_ID", - FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW | - FACET_KEY_OPTION_EXPANDED_FILTER, - netdata_systemd_journal_transform_message_id, NULL); + json_object *selections; + if (json_object_object_get_ex(jobj, "selections", &selections)) { + if (json_object_get_type(selections) != json_type_object) { + buffer_sprintf(error, "member 
'selections' is not an object"); + return false; + } - facets_register_key_name_transformation(facets, "_BOOT_ID", - FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW, - netdata_systemd_journal_transform_boot_id, NULL); + buffer_json_member_add_object(wb, "selections"); - facets_register_key_name_transformation(facets, "_SYSTEMD_OWNER_UID", - FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW, - netdata_systemd_journal_transform_uid, NULL); + json_object_object_foreach(selections, key, val) { + if (json_object_get_type(val) != json_type_array) { + buffer_sprintf(error, "selection '%s' is not an array", key); + return false; + } - facets_register_key_name_transformation(facets, "_UID", - FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW, - netdata_systemd_journal_transform_uid, NULL); + buffer_json_member_add_array(wb, key); - facets_register_key_name_transformation(facets, "OBJECT_SYSTEMD_OWNER_UID", - FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW, - netdata_systemd_journal_transform_uid, NULL); + size_t values_len = json_object_array_length(val); + for (size_t i = 0; i < values_len; i++) { + json_object *value_obj = json_object_array_get_idx(val, i); - facets_register_key_name_transformation(facets, "OBJECT_UID", - FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW, - netdata_systemd_journal_transform_uid, NULL); + if (json_object_get_type(value_obj) != json_type_string) { + buffer_sprintf(error, "selection '%s' array item %zu is not a string", key, i); + return false; + } - facets_register_key_name_transformation(facets, "_GID", - FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW, - netdata_systemd_journal_transform_gid, NULL); + const char *value = json_object_get_string(value_obj); - facets_register_key_name_transformation(facets, "OBJECT_GID", - FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW, - netdata_systemd_journal_transform_gid, NULL); + // Call facets_register_facet_id_filter for each value + 
facets_register_facet_filter( + facets, key, value, FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_FTS | FACET_KEY_OPTION_REORDER); - facets_register_key_name_transformation(facets, "_CAP_EFFECTIVE", - FACET_KEY_OPTION_TRANSFORM_VIEW, - netdata_systemd_journal_transform_cap_effective, NULL); + buffer_json_add_array_item_string(wb, value); + q->filters++; + } - facets_register_key_name_transformation(facets, "_AUDIT_LOGINUID", - FACET_KEY_OPTION_TRANSFORM_VIEW, - netdata_systemd_journal_transform_uid, NULL); + buffer_json_array_close(wb); // key + } - facets_register_key_name_transformation(facets, "OBJECT_AUDIT_LOGINUID", - FACET_KEY_OPTION_TRANSFORM_VIEW, - netdata_systemd_journal_transform_uid, NULL); + buffer_json_object_close(wb); // selections + } - facets_register_key_name_transformation(facets, "_SOURCE_REALTIME_TIMESTAMP", - FACET_KEY_OPTION_TRANSFORM_VIEW, - netdata_systemd_journal_transform_timestamp_usec, NULL); + return true; +} - // ------------------------------------------------------------------------ - // parse the parameters +static bool parse_post_params(FACETS *facets, JOURNAL_QUERY *q, BUFFER *wb, BUFFER *payload, const char *transaction) { + struct post_query_data qd = { + .transaction = transaction, + .facets = facets, + .q = q, + .wb = wb, + }; - bool info = false, data_only = false, slice = JOURNAL_DEFAULT_SLICE_MODE, delta = false, tail = false; - time_t after_s = 0, before_s = 0; - usec_t anchor = 0; - usec_t if_modified_since = 0; - size_t last = 0; - FACETS_ANCHOR_DIRECTION direction = JOURNAL_DEFAULT_DIRECTION; - const char *query = NULL; - const char *chart = NULL; - SIMPLE_PATTERN *sources = NULL; - SD_JOURNAL_FILE_SOURCE_TYPE source_type = SDJF_ALL; - size_t filters = 0; - size_t sampling = SYSTEMD_JOURNAL_DEFAULT_ITEMS_SAMPLING; + int code; + CLEAN_JSON_OBJECT *jobj = json_parse_function_payload_or_error(wb, payload, &code, parse_json_payload, &qd); + if(!jobj || code != HTTP_RESP_OK) { + netdata_mutex_lock(&stdout_mutex); + 
pluginsd_function_result_to_stdout(transaction, code, "application/json", now_realtime_sec() + 1, wb); + netdata_mutex_unlock(&stdout_mutex); + return false; + } + + return true; +} +static bool parse_get_params(FACETS *facets, JOURNAL_QUERY *q, BUFFER *wb, char *function, const char *transaction) { buffer_json_member_add_object(wb, "_request"); char *words[SYSTEMD_JOURNAL_MAX_PARAMS] = { NULL }; @@ -1683,54 +1746,54 @@ void function_systemd_journal(const char *transaction, char *function, usec_t *s if(strcmp(keyword, JOURNAL_PARAMETER_HELP) == 0) { netdata_systemd_journal_function_help(transaction); - goto cleanup; + return false; } else if(strcmp(keyword, JOURNAL_PARAMETER_INFO) == 0) { - info = true; + q->info = true; } else if(strncmp(keyword, JOURNAL_PARAMETER_DELTA ":", sizeof(JOURNAL_PARAMETER_DELTA ":") - 1) == 0) { char *v = &keyword[sizeof(JOURNAL_PARAMETER_DELTA ":") - 1]; if(strcmp(v, "false") == 0 || strcmp(v, "no") == 0 || strcmp(v, "0") == 0) - delta = false; + q->delta = false; else - delta = true; + q->delta = true; } else if(strncmp(keyword, JOURNAL_PARAMETER_TAIL ":", sizeof(JOURNAL_PARAMETER_TAIL ":") - 1) == 0) { char *v = &keyword[sizeof(JOURNAL_PARAMETER_TAIL ":") - 1]; if(strcmp(v, "false") == 0 || strcmp(v, "no") == 0 || strcmp(v, "0") == 0) - tail = false; + q->tail = false; else - tail = true; + q->tail = true; } else if(strncmp(keyword, JOURNAL_PARAMETER_SAMPLING ":", sizeof(JOURNAL_PARAMETER_SAMPLING ":") - 1) == 0) { - sampling = str2ul(&keyword[sizeof(JOURNAL_PARAMETER_SAMPLING ":") - 1]); + q->sampling = str2ul(&keyword[sizeof(JOURNAL_PARAMETER_SAMPLING ":") - 1]); } else if(strncmp(keyword, JOURNAL_PARAMETER_DATA_ONLY ":", sizeof(JOURNAL_PARAMETER_DATA_ONLY ":") - 1) == 0) { char *v = &keyword[sizeof(JOURNAL_PARAMETER_DATA_ONLY ":") - 1]; if(strcmp(v, "false") == 0 || strcmp(v, "no") == 0 || strcmp(v, "0") == 0) - data_only = false; + q->data_only = false; else - data_only = true; + q->data_only = true; } else if(strncmp(keyword, 
JOURNAL_PARAMETER_SLICE ":", sizeof(JOURNAL_PARAMETER_SLICE ":") - 1) == 0) { char *v = &keyword[sizeof(JOURNAL_PARAMETER_SLICE ":") - 1]; if(strcmp(v, "false") == 0 || strcmp(v, "no") == 0 || strcmp(v, "0") == 0) - slice = false; + q->slice = false; else - slice = true; + q->slice = true; } else if(strncmp(keyword, JOURNAL_PARAMETER_SOURCE ":", sizeof(JOURNAL_PARAMETER_SOURCE ":") - 1) == 0) { const char *value = &keyword[sizeof(JOURNAL_PARAMETER_SOURCE ":") - 1]; buffer_json_member_add_array(wb, JOURNAL_PARAMETER_SOURCE); - BUFFER *sources_list = buffer_create(0, NULL); + CLEAN_BUFFER *sources_list = buffer_create(0, NULL); - source_type = SDJF_NONE; + q->source_type = SDJF_NONE; while(value) { char *sep = strchr(value, ','); if(sep) @@ -1738,38 +1801,15 @@ void function_systemd_journal(const char *transaction, char *function, usec_t *s buffer_json_add_array_item_string(wb, value); - if(strcmp(value, SDJF_SOURCE_ALL_NAME) == 0) { - source_type |= SDJF_ALL; - value = NULL; - } - else if(strcmp(value, SDJF_SOURCE_LOCAL_NAME) == 0) { - source_type |= SDJF_LOCAL_ALL; - value = NULL; - } - else if(strcmp(value, SDJF_SOURCE_REMOTES_NAME) == 0) { - source_type |= SDJF_REMOTE_ALL; - value = NULL; - } - else if(strcmp(value, SDJF_SOURCE_NAMESPACES_NAME) == 0) { - source_type |= SDJF_LOCAL_NAMESPACE; - value = NULL; - } - else if(strcmp(value, SDJF_SOURCE_LOCAL_SYSTEM_NAME) == 0) { - source_type |= SDJF_LOCAL_SYSTEM; - value = NULL; - } - else if(strcmp(value, SDJF_SOURCE_LOCAL_USERS_NAME) == 0) { - source_type |= SDJF_LOCAL_USER; - value = NULL; - } - else if(strcmp(value, SDJF_SOURCE_LOCAL_OTHER_NAME) == 0) { - source_type |= SDJF_LOCAL_OTHER; + SD_JOURNAL_FILE_SOURCE_TYPE t = get_internal_source_type(value); + if(t != SDJF_NONE) { + q->source_type |= t; value = NULL; } else { // else, match the source, whatever it is if(buffer_strlen(sources_list)) - buffer_strcat(sources_list, ","); + buffer_putc(sources_list, '|'); buffer_strcat(sources_list, value); } @@ -1778,39 
+1818,42 @@ void function_systemd_journal(const char *transaction, char *function, usec_t *s } if(buffer_strlen(sources_list)) { - simple_pattern_free(sources); - sources = simple_pattern_create(buffer_tostring(sources_list), ",", SIMPLE_PATTERN_EXACT, false); + simple_pattern_free(q->sources); + q->sources = simple_pattern_create(buffer_tostring(sources_list), "|", SIMPLE_PATTERN_EXACT, false); } - buffer_free(sources_list); - buffer_json_array_close(wb); // source } else if(strncmp(keyword, JOURNAL_PARAMETER_AFTER ":", sizeof(JOURNAL_PARAMETER_AFTER ":") - 1) == 0) { - after_s = str2l(&keyword[sizeof(JOURNAL_PARAMETER_AFTER ":") - 1]); + q->after_s = str2l(&keyword[sizeof(JOURNAL_PARAMETER_AFTER ":") - 1]); } else if(strncmp(keyword, JOURNAL_PARAMETER_BEFORE ":", sizeof(JOURNAL_PARAMETER_BEFORE ":") - 1) == 0) { - before_s = str2l(&keyword[sizeof(JOURNAL_PARAMETER_BEFORE ":") - 1]); + q->before_s = str2l(&keyword[sizeof(JOURNAL_PARAMETER_BEFORE ":") - 1]); } else if(strncmp(keyword, JOURNAL_PARAMETER_IF_MODIFIED_SINCE ":", sizeof(JOURNAL_PARAMETER_IF_MODIFIED_SINCE ":") - 1) == 0) { - if_modified_since = str2ull(&keyword[sizeof(JOURNAL_PARAMETER_IF_MODIFIED_SINCE ":") - 1], NULL); + q->if_modified_since = str2ull(&keyword[sizeof(JOURNAL_PARAMETER_IF_MODIFIED_SINCE ":") - 1], NULL); } else if(strncmp(keyword, JOURNAL_PARAMETER_ANCHOR ":", sizeof(JOURNAL_PARAMETER_ANCHOR ":") - 1) == 0) { - anchor = str2ull(&keyword[sizeof(JOURNAL_PARAMETER_ANCHOR ":") - 1], NULL); + q->anchor = str2ull(&keyword[sizeof(JOURNAL_PARAMETER_ANCHOR ":") - 1], NULL); } else if(strncmp(keyword, JOURNAL_PARAMETER_DIRECTION ":", sizeof(JOURNAL_PARAMETER_DIRECTION ":") - 1) == 0) { - direction = strcasecmp(&keyword[sizeof(JOURNAL_PARAMETER_DIRECTION ":") - 1], "forward") == 0 ? 
FACETS_ANCHOR_DIRECTION_FORWARD : FACETS_ANCHOR_DIRECTION_BACKWARD; + q->direction = get_direction(&keyword[sizeof(JOURNAL_PARAMETER_DIRECTION ":") - 1]); } else if(strncmp(keyword, JOURNAL_PARAMETER_LAST ":", sizeof(JOURNAL_PARAMETER_LAST ":") - 1) == 0) { - last = str2ul(&keyword[sizeof(JOURNAL_PARAMETER_LAST ":") - 1]); + q->last = str2ul(&keyword[sizeof(JOURNAL_PARAMETER_LAST ":") - 1]); } else if(strncmp(keyword, JOURNAL_PARAMETER_QUERY ":", sizeof(JOURNAL_PARAMETER_QUERY ":") - 1) == 0) { - query= &keyword[sizeof(JOURNAL_PARAMETER_QUERY ":") - 1]; + freez((void *)q->query); + q->query= strdupz(&keyword[sizeof(JOURNAL_PARAMETER_QUERY ":") - 1]); } else if(strncmp(keyword, JOURNAL_PARAMETER_HISTOGRAM ":", sizeof(JOURNAL_PARAMETER_HISTOGRAM ":") - 1) == 0) { - chart = &keyword[sizeof(JOURNAL_PARAMETER_HISTOGRAM ":") - 1]; + freez((void *)q->chart); + q->chart = strdupz(&keyword[sizeof(JOURNAL_PARAMETER_HISTOGRAM ":") - 1]); } else if(strncmp(keyword, JOURNAL_PARAMETER_FACETS ":", sizeof(JOURNAL_PARAMETER_FACETS ":") - 1) == 0) { + q->default_facet = FACET_KEY_OPTION_NONE; + facets_reset_and_disable_all_facets(facets); + char *value = &keyword[sizeof(JOURNAL_PARAMETER_FACETS ":") - 1]; if(*value) { buffer_json_member_add_array(wb, JOURNAL_PARAMETER_FACETS); @@ -1841,9 +1884,14 @@ void function_systemd_journal(const char *transaction, char *function, usec_t *s if(sep) *sep++ = '\0'; - facets_register_facet_id_filter(facets, keyword, value, FACET_KEY_OPTION_FACET|FACET_KEY_OPTION_FTS|FACET_KEY_OPTION_REORDER); + facets_register_facet_filter_id( + facets, + keyword, + value, + FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_FTS | FACET_KEY_OPTION_REORDER); + buffer_json_add_array_item_string(wb, value); - filters++; + q->filters++; value = sep; } @@ -1853,6 +1901,197 @@ void function_systemd_journal(const char *transaction, char *function, usec_t *s } } + return true; +} + +void function_systemd_journal(const char *transaction, char *function, usec_t *stop_monotonic_ut, 
bool *cancelled, + BUFFER *payload, HTTP_ACCESS access __maybe_unused, + const char *source __maybe_unused, void *data __maybe_unused) { + fstat_thread_calls = 0; + fstat_thread_cached_responses = 0; + + CLEAN_BUFFER *wb = buffer_create(0, NULL); + buffer_flush(wb); + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY); + + FUNCTION_QUERY_STATUS tmp_fqs = { + .cancelled = cancelled, + .stop_monotonic_ut = stop_monotonic_ut, + }; + FUNCTION_QUERY_STATUS *fqs = NULL; + + FACETS *facets = facets_create(50, FACETS_OPTION_ALL_KEYS_FTS, + SYSTEMD_ALWAYS_VISIBLE_KEYS, + SYSTEMD_KEYS_INCLUDED_IN_FACETS, + SYSTEMD_KEYS_EXCLUDED_FROM_FACETS); + + facets_accepted_param(facets, JOURNAL_PARAMETER_INFO); + facets_accepted_param(facets, JOURNAL_PARAMETER_SOURCE); + facets_accepted_param(facets, JOURNAL_PARAMETER_AFTER); + facets_accepted_param(facets, JOURNAL_PARAMETER_BEFORE); + facets_accepted_param(facets, JOURNAL_PARAMETER_ANCHOR); + facets_accepted_param(facets, JOURNAL_PARAMETER_DIRECTION); + facets_accepted_param(facets, JOURNAL_PARAMETER_LAST); + facets_accepted_param(facets, JOURNAL_PARAMETER_QUERY); + facets_accepted_param(facets, JOURNAL_PARAMETER_FACETS); + facets_accepted_param(facets, JOURNAL_PARAMETER_HISTOGRAM); + facets_accepted_param(facets, JOURNAL_PARAMETER_IF_MODIFIED_SINCE); + facets_accepted_param(facets, JOURNAL_PARAMETER_DATA_ONLY); + facets_accepted_param(facets, JOURNAL_PARAMETER_DELTA); + facets_accepted_param(facets, JOURNAL_PARAMETER_TAIL); + facets_accepted_param(facets, JOURNAL_PARAMETER_SAMPLING); + +#ifdef HAVE_SD_JOURNAL_RESTART_FIELDS + facets_accepted_param(facets, JOURNAL_PARAMETER_SLICE); +#endif // HAVE_SD_JOURNAL_RESTART_FIELDS + + // ------------------------------------------------------------------------ + // parse the parameters + + JOURNAL_QUERY q = { + .default_facet = FACET_KEY_OPTION_FACET, + .info = false, + .data_only = false, + .slice = JOURNAL_DEFAULT_SLICE_MODE, + .delta = false, + .tail = false, + 
.after_s = 0, + .before_s = 0, + .anchor = 0, + .if_modified_since = 0, + .last = 0, + .direction = JOURNAL_DEFAULT_DIRECTION, + .query = NULL, + .chart = NULL, + .sources = NULL, + .source_type = SDJF_ALL, + .filters = 0, + .sampling = SYSTEMD_JOURNAL_DEFAULT_ITEMS_SAMPLING, + }; + + if( (payload && !parse_post_params(facets, &q, wb, payload, transaction)) || + (!payload && !parse_get_params(facets, &q, wb, function, transaction)) ) + goto cleanup; + + // ---------------------------------------------------------------------------------------------------------------- + // register the fields in the order you want them on the dashboard + + facets_register_row_severity(facets, syslog_priority_to_facet_severity, NULL); + + facets_register_key_name( + facets, "_HOSTNAME", + q.default_facet | FACET_KEY_OPTION_VISIBLE); + + facets_register_dynamic_key_name( + facets, JOURNAL_KEY_ND_JOURNAL_PROCESS, + FACET_KEY_OPTION_NEVER_FACET | FACET_KEY_OPTION_VISIBLE, + netdata_systemd_journal_dynamic_row_id, NULL); + + facets_register_key_name( + facets, "MESSAGE", + FACET_KEY_OPTION_NEVER_FACET | FACET_KEY_OPTION_MAIN_TEXT | + FACET_KEY_OPTION_VISIBLE | FACET_KEY_OPTION_FTS); + + // facets_register_dynamic_key_name( + // facets, "MESSAGE", + // FACET_KEY_OPTION_NEVER_FACET | FACET_KEY_OPTION_MAIN_TEXT | FACET_KEY_OPTION_RICH_TEXT | + // FACET_KEY_OPTION_VISIBLE | FACET_KEY_OPTION_FTS, + // netdata_systemd_journal_rich_message, NULL); + + facets_register_key_name_transformation( + facets, "PRIORITY", + q.default_facet | FACET_KEY_OPTION_TRANSFORM_VIEW | + FACET_KEY_OPTION_EXPANDED_FILTER, + netdata_systemd_journal_transform_priority, NULL); + + facets_register_key_name_transformation( + facets, "SYSLOG_FACILITY", + q.default_facet | FACET_KEY_OPTION_TRANSFORM_VIEW | + FACET_KEY_OPTION_EXPANDED_FILTER, + netdata_systemd_journal_transform_syslog_facility, NULL); + + facets_register_key_name_transformation( + facets, "ERRNO", + q.default_facet | FACET_KEY_OPTION_TRANSFORM_VIEW, + 
netdata_systemd_journal_transform_errno, NULL); + + facets_register_key_name( + facets, JOURNAL_KEY_ND_JOURNAL_FILE, + FACET_KEY_OPTION_NEVER_FACET); + + facets_register_key_name( + facets, "SYSLOG_IDENTIFIER", + q.default_facet); + + facets_register_key_name( + facets, "UNIT", + q.default_facet); + + facets_register_key_name( + facets, "USER_UNIT", + q.default_facet); + + facets_register_key_name_transformation( + facets, "MESSAGE_ID", + q.default_facet | FACET_KEY_OPTION_TRANSFORM_VIEW | + FACET_KEY_OPTION_EXPANDED_FILTER, + netdata_systemd_journal_transform_message_id, NULL); + + facets_register_key_name_transformation( + facets, "_BOOT_ID", + q.default_facet | FACET_KEY_OPTION_TRANSFORM_VIEW, + netdata_systemd_journal_transform_boot_id, NULL); + + facets_register_key_name_transformation( + facets, "_SYSTEMD_OWNER_UID", + q.default_facet | FACET_KEY_OPTION_TRANSFORM_VIEW, + netdata_systemd_journal_transform_uid, NULL); + + facets_register_key_name_transformation( + facets, "_UID", + q.default_facet | FACET_KEY_OPTION_TRANSFORM_VIEW, + netdata_systemd_journal_transform_uid, NULL); + + facets_register_key_name_transformation( + facets, "OBJECT_SYSTEMD_OWNER_UID", + q.default_facet | FACET_KEY_OPTION_TRANSFORM_VIEW, + netdata_systemd_journal_transform_uid, NULL); + + facets_register_key_name_transformation( + facets, "OBJECT_UID", + q.default_facet | FACET_KEY_OPTION_TRANSFORM_VIEW, + netdata_systemd_journal_transform_uid, NULL); + + facets_register_key_name_transformation( + facets, "_GID", + q.default_facet | FACET_KEY_OPTION_TRANSFORM_VIEW, + netdata_systemd_journal_transform_gid, NULL); + + facets_register_key_name_transformation( + facets, "OBJECT_GID", + q.default_facet | FACET_KEY_OPTION_TRANSFORM_VIEW, + netdata_systemd_journal_transform_gid, NULL); + + facets_register_key_name_transformation( + facets, "_CAP_EFFECTIVE", + FACET_KEY_OPTION_TRANSFORM_VIEW, + netdata_systemd_journal_transform_cap_effective, NULL); + + facets_register_key_name_transformation( 
+ facets, "_AUDIT_LOGINUID", + FACET_KEY_OPTION_TRANSFORM_VIEW, + netdata_systemd_journal_transform_uid, NULL); + + facets_register_key_name_transformation( + facets, "OBJECT_AUDIT_LOGINUID", + FACET_KEY_OPTION_TRANSFORM_VIEW, + netdata_systemd_journal_transform_uid, NULL); + + facets_register_key_name_transformation( + facets, "_SOURCE_REALTIME_TIMESTAMP", + FACET_KEY_OPTION_TRANSFORM_VIEW, + netdata_systemd_journal_transform_timestamp_usec, NULL); + // ------------------------------------------------------------------------ // put this request into the progress db @@ -1864,71 +2103,70 @@ void function_systemd_journal(const char *transaction, char *function, usec_t *s time_t now_s = now_realtime_sec(); time_t expires = now_s + 1; - if(!after_s && !before_s) { - before_s = now_s; - after_s = before_s - SYSTEMD_JOURNAL_DEFAULT_QUERY_DURATION; + if(!q.after_s && !q.before_s) { + q.before_s = now_s; + q.after_s = q.before_s - SYSTEMD_JOURNAL_DEFAULT_QUERY_DURATION; } else - rrdr_relative_window_to_absolute(&after_s, &before_s, now_s); + rrdr_relative_window_to_absolute(&q.after_s, &q.before_s, now_s); - if(after_s > before_s) { - time_t tmp = after_s; - after_s = before_s; - before_s = tmp; + if(q.after_s > q.before_s) { + time_t tmp = q.after_s; + q.after_s = q.before_s; + q.before_s = tmp; } - if(after_s == before_s) - after_s = before_s - SYSTEMD_JOURNAL_DEFAULT_QUERY_DURATION; - - if(!last) - last = SYSTEMD_JOURNAL_DEFAULT_ITEMS_PER_QUERY; + if(q.after_s == q.before_s) + q.after_s = q.before_s - SYSTEMD_JOURNAL_DEFAULT_QUERY_DURATION; + if(!q.last) + q.last = SYSTEMD_JOURNAL_DEFAULT_ITEMS_PER_QUERY; // ------------------------------------------------------------------------ // set query time-frame, anchors and direction fqs->transaction = transaction; - fqs->after_ut = after_s * USEC_PER_SEC; - fqs->before_ut = (before_s * USEC_PER_SEC) + USEC_PER_SEC - 1; - fqs->if_modified_since = if_modified_since; - fqs->data_only = data_only; - fqs->delta = (fqs->data_only) ? 
delta : false; - fqs->tail = (fqs->data_only && fqs->if_modified_since) ? tail : false; - fqs->sources = sources; - fqs->source_type = source_type; - fqs->entries = last; + fqs->after_ut = q.after_s * USEC_PER_SEC; + fqs->before_ut = (q.before_s * USEC_PER_SEC) + USEC_PER_SEC - 1; + fqs->if_modified_since = q.if_modified_since; + fqs->data_only = q.data_only; + fqs->delta = (fqs->data_only) ? q.delta : false; + fqs->tail = (fqs->data_only && fqs->if_modified_since) ? q.tail : false; + fqs->sources = q.sources; + fqs->source_type = q.source_type; + fqs->entries = q.last; fqs->last_modified = 0; - fqs->filters = filters; - fqs->query = (query && *query) ? query : NULL; - fqs->histogram = (chart && *chart) ? chart : NULL; - fqs->direction = direction; - fqs->anchor.start_ut = anchor; + fqs->filters = q.filters; + fqs->query = (q.query && *q.query) ? q.query : NULL; + fqs->histogram = (q.chart && *q.chart) ? q.chart : NULL; + fqs->direction = q.direction; + fqs->anchor.start_ut = q.anchor; fqs->anchor.stop_ut = 0; - fqs->sampling = sampling; + fqs->sampling = q.sampling; if(fqs->anchor.start_ut && fqs->tail) { // a tail request // we need the top X entries from BEFORE // but, we need to calculate the facets and the // histogram up to the anchor - fqs->direction = direction = FACETS_ANCHOR_DIRECTION_BACKWARD; + fqs->direction = q.direction = FACETS_ANCHOR_DIRECTION_BACKWARD; fqs->anchor.start_ut = 0; - fqs->anchor.stop_ut = anchor; + fqs->anchor.stop_ut = q.anchor; } - if(anchor && anchor < fqs->after_ut) { + if(q.anchor && q.anchor < fqs->after_ut) { log_fqs(fqs, "received anchor is too small for query timeframe, ignoring anchor"); - anchor = 0; + q.anchor = 0; fqs->anchor.start_ut = 0; fqs->anchor.stop_ut = 0; - fqs->direction = direction = FACETS_ANCHOR_DIRECTION_BACKWARD; + fqs->direction = q.direction = FACETS_ANCHOR_DIRECTION_BACKWARD; } - else if(anchor > fqs->before_ut) { + else if(q.anchor > fqs->before_ut) { log_fqs(fqs, "received anchor is too big for query 
timeframe, ignoring anchor"); - anchor = 0; + q.anchor = 0; fqs->anchor.start_ut = 0; fqs->anchor.stop_ut = 0; - fqs->direction = direction = FACETS_ANCHOR_DIRECTION_BACKWARD; + fqs->direction = q.direction = FACETS_ANCHOR_DIRECTION_BACKWARD; } facets_set_anchor(facets, fqs->anchor.start_ut, fqs->anchor.stop_ut, fqs->direction); @@ -1945,8 +2183,8 @@ void function_systemd_journal(const char *transaction, char *function, usec_t *s facets_set_query(facets, fqs->query); #ifdef HAVE_SD_JOURNAL_RESTART_FIELDS - fqs->slice = slice; - if(slice) + fqs->slice = q.slice; + if(q.slice) facets_enable_slice_mode(facets); #else fqs->slice = false; @@ -1971,7 +2209,7 @@ void function_systemd_journal(const char *transaction, char *function, usec_t *s buffer_json_member_add_uint64(wb, JOURNAL_PARAMETER_AFTER, fqs->after_ut / USEC_PER_SEC); buffer_json_member_add_uint64(wb, JOURNAL_PARAMETER_BEFORE, fqs->before_ut / USEC_PER_SEC); buffer_json_member_add_uint64(wb, "if_modified_since", fqs->if_modified_since); - buffer_json_member_add_uint64(wb, JOURNAL_PARAMETER_ANCHOR, anchor); + buffer_json_member_add_uint64(wb, JOURNAL_PARAMETER_ANCHOR, q.anchor); buffer_json_member_add_string(wb, JOURNAL_PARAMETER_DIRECTION, fqs->direction == FACETS_ANCHOR_DIRECTION_FORWARD ? 
"forward" : "backward"); buffer_json_member_add_uint64(wb, JOURNAL_PARAMETER_LAST, fqs->entries); buffer_json_member_add_string(wb, JOURNAL_PARAMETER_QUERY, fqs->query); @@ -1985,7 +2223,7 @@ void function_systemd_journal(const char *transaction, char *function, usec_t *s int response; - if(info) { + if(q.info) { facets_accepted_parameters_to_json_array(facets, wb, false); buffer_json_member_add_array(wb, "required_params"); { @@ -2033,7 +2271,8 @@ void function_systemd_journal(const char *transaction, char *function, usec_t *s netdata_mutex_unlock(&stdout_mutex); cleanup: - simple_pattern_free(sources); + freez((void *)q.query); + freez((void *)q.chart); + simple_pattern_free(q.sources); facets_destroy(facets); - buffer_free(wb); } diff --git a/src/collectors/systemd-journal.plugin/systemd-main.c b/src/collectors/systemd-journal.plugin/systemd-main.c index e3afe4e86f8b75..e1a6f559b8af56 100644 --- a/src/collectors/systemd-journal.plugin/systemd-main.c +++ b/src/collectors/systemd-journal.plugin/systemd-main.c @@ -46,7 +46,8 @@ int main(int argc __maybe_unused, char **argv __maybe_unused) { bool cancelled = false; usec_t stop_monotonic_ut = now_monotonic_usec() + 600 * USEC_PER_SEC; - char buf[] = "systemd-journal after:-8640000 before:0 direction:backward last:200 data_only:false slice:true source:all"; + // char buf[] = "systemd-journal after:-8640000 before:0 direction:backward last:200 data_only:false slice:true source:all"; + char buf[] = "systemd-journal after:-8640000 before:0 direction:backward last:200 data_only:false slice:true facets: source:all"; // char buf[] = "systemd-journal after:1695332964 before:1695937764 direction:backward last:100 slice:true source:all DHKucpqUoe1:PtVoyIuX.MU"; // char buf[] = "systemd-journal after:1694511062 before:1694514662 anchor:1694514122024403"; function_systemd_journal("123", buf, &stop_monotonic_ut, &cancelled, diff --git a/src/collectors/tc.plugin/plugin_tc.c b/src/collectors/tc.plugin/plugin_tc.c index 
da2a39194dead6..95b0e3b1c3f329 100644 --- a/src/collectors/tc.plugin/plugin_tc.c +++ b/src/collectors/tc.plugin/plugin_tc.c @@ -928,7 +928,7 @@ void *tc_main(void *ptr) { } char buffer[TC_LINE_MAX+1] = ""; - while(fgets(buffer, TC_LINE_MAX, tc_child_instance->child_stdout_fp) != NULL) { + while(fgets(buffer, TC_LINE_MAX, spawn_popen_stdout(tc_child_instance)) != NULL) { if(unlikely(!service_running(SERVICE_COLLECTORS))) break; buffer[TC_LINE_MAX] = '\0'; diff --git a/src/daemon/README.md b/src/daemon/README.md index bc2ec7757f8a69..65bebe5ac06409 100644 --- a/src/daemon/README.md +++ b/src/daemon/README.md @@ -104,9 +104,6 @@ The command line options of the Netdata 1.10.0 version are the following: -W simple-pattern pattern string Check if string matches pattern and exit. - -W "claim -token=TOKEN -rooms=ROOM1,ROOM2 url=https://app.netdata.cloud" - Connect the agent to the workspace Rooms pointed to by TOKEN and ROOM*. - Signals netdata handles: - HUP Close and reopen log files. diff --git a/src/daemon/analytics.c b/src/daemon/analytics.c index 0e5c221c41526d..7ca99cc5a98733 100644 --- a/src/daemon/analytics.c +++ b/src/daemon/analytics.c @@ -334,7 +334,7 @@ void analytics_alarms_notifications(void) if (instance) { char line[200 + 1]; - while (fgets(line, 200, instance->child_stdout_fp) != NULL) { + while (fgets(line, 200, spawn_popen_stdout(instance)) != NULL) { char *end = line; while (*end && *end != '\n') end++; @@ -375,7 +375,6 @@ static void analytics_get_install_type(struct rrdhost_system_info *system_info) void analytics_https(void) { BUFFER *b = buffer_create(30, NULL); -#ifdef ENABLE_HTTPS analytics_exporting_connectors_ssl(b); buffer_strcat(b, netdata_ssl_streaming_sender_ctx && @@ -383,9 +382,6 @@ void analytics_https(void) SSL_connection(&localhost->sender->ssl) ? "streaming|" : "|"); buffer_strcat(b, netdata_ssl_web_server_ctx ? 
"web" : ""); -#else - buffer_strcat(b, "||"); -#endif analytics_set_data_str(&analytics_data.netdata_config_https_available, (char *)buffer_tostring(b)); buffer_free(b); @@ -468,13 +464,8 @@ void analytics_alarms(void) */ void analytics_misc(void) { -#ifdef ENABLE_ACLK analytics_set_data(&analytics_data.netdata_host_cloud_available, "true"); analytics_set_data_str(&analytics_data.netdata_host_aclk_implementation, "Next Generation"); -#else - analytics_set_data(&analytics_data.netdata_host_cloud_available, "false"); - analytics_set_data_str(&analytics_data.netdata_host_aclk_implementation, ""); -#endif analytics_data.exporting_enabled = appconfig_get_boolean(&exporting_config, CONFIG_SECTION_EXPORTING, "enabled", CONFIG_BOOLEAN_NO); analytics_set_data(&analytics_data.netdata_config_exporting_enabled, analytics_data.exporting_enabled ? "true" : "false"); @@ -495,13 +486,11 @@ void analytics_misc(void) void analytics_aclk(void) { -#ifdef ENABLE_ACLK - if (aclk_connected) { + if (aclk_online()) { analytics_set_data(&analytics_data.netdata_host_aclk_available, "true"); analytics_set_data_str(&analytics_data.netdata_host_aclk_protocol, "New"); } else -#endif analytics_set_data(&analytics_data.netdata_host_aclk_available, "false"); } @@ -535,9 +524,7 @@ void analytics_gather_mutable_meta_data(void) analytics_set_data( &analytics_data.netdata_config_is_parent, (rrdhost_hosts_available() > 1 || configured_as_parent()) ? "true" : "false"); - char *claim_id = get_agent_claimid(); - analytics_set_data(&analytics_data.netdata_host_agent_claimed, claim_id ? "true" : "false"); - freez(claim_id); + analytics_set_data(&analytics_data.netdata_host_agent_claimed, is_agent_claimed() ? 
"true" : "false"); { char b[21]; @@ -627,46 +614,15 @@ void *analytics_main(void *ptr) return NULL; } -static const char *verify_required_directory(const char *dir) -{ - if (chdir(dir) == -1) - fatal("Cannot change directory to '%s'", dir); - - DIR *d = opendir(dir); - if (!d) - fatal("Cannot examine the contents of directory '%s'", dir); - closedir(d); - - return dir; -} - -static const char *verify_or_create_required_directory(const char *dir) { - int result; - - result = mkdir(dir, 0755); - - if (result != 0 && errno != EEXIST) - fatal("Cannot create required directory '%s'", dir); - - return verify_required_directory(dir); -} - /* * This is called after the rrdinit * These values will be sent on the START event */ -void set_late_global_environment(struct rrdhost_system_info *system_info) +void set_late_analytics_variables(struct rrdhost_system_info *system_info) { analytics_set_data(&analytics_data.netdata_config_stream_enabled, default_rrdpush_enabled ? "true" : "false"); analytics_set_data_str(&analytics_data.netdata_config_memory_mode, (char *)rrd_memory_mode_name(default_rrd_memory_mode)); - -#ifdef DISABLE_CLOUD - analytics_set_data(&analytics_data.netdata_host_cloud_enabled, "false"); -#else - analytics_set_data( - &analytics_data.netdata_host_cloud_enabled, - appconfig_get_boolean_ondemand(&cloud_config, CONFIG_SECTION_GLOBAL, "enabled", netdata_cloud_enabled) ? 
"true" : "false"); -#endif + analytics_set_data(&analytics_data.netdata_host_cloud_enabled, "true"); #ifdef ENABLE_DBENGINE { @@ -679,11 +635,7 @@ void set_late_global_environment(struct rrdhost_system_info *system_info) } #endif -#ifdef ENABLE_HTTPS analytics_set_data(&analytics_data.netdata_config_https_enabled, "true"); -#else - analytics_set_data(&analytics_data.netdata_config_https_enabled, "false"); -#endif if (web_server_mode == WEB_SERVER_MODE_NONE) analytics_set_data(&analytics_data.netdata_config_web_enabled, "false"); @@ -831,119 +783,6 @@ void get_system_timezone(void) } } -void set_global_environment() { - { - char b[16]; - snprintfz(b, sizeof(b) - 1, "%d", default_rrd_update_every); - setenv("NETDATA_UPDATE_EVERY", b, 1); - } - - setenv("NETDATA_VERSION", NETDATA_VERSION, 1); - setenv("NETDATA_HOSTNAME", netdata_configured_hostname, 1); - setenv("NETDATA_CONFIG_DIR", verify_required_directory(netdata_configured_user_config_dir), 1); - setenv("NETDATA_USER_CONFIG_DIR", verify_required_directory(netdata_configured_user_config_dir), 1); - setenv("NETDATA_STOCK_CONFIG_DIR", verify_required_directory(netdata_configured_stock_config_dir), 1); - setenv("NETDATA_PLUGINS_DIR", verify_required_directory(netdata_configured_primary_plugins_dir), 1); - setenv("NETDATA_WEB_DIR", verify_required_directory(netdata_configured_web_dir), 1); - setenv("NETDATA_CACHE_DIR", verify_or_create_required_directory(netdata_configured_cache_dir), 1); - setenv("NETDATA_LIB_DIR", verify_or_create_required_directory(netdata_configured_varlib_dir), 1); - setenv("NETDATA_LOCK_DIR", verify_or_create_required_directory(netdata_configured_lock_dir), 1); - setenv("NETDATA_LOG_DIR", verify_or_create_required_directory(netdata_configured_log_dir), 1); - setenv("NETDATA_HOST_PREFIX", netdata_configured_host_prefix, 1); - - { - BUFFER *user_plugins_dirs = buffer_create(FILENAME_MAX, NULL); - - for (size_t i = 1; i < PLUGINSD_MAX_DIRECTORIES && plugin_directories[i]; i++) { - if (i > 1) - 
buffer_strcat(user_plugins_dirs, " "); - buffer_strcat(user_plugins_dirs, plugin_directories[i]); - } - - setenv("NETDATA_USER_PLUGINS_DIRS", buffer_tostring(user_plugins_dirs), 1); - - buffer_free(user_plugins_dirs); - } - - analytics_data.data_length = 0; - analytics_set_data(&analytics_data.netdata_config_stream_enabled, "null"); - analytics_set_data(&analytics_data.netdata_config_memory_mode, "null"); - analytics_set_data(&analytics_data.netdata_config_exporting_enabled, "null"); - analytics_set_data(&analytics_data.netdata_exporting_connectors, "null"); - analytics_set_data(&analytics_data.netdata_allmetrics_prometheus_used, "null"); - analytics_set_data(&analytics_data.netdata_allmetrics_shell_used, "null"); - analytics_set_data(&analytics_data.netdata_allmetrics_json_used, "null"); - analytics_set_data(&analytics_data.netdata_dashboard_used, "null"); - analytics_set_data(&analytics_data.netdata_collectors, "null"); - analytics_set_data(&analytics_data.netdata_collectors_count, "null"); - analytics_set_data(&analytics_data.netdata_buildinfo, "null"); - analytics_set_data(&analytics_data.netdata_config_page_cache_size, "null"); - analytics_set_data(&analytics_data.netdata_config_multidb_disk_quota, "null"); - analytics_set_data(&analytics_data.netdata_config_https_enabled, "null"); - analytics_set_data(&analytics_data.netdata_config_web_enabled, "null"); - analytics_set_data(&analytics_data.netdata_config_release_channel, "null"); - analytics_set_data(&analytics_data.netdata_mirrored_host_count, "null"); - analytics_set_data(&analytics_data.netdata_mirrored_hosts_reachable, "null"); - analytics_set_data(&analytics_data.netdata_mirrored_hosts_unreachable, "null"); - analytics_set_data(&analytics_data.netdata_notification_methods, "null"); - analytics_set_data(&analytics_data.netdata_alarms_normal, "null"); - analytics_set_data(&analytics_data.netdata_alarms_warning, "null"); - analytics_set_data(&analytics_data.netdata_alarms_critical, "null"); - 
analytics_set_data(&analytics_data.netdata_charts_count, "null"); - analytics_set_data(&analytics_data.netdata_metrics_count, "null"); - analytics_set_data(&analytics_data.netdata_config_is_parent, "null"); - analytics_set_data(&analytics_data.netdata_config_hosts_available, "null"); - analytics_set_data(&analytics_data.netdata_host_cloud_available, "null"); - analytics_set_data(&analytics_data.netdata_host_aclk_implementation, "null"); - analytics_set_data(&analytics_data.netdata_host_aclk_available, "null"); - analytics_set_data(&analytics_data.netdata_host_aclk_protocol, "null"); - analytics_set_data(&analytics_data.netdata_host_agent_claimed, "null"); - analytics_set_data(&analytics_data.netdata_host_cloud_enabled, "null"); - analytics_set_data(&analytics_data.netdata_config_https_available, "null"); - analytics_set_data(&analytics_data.netdata_install_type, "null"); - analytics_set_data(&analytics_data.netdata_config_is_private_registry, "null"); - analytics_set_data(&analytics_data.netdata_config_use_private_registry, "null"); - analytics_set_data(&analytics_data.netdata_config_oom_score, "null"); - analytics_set_data(&analytics_data.netdata_prebuilt_distro, "null"); - analytics_set_data(&analytics_data.netdata_fail_reason, "null"); - - analytics_data.prometheus_hits = 0; - analytics_data.shell_hits = 0; - analytics_data.json_hits = 0; - analytics_data.dashboard_hits = 0; - analytics_data.charts_count = 0; - analytics_data.metrics_count = 0; - analytics_data.exporting_enabled = false; - - char *default_port = appconfig_get(&netdata_config, CONFIG_SECTION_WEB, "default port", NULL); - int clean = 0; - if (!default_port) { - default_port = strdupz("19999"); - clean = 1; - } - - setenv("NETDATA_LISTEN_PORT", default_port, 1); - if (clean) - freez(default_port); - - // set the path we need - char path[4096], *p = getenv("PATH"); - if (!p) p = "/bin:/usr/bin"; - snprintfz(path, sizeof(path), "%s:%s", p, "/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin"); - 
setenv("PATH", config_get(CONFIG_SECTION_ENV_VARS, "PATH", path), 1); - - // python options - p = getenv("PYTHONPATH"); - if (!p) p = ""; - setenv("PYTHONPATH", config_get(CONFIG_SECTION_ENV_VARS, "PYTHONPATH", p), 1); - - // disable buffering for python plugins - setenv("PYTHONUNBUFFERED", "1", 1); - - // switch to standard locale for plugins - setenv("LC_ALL", "C", 1); -} - void analytics_statistic_send(const analytics_statistic_t *statistic) { if (!statistic) return; @@ -1053,7 +892,7 @@ void analytics_statistic_send(const analytics_statistic_t *statistic) { POPEN_INSTANCE *instance = spawn_popen_run(command_to_run); if (instance) { char buffer[4 + 1]; - char *s = fgets(buffer, 4, instance->child_stdout_fp); + char *s = fgets(buffer, 4, spawn_popen_stdout(instance)); int exit_code = spawn_popen_wait(instance); if (exit_code) @@ -1075,6 +914,58 @@ void analytics_statistic_send(const analytics_statistic_t *statistic) { freez(command_to_run); } +void analytics_reset(void) { + analytics_data.data_length = 0; + analytics_set_data(&analytics_data.netdata_config_stream_enabled, "null"); + analytics_set_data(&analytics_data.netdata_config_memory_mode, "null"); + analytics_set_data(&analytics_data.netdata_config_exporting_enabled, "null"); + analytics_set_data(&analytics_data.netdata_exporting_connectors, "null"); + analytics_set_data(&analytics_data.netdata_allmetrics_prometheus_used, "null"); + analytics_set_data(&analytics_data.netdata_allmetrics_shell_used, "null"); + analytics_set_data(&analytics_data.netdata_allmetrics_json_used, "null"); + analytics_set_data(&analytics_data.netdata_dashboard_used, "null"); + analytics_set_data(&analytics_data.netdata_collectors, "null"); + analytics_set_data(&analytics_data.netdata_collectors_count, "null"); + analytics_set_data(&analytics_data.netdata_buildinfo, "null"); + analytics_set_data(&analytics_data.netdata_config_page_cache_size, "null"); + analytics_set_data(&analytics_data.netdata_config_multidb_disk_quota, "null"); + 
analytics_set_data(&analytics_data.netdata_config_https_enabled, "null"); + analytics_set_data(&analytics_data.netdata_config_web_enabled, "null"); + analytics_set_data(&analytics_data.netdata_config_release_channel, "null"); + analytics_set_data(&analytics_data.netdata_mirrored_host_count, "null"); + analytics_set_data(&analytics_data.netdata_mirrored_hosts_reachable, "null"); + analytics_set_data(&analytics_data.netdata_mirrored_hosts_unreachable, "null"); + analytics_set_data(&analytics_data.netdata_notification_methods, "null"); + analytics_set_data(&analytics_data.netdata_alarms_normal, "null"); + analytics_set_data(&analytics_data.netdata_alarms_warning, "null"); + analytics_set_data(&analytics_data.netdata_alarms_critical, "null"); + analytics_set_data(&analytics_data.netdata_charts_count, "null"); + analytics_set_data(&analytics_data.netdata_metrics_count, "null"); + analytics_set_data(&analytics_data.netdata_config_is_parent, "null"); + analytics_set_data(&analytics_data.netdata_config_hosts_available, "null"); + analytics_set_data(&analytics_data.netdata_host_cloud_available, "null"); + analytics_set_data(&analytics_data.netdata_host_aclk_implementation, "null"); + analytics_set_data(&analytics_data.netdata_host_aclk_available, "null"); + analytics_set_data(&analytics_data.netdata_host_aclk_protocol, "null"); + analytics_set_data(&analytics_data.netdata_host_agent_claimed, "null"); + analytics_set_data(&analytics_data.netdata_host_cloud_enabled, "null"); + analytics_set_data(&analytics_data.netdata_config_https_available, "null"); + analytics_set_data(&analytics_data.netdata_install_type, "null"); + analytics_set_data(&analytics_data.netdata_config_is_private_registry, "null"); + analytics_set_data(&analytics_data.netdata_config_use_private_registry, "null"); + analytics_set_data(&analytics_data.netdata_config_oom_score, "null"); + analytics_set_data(&analytics_data.netdata_prebuilt_distro, "null"); + 
analytics_set_data(&analytics_data.netdata_fail_reason, "null"); + + analytics_data.prometheus_hits = 0; + analytics_data.shell_hits = 0; + analytics_data.json_hits = 0; + analytics_data.dashboard_hits = 0; + analytics_data.charts_count = 0; + analytics_data.metrics_count = 0; + analytics_data.exporting_enabled = false; +} + void analytics_init(void) { spinlock_init(&analytics_data.spinlock); diff --git a/src/daemon/analytics.h b/src/daemon/analytics.h index 747cf6070ef3a7..b818bea9387c6c 100644 --- a/src/daemon/analytics.h +++ b/src/daemon/analytics.h @@ -76,9 +76,8 @@ struct analytics_data { bool exporting_enabled; }; -void set_late_global_environment(struct rrdhost_system_info *system_info); +void set_late_analytics_variables(struct rrdhost_system_info *system_info); void analytics_free_data(void); -void set_global_environment(void); void analytics_log_shell(void); void analytics_log_json(void); void analytics_log_prometheus(void); @@ -86,6 +85,7 @@ void analytics_log_dashboard(void); void analytics_gather_mutable_meta_data(void); void analytics_report_oom_score(long long int score); void get_system_timezone(void); +void analytics_reset(void); void analytics_init(void); typedef struct { diff --git a/src/daemon/buildinfo.c b/src/daemon/buildinfo.c index ace96199a821d2..575f287faa99d2 100644 --- a/src/daemon/buildinfo.c +++ b/src/daemon/buildinfo.c @@ -1069,18 +1069,8 @@ __attribute__((constructor)) void initialize_build_info(void) { #endif #endif -#ifdef ENABLE_ACLK build_info_set_status(BIB_FEATURE_CLOUD, true); build_info_set_status(BIB_CONNECTIVITY_ACLK, true); -#else - build_info_set_status(BIB_FEATURE_CLOUD, false); -#ifdef DISABLE_CLOUD - build_info_set_value(BIB_FEATURE_CLOUD, "disabled"); -#else - build_info_set_value(BIB_FEATURE_CLOUD, "unavailable"); -#endif -#endif - build_info_set_status(BIB_FEATURE_HEALTH, true); build_info_set_status(BIB_FEATURE_STREAMING, true); build_info_set_status(BIB_FEATURE_BACKFILLING, true); @@ -1126,9 +1116,7 @@ 
__attribute__((constructor)) void initialize_build_info(void) { #ifdef ENABLE_WEBRTC build_info_set_status(BIB_CONNECTIVITY_WEBRTC, true); #endif -#ifdef ENABLE_HTTPS build_info_set_status(BIB_CONNECTIVITY_NATIVE_HTTPS, true); -#endif #if defined(HAVE_X509_VERIFY_PARAM_set1_host) && HAVE_X509_VERIFY_PARAM_set1_host == 1 build_info_set_status(BIB_CONNECTIVITY_TLS_HOST_VERIFY, true); #endif @@ -1162,9 +1150,7 @@ __attribute__((constructor)) void initialize_build_info(void) { #ifdef HAVE_LIBDATACHANNEL build_info_set_status(BIB_LIB_LIBDATACHANNEL, true); #endif -#ifdef ENABLE_OPENSSL build_info_set_status(BIB_LIB_OPENSSL, true); -#endif #ifdef ENABLE_JSONC build_info_set_status(BIB_LIB_JSONC, true); #endif diff --git a/src/daemon/commands.c b/src/daemon/commands.c index 230e8527eac348..35573e071e3cb7 100644 --- a/src/daemon/commands.c +++ b/src/daemon/commands.c @@ -47,9 +47,7 @@ static cmd_status_t cmd_ping_execute(char *args, char **message); static cmd_status_t cmd_aclk_state(char *args, char **message); static cmd_status_t cmd_version(char *args, char **message); static cmd_status_t cmd_dumpconfig(char *args, char **message); -#ifdef ENABLE_ACLK static cmd_status_t cmd_remove_node(char *args, char **message); -#endif static command_info_t command_info_array[] = { {"help", cmd_help_execute, CMD_TYPE_HIGH_PRIORITY}, // show help menu @@ -65,9 +63,7 @@ static command_info_t command_info_array[] = { {"aclk-state", cmd_aclk_state, CMD_TYPE_ORTHOGONAL}, {"version", cmd_version, CMD_TYPE_ORTHOGONAL}, {"dumpconfig", cmd_dumpconfig, CMD_TYPE_ORTHOGONAL}, -#ifdef ENABLE_ACLK {"remove-stale-node", cmd_remove_node, CMD_TYPE_ORTHOGONAL} -#endif }; /* Mutexes for commands of type CMD_TYPE_ORTHOGONAL */ @@ -135,10 +131,8 @@ static cmd_status_t cmd_help_execute(char *args, char **message) " Returns current state of ACLK and Cloud connection. 
(optionally in json).\n" "dumpconfig\n" " Returns the current netdata.conf on stdout.\n" -#ifdef ENABLE_ACLK "remove-stale-node node_id|machine_guid|hostname|ALL_NODES\n" " Unregisters and removes a node from the cloud.\n" -#endif "version\n" " Returns the netdata version.\n", MAX_COMMAND_LENGTH - 1); @@ -193,17 +187,42 @@ static cmd_status_t cmd_fatal_execute(char *args, char **message) return CMD_STATUS_SUCCESS; } -static cmd_status_t cmd_reload_claiming_state_execute(char *args, char **message) -{ - (void)args; - (void)message; -#if defined(DISABLE_CLOUD) || !defined(ENABLE_ACLK) - netdata_log_info("The claiming feature has been explicitly disabled"); - *message = strdupz("This agent cannot be claimed, it was built without support for Cloud"); - return CMD_STATUS_FAILURE; -#endif - netdata_log_info("COMMAND: Reloading Agent Claiming configuration."); - claim_reload_all(); +static cmd_status_t cmd_reload_claiming_state_execute(char *args __maybe_unused, char **message) { + char msg[1024]; + + CLOUD_STATUS status = claim_reload_and_wait_online(); + switch(status) { + case CLOUD_STATUS_ONLINE: + snprintfz(msg, sizeof(msg), + "Netdata Agent is claimed to Netdata Cloud and is currently online."); + break; + + case CLOUD_STATUS_BANNED: + snprintfz(msg, sizeof(msg), + "Netdata Agent is claimed to Netdata Cloud, but it is banned."); + break; + + default: + case CLOUD_STATUS_AVAILABLE: + snprintfz(msg, sizeof(msg), + "Netdata Agent is not claimed to Netdata Cloud: %s", + claim_agent_failure_reason_get()); + break; + + case CLOUD_STATUS_OFFLINE: + snprintfz(msg, sizeof(msg), + "Netdata Agent is claimed to Netdata Cloud, but it is currently offline: %s", + cloud_status_aclk_offline_reason()); + break; + + case CLOUD_STATUS_INDIRECT: + snprintfz(msg, sizeof(msg), + "Netdata Agent is not claimed to Netdata Cloud, but it is currently online via parent."); + break; + } + + *message = strdupz(msg); + return CMD_STATUS_SUCCESS; } @@ -306,17 +325,10 @@ static cmd_status_t 
cmd_ping_execute(char *args, char **message) static cmd_status_t cmd_aclk_state(char *args, char **message) { netdata_log_info("COMMAND: Reopening aclk/cloud state."); -#ifdef ENABLE_ACLK if (strstr(args, "json")) *message = aclk_state_json(); else *message = aclk_state(); -#else - if (strstr(args, "json")) - *message = strdupz("{\"aclk-available\":false}"); - else - *message = strdupz("ACLK Available: No"); -#endif return CMD_STATUS_SUCCESS; } @@ -338,14 +350,12 @@ static cmd_status_t cmd_dumpconfig(char *args, char **message) (void)args; BUFFER *wb = buffer_create(1024, NULL); - config_generate(wb, 0); + netdata_conf_generate(wb, 0); *message = strdupz(buffer_tostring(wb)); buffer_free(wb); return CMD_STATUS_SUCCESS; } -#ifdef ENABLE_ACLK - static int remove_ephemeral_host(BUFFER *wb, RRDHOST *host, bool report_error) { if (host == localhost) { @@ -365,8 +375,7 @@ static int remove_ephemeral_host(BUFFER *wb, RRDHOST *host, bool report_error) sql_set_host_label(&host->host_uuid, "_is_ephemeral", "true"); aclk_host_state_update(host, 0, 0); unregister_node(host->machine_guid); - freez(host->node_id); - host->node_id = NULL; + uuid_clear(host->node_id); buffer_sprintf(wb, "Unregistering node with machine guid %s, hostname = %s", host->machine_guid, rrdhost_hostname(host)); rrd_wrlock(); rrdhost_free___while_having_rrd_wrlock(host, true); @@ -438,7 +447,6 @@ static cmd_status_t cmd_remove_node(char *args, char **message) buffer_free(wb); return CMD_STATUS_SUCCESS; } -#endif static void cmd_lock_exclusive(unsigned index) { diff --git a/src/daemon/commands.h b/src/daemon/commands.h index 14c2ec49e0bf3e..8327d28d2f9be5 100644 --- a/src/daemon/commands.h +++ b/src/daemon/commands.h @@ -20,9 +20,7 @@ typedef enum cmd { CMD_ACLK_STATE, CMD_VERSION, CMD_DUMPCONFIG, -#ifdef ENABLE_ACLK CMD_REMOVE_NODE, -#endif CMD_TOTAL_COMMANDS } cmd_t; diff --git a/src/daemon/common.c b/src/daemon/common.c index 6c824eec68cbef..6cc9c067cf1dea 100644 --- a/src/daemon/common.c +++ 
b/src/daemon/common.c @@ -11,6 +11,7 @@ char *netdata_configured_web_dir = WEB_DIR; char *netdata_configured_cache_dir = CACHE_DIR; char *netdata_configured_varlib_dir = VARLIB_DIR; char *netdata_configured_lock_dir = VARLIB_DIR "/lock"; +char *netdata_configured_cloud_dir = VARLIB_DIR "/cloud.d"; char *netdata_configured_home_dir = VARLIB_DIR; char *netdata_configured_host_prefix = NULL; char *netdata_configured_timezone = NULL; @@ -19,12 +20,6 @@ int32_t netdata_configured_utc_offset = 0; bool netdata_ready = false; -#if defined( DISABLE_CLOUD ) || !defined( ENABLE_ACLK ) -int netdata_cloud_enabled = CONFIG_BOOLEAN_NO; -#else -int netdata_cloud_enabled = CONFIG_BOOLEAN_AUTO; -#endif - long get_netdata_cpus(void) { static long processors = 0; @@ -63,135 +58,3 @@ long get_netdata_cpus(void) { return processors; } - -const char *cloud_status_to_string(CLOUD_STATUS status) { - switch(status) { - default: - case CLOUD_STATUS_UNAVAILABLE: - return "unavailable"; - - case CLOUD_STATUS_AVAILABLE: - return "available"; - - case CLOUD_STATUS_DISABLED: - return "disabled"; - - case CLOUD_STATUS_BANNED: - return "banned"; - - case CLOUD_STATUS_OFFLINE: - return "offline"; - - case CLOUD_STATUS_ONLINE: - return "online"; - } -} - -CLOUD_STATUS cloud_status(void) { -#ifdef ENABLE_ACLK - if(aclk_disable_runtime) - return CLOUD_STATUS_BANNED; - - if(aclk_connected) - return CLOUD_STATUS_ONLINE; - - if(netdata_cloud_enabled == CONFIG_BOOLEAN_YES) { - char *agent_id = get_agent_claimid(); - bool claimed = agent_id != NULL; - freez(agent_id); - - if(claimed) - return CLOUD_STATUS_OFFLINE; - } - - if(netdata_cloud_enabled != CONFIG_BOOLEAN_NO) - return CLOUD_STATUS_AVAILABLE; - - return CLOUD_STATUS_DISABLED; -#else - return CLOUD_STATUS_UNAVAILABLE; -#endif -} - -time_t cloud_last_change(void) { -#ifdef ENABLE_ACLK - time_t ret = MAX(last_conn_time_mqtt, last_disconnect_time); - if(!ret) ret = netdata_start_time; - return ret; -#else - return netdata_start_time; -#endif -} - 
-time_t cloud_next_connection_attempt(void) { -#ifdef ENABLE_ACLK - return next_connection_attempt; -#else - return 0; -#endif -} - -size_t cloud_connection_id(void) { -#ifdef ENABLE_ACLK - return aclk_connection_counter; -#else - return 0; -#endif -} - -const char *cloud_offline_reason() { -#ifdef ENABLE_ACLK - if(!netdata_cloud_enabled) - return "disabled"; - - if(aclk_disable_runtime) - return "banned"; - - return aclk_status_to_string(); -#else - return "disabled"; -#endif -} - -const char *cloud_base_url() { -#ifdef ENABLE_ACLK - return aclk_cloud_base_url; -#else - return NULL; -#endif -} - -CLOUD_STATUS buffer_json_cloud_status(BUFFER *wb, time_t now_s) { - CLOUD_STATUS status = cloud_status(); - - buffer_json_member_add_object(wb, "cloud"); - { - size_t id = cloud_connection_id(); - time_t last_change = cloud_last_change(); - time_t next_connect = cloud_next_connection_attempt(); - buffer_json_member_add_uint64(wb, "id", id); - buffer_json_member_add_string(wb, "status", cloud_status_to_string(status)); - buffer_json_member_add_time_t(wb, "since", last_change); - buffer_json_member_add_time_t(wb, "age", now_s - last_change); - - if (status != CLOUD_STATUS_ONLINE) - buffer_json_member_add_string(wb, "reason", cloud_offline_reason()); - - if (status == CLOUD_STATUS_OFFLINE && next_connect > now_s) { - buffer_json_member_add_time_t(wb, "next_check", next_connect); - buffer_json_member_add_time_t(wb, "next_in", next_connect - now_s); - } - - if (cloud_base_url()) - buffer_json_member_add_string(wb, "url", cloud_base_url()); - - char *claim_id = get_agent_claimid(); - if(claim_id) { - buffer_json_member_add_string(wb, "claim_id", claim_id); - freez(claim_id); - } - } - buffer_json_object_close(wb); // cloud - - return status; -} diff --git a/src/daemon/common.h b/src/daemon/common.h index 1dea19c5b8b573..732f55536f8f8a 100644 --- a/src/daemon/common.h +++ b/src/daemon/common.h @@ -4,7 +4,7 @@ #define NETDATA_COMMON_H 1 #include "libnetdata/libnetdata.h" 
-#include "event_loop.h" +#include "libuv_workers.h" // ---------------------------------------------------------------------------- // shortcuts for the default netdata configuration @@ -26,7 +26,7 @@ #define config_exists(section, name) appconfig_exists(&netdata_config, section, name) #define config_move(section_old, name_old, section_new, name_new) appconfig_move(&netdata_config, section_old, name_old, section_new, name_new) -#define config_generate(buffer, only_changed) appconfig_generate(&netdata_config, buffer, only_changed) +#define netdata_conf_generate(buffer, only_changed) appconfig_generate(&netdata_config, buffer, only_changed, true) #define config_section_destroy(section) appconfig_section_destroy_non_loaded(&netdata_config, section) #define config_section_option_destroy(section, name) appconfig_section_option_destroy_non_loaded(&netdata_config, section, name) @@ -34,6 +34,8 @@ // ---------------------------------------------------------------------------- // netdata include files +#include "web/api/maps/maps.h" + #include "daemon/config/dyncfg.h" #include "global_statistics.h" @@ -103,6 +105,7 @@ extern char *netdata_configured_web_dir; extern char *netdata_configured_cache_dir; extern char *netdata_configured_varlib_dir; extern char *netdata_configured_lock_dir; +extern char *netdata_configured_cloud_dir; extern char *netdata_configured_home_dir; extern char *netdata_configured_host_prefix; extern char *netdata_configured_timezone; @@ -111,28 +114,10 @@ extern int32_t netdata_configured_utc_offset; extern int netdata_anonymous_statistics_enabled; extern bool netdata_ready; -extern int netdata_cloud_enabled; - extern time_t netdata_start_time; long get_netdata_cpus(void); -typedef enum __attribute__((packed)) { - CLOUD_STATUS_UNAVAILABLE = 0, // cloud and aclk functionality is not available on this agent - CLOUD_STATUS_AVAILABLE, // cloud and aclk functionality is available, but the agent is not claimed - CLOUD_STATUS_DISABLED, // cloud and aclk 
functionality is available, but it is disabled - CLOUD_STATUS_BANNED, // the agent has been banned from cloud - CLOUD_STATUS_OFFLINE, // the agent tries to connect to cloud, but cannot do it - CLOUD_STATUS_ONLINE, // the agent is connected to cloud -} CLOUD_STATUS; - -const char *cloud_status_to_string(CLOUD_STATUS status); -CLOUD_STATUS cloud_status(void); -time_t cloud_last_change(void); -time_t cloud_next_connection_attempt(void); -size_t cloud_connection_id(void); -const char *cloud_offline_reason(void); -const char *cloud_base_url(void); -CLOUD_STATUS buffer_json_cloud_status(BUFFER *wb, time_t now_s); +void set_environment_for_plugins_and_scripts(void); #endif /* NETDATA_COMMON_H */ diff --git a/src/daemon/config/dyncfg-echo.c b/src/daemon/config/dyncfg-echo.c index 95d40a025da78a..f6eb48c355e89c 100644 --- a/src/daemon/config/dyncfg-echo.c +++ b/src/daemon/config/dyncfg-echo.c @@ -96,7 +96,7 @@ void dyncfg_echo(const DICTIONARY_ITEM *item, DYNCFG *df, const char *id __maybe dyncfg_echo_cb, e, NULL, NULL, NULL, NULL, - NULL, string2str(df->dyncfg.source)); + NULL, string2str(df->dyncfg.source), false); } // ---------------------------------------------------------------------------- @@ -129,7 +129,7 @@ void dyncfg_echo_update(const DICTIONARY_ITEM *item, DYNCFG *df, const char *id) dyncfg_echo_cb, e, NULL, NULL, NULL, NULL, - df->dyncfg.payload, string2str(df->dyncfg.source)); + df->dyncfg.payload, string2str(df->dyncfg.source), false); } // ---------------------------------------------------------------------------- @@ -164,7 +164,7 @@ static void dyncfg_echo_payload_add(const DICTIONARY_ITEM *item_template __maybe dyncfg_echo_cb, e, NULL, NULL, NULL, NULL, - df_job->dyncfg.payload, string2str(df_job->dyncfg.source)); + df_job->dyncfg.payload, string2str(df_job->dyncfg.source), false); } void dyncfg_echo_add(const DICTIONARY_ITEM *item_template, const DICTIONARY_ITEM *item_job, DYNCFG *df_template, DYNCFG *df_job, const char *template_id, const char 
*job_name) { diff --git a/src/daemon/config/dyncfg-unittest.c b/src/daemon/config/dyncfg-unittest.c index 775dc7cbd43714..136ea838f8c388 100644 --- a/src/daemon/config/dyncfg-unittest.c +++ b/src/daemon/config/dyncfg-unittest.c @@ -473,7 +473,7 @@ static int dyncfg_unittest_run(const char *cmd, BUFFER *wb, const char *payload, NULL, NULL, NULL, NULL, NULL, NULL, - pld, source); + pld, source, false); if(!DYNCFG_RESP_SUCCESS(rc)) { nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG UNITTEST: failed to run: %s; returned code %d", cmd, rc); dyncfg_unittest_register_error(NULL, NULL); diff --git a/src/daemon/daemon.c b/src/daemon/daemon.c index 2392d4cc1d1f23..b97f687fcbdea7 100644 --- a/src/daemon/daemon.c +++ b/src/daemon/daemon.c @@ -3,34 +3,24 @@ #include "common.h" #include -char pidfile[FILENAME_MAX + 1] = ""; -char claiming_directory[FILENAME_MAX + 1]; -char netdata_exe_path[FILENAME_MAX + 1]; -char netdata_exe_file[FILENAME_MAX + 1]; +char *pidfile = NULL; +char *netdata_exe_path = NULL; void get_netdata_execution_path(void) { - int ret; - size_t exepath_size = 0; - struct passwd *passwd = NULL; - char *user = NULL; - - passwd = getpwuid(getuid()); - user = (passwd && passwd->pw_name) ? passwd->pw_name : ""; - - exepath_size = sizeof(netdata_exe_file) - 1; - ret = uv_exepath(netdata_exe_file, &exepath_size); - if (0 != ret) { - netdata_log_error("uv_exepath(\"%s\", %u) (user: %s) failed (%s).", netdata_exe_file, (unsigned)exepath_size, user, - uv_strerror(ret)); - fatal("Cannot start netdata without getting execution path."); + struct passwd *passwd = getpwuid(getuid()); + char *user = (passwd && passwd->pw_name) ? passwd->pw_name : ""; + + char b[FILENAME_MAX + 1]; + size_t b_size = sizeof(b) - 1; + int ret = uv_exepath(b, &b_size); + if (ret != 0) { + fatal("Cannot start netdata without getting execution path. 
" + "(uv_exepath(\"%s\", %zu), user: '%s', failed: %s).", + b, b_size, user, uv_strerror(ret)); } + b[b_size] = '\0'; - netdata_exe_file[exepath_size] = '\0'; - - // macOS's dirname(3) does not modify passed string - char *tmpdir = strdupz(netdata_exe_file); - strcpy(netdata_exe_path, dirname(tmpdir)); - freez(tmpdir); + netdata_exe_path = strdupz(b); } static void fix_directory_file_permissions(const char *dirname, uid_t uid, gid_t gid, bool recursive) @@ -89,7 +79,7 @@ static void prepare_required_directories(uid_t uid, gid_t gid) { change_dir_ownership(netdata_configured_varlib_dir, uid, gid, false); change_dir_ownership(netdata_configured_lock_dir, uid, gid, false); change_dir_ownership(netdata_configured_log_dir, uid, gid, false); - change_dir_ownership(claiming_directory, uid, gid, false); + change_dir_ownership(netdata_configured_cloud_dir, uid, gid, false); char filename[FILENAME_MAX + 1]; snprintfz(filename, FILENAME_MAX, "%s/registry", netdata_configured_varlib_dir); @@ -112,7 +102,7 @@ static int become_user(const char *username, int pid_fd) { prepare_required_directories(uid, gid); - if(pidfile[0]) { + if(pidfile && *pidfile) { if(chown(pidfile, uid, gid) == -1) netdata_log_error("Cannot chown '%s' to %u:%u", pidfile, (unsigned int)uid, (unsigned int)gid); } @@ -465,7 +455,7 @@ int become_daemon(int dont_fork, const char *user) // generate our pid file int pidfd = -1; - if(pidfile[0]) { + if(pidfile && *pidfile) { pidfd = open(pidfile, O_WRONLY | O_CREAT | O_CLOEXEC, 0644); if(pidfd >= 0) { if(ftruncate(pidfd, 0) != 0) @@ -490,9 +480,6 @@ int become_daemon(int dont_fork, const char *user) // never become a problem sched_setscheduler_set(); - // Set claiming directory based on user config directory with correct ownership - snprintfz(claiming_directory, FILENAME_MAX, "%s/cloud.d", netdata_configured_varlib_dir); - if(user && *user) { if(become_user(user, pidfd) != 0) { netdata_log_error("Cannot become user '%s'. 
Continuing as we are.", user); diff --git a/src/daemon/daemon.h b/src/daemon/daemon.h index 1f8837fd6bc9c3..13ef1f64713e0a 100644 --- a/src/daemon/daemon.h +++ b/src/daemon/daemon.h @@ -9,8 +9,7 @@ void netdata_cleanup_and_exit(int ret, const char *action, const char *action_re void get_netdata_execution_path(void); -extern char pidfile[]; -extern char netdata_exe_file[]; -extern char netdata_exe_path[]; +extern char *pidfile; +extern char *netdata_exe_path; #endif /* NETDATA_DAEMON_H */ diff --git a/src/daemon/environment.c b/src/daemon/environment.c new file mode 100644 index 00000000000000..d5f82a0c3c4a7a --- /dev/null +++ b/src/daemon/environment.c @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "common.h" + +static const char *verify_required_directory(const char *dir) +{ + if (chdir(dir) == -1) + fatal("Cannot change directory to '%s'", dir); + + DIR *d = opendir(dir); + if (!d) + fatal("Cannot examine the contents of directory '%s'", dir); + closedir(d); + + return dir; +} + +static const char *verify_or_create_required_directory(const char *dir) { + errno_clear(); + + if (mkdir(dir, 0755) != 0 && errno != EEXIST) + fatal("Cannot create required directory '%s'", dir); + + return verify_required_directory(dir); +} + +static const char *verify_or_create_required_private_directory(const char *dir) { + errno_clear(); + + if (mkdir(dir, 0770) != 0 && errno != EEXIST) + fatal("Cannot create required directory '%s'", dir); + + return verify_required_directory(dir); +} + +void set_environment_for_plugins_and_scripts(void) { + { + char b[16]; + snprintfz(b, sizeof(b) - 1, "%d", default_rrd_update_every); + nd_setenv("NETDATA_UPDATE_EVERY", b, 1); + } + + nd_setenv("NETDATA_VERSION", NETDATA_VERSION, 1); + nd_setenv("NETDATA_HOSTNAME", netdata_configured_hostname, 1); + nd_setenv("NETDATA_CONFIG_DIR", verify_required_directory(netdata_configured_user_config_dir), 1); + nd_setenv("NETDATA_USER_CONFIG_DIR", 
verify_required_directory(netdata_configured_user_config_dir), 1); + nd_setenv("NETDATA_STOCK_CONFIG_DIR", verify_required_directory(netdata_configured_stock_config_dir), 1); + nd_setenv("NETDATA_PLUGINS_DIR", verify_required_directory(netdata_configured_primary_plugins_dir), 1); + nd_setenv("NETDATA_WEB_DIR", verify_required_directory(netdata_configured_web_dir), 1); + nd_setenv("NETDATA_CACHE_DIR", verify_or_create_required_directory(netdata_configured_cache_dir), 1); + nd_setenv("NETDATA_LIB_DIR", verify_or_create_required_directory(netdata_configured_varlib_dir), 1); + nd_setenv("NETDATA_LOCK_DIR", verify_or_create_required_directory(netdata_configured_lock_dir), 1); + nd_setenv("NETDATA_LOG_DIR", verify_or_create_required_directory(netdata_configured_log_dir), 1); + nd_setenv("NETDATA_HOST_PREFIX", netdata_configured_host_prefix, 1); + + nd_setenv("CLAIMING_DIR", verify_or_create_required_private_directory(netdata_configured_cloud_dir), 1); + + { + BUFFER *user_plugins_dirs = buffer_create(FILENAME_MAX, NULL); + + for (size_t i = 1; i < PLUGINSD_MAX_DIRECTORIES && plugin_directories[i]; i++) { + if (i > 1) + buffer_strcat(user_plugins_dirs, " "); + buffer_strcat(user_plugins_dirs, plugin_directories[i]); + } + + nd_setenv("NETDATA_USER_PLUGINS_DIRS", buffer_tostring(user_plugins_dirs), 1); + + buffer_free(user_plugins_dirs); + } + + char *default_port = appconfig_get(&netdata_config, CONFIG_SECTION_WEB, "default port", NULL); + int clean = 0; + if (!default_port) { + default_port = strdupz("19999"); + clean = 1; + } + + nd_setenv("NETDATA_LISTEN_PORT", default_port, 1); + if (clean) + freez(default_port); + + // set the path we need + char path[4096], *p = getenv("PATH"); + if (!p) p = "/bin:/usr/bin"; + snprintfz(path, sizeof(path), "%s:%s", p, "/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin"); + setenv("PATH", config_get(CONFIG_SECTION_ENV_VARS, "PATH", path), 1); + + // python options + p = getenv("PYTHONPATH"); + if (!p) p = ""; + setenv("PYTHONPATH", 
config_get(CONFIG_SECTION_ENV_VARS, "PYTHONPATH", p), 1); + + // disable buffering for python plugins + setenv("PYTHONUNBUFFERED", "1", 1); + + // switch to standard locale for plugins + setenv("LC_ALL", "C", 1); +} diff --git a/src/daemon/event_loop.c b/src/daemon/libuv_workers.c similarity index 99% rename from src/daemon/event_loop.c rename to src/daemon/libuv_workers.c index d1908ec15d7e5f..441002d068d53d 100644 --- a/src/daemon/event_loop.c +++ b/src/daemon/libuv_workers.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later #include -#include "event_loop.h" +#include "libuv_workers.h" // Register workers void register_libuv_worker_jobs() { diff --git a/src/daemon/event_loop.h b/src/daemon/libuv_workers.h similarity index 100% rename from src/daemon/event_loop.h rename to src/daemon/libuv_workers.h diff --git a/src/daemon/main.c b/src/daemon/main.c index 17fef84495a51b..29ad8bbf9117ea 100644 --- a/src/daemon/main.c +++ b/src/daemon/main.c @@ -6,6 +6,7 @@ #include "static_threads.h" #include "database/engine/page_test.h" +#include #ifdef OS_WINDOWS #include "win_system-info.h" @@ -480,15 +481,13 @@ void netdata_cleanup_and_exit(int ret, const char *action, const char *action_re // unlink the pid - if(pidfile[0]) { + if(pidfile && *pidfile) { if(unlink(pidfile) != 0) netdata_log_error("EXIT: cannot unlink pidfile '%s'.", pidfile); } watcher_step_complete(WATCHER_STEP_ID_REMOVE_PID_FILE); -#ifdef ENABLE_HTTPS netdata_ssl_cleanup(); -#endif watcher_step_complete(WATCHER_STEP_ID_FREE_OPENSSL_STRUCTURES); (void) unlink(agent_incomplete_shutdown_file); @@ -496,6 +495,7 @@ void netdata_cleanup_and_exit(int ret, const char *action, const char *action_re watcher_shutdown_end(); watcher_thread_stop(); + curl_global_cleanup(); #ifdef OS_WINDOWS return; @@ -807,8 +807,6 @@ int help(int exitcode) { " are enabled or not, in JSON format.\n\n" " -W simple-pattern pattern string\n" " Check if string matches pattern and exit.\n\n" - " -W \"claim -token=TOKEN 
-rooms=ROOM1,ROOM2\"\n" - " Claim the agent to the workspace rooms pointed to by TOKEN and ROOM*.\n\n" #ifdef OS_WINDOWS " -W perflibdump [key]\n" " Dump the Windows Performance Counters Registry in JSON.\n\n" @@ -825,7 +823,6 @@ int help(int exitcode) { return exitcode; } -#ifdef ENABLE_HTTPS static void security_init(){ char filename[FILENAME_MAX + 1]; snprintfz(filename, FILENAME_MAX, "%s/ssl/key.pem",netdata_configured_user_config_dir); @@ -839,7 +836,6 @@ static void security_init(){ netdata_ssl_initialize_openssl(); } -#endif static void log_init(void) { nd_log_set_facility(config_get(CONFIG_SECTION_LOGS, "facility", "daemon")); @@ -881,21 +877,19 @@ static void log_init(void) { snprintfz(filename, FILENAME_MAX, "%s/health.log", netdata_configured_log_dir); nd_log_set_user_settings(NDLS_HEALTH, config_get(CONFIG_SECTION_LOGS, "health", filename)); -#ifdef ENABLE_ACLK aclklog_enabled = config_get_boolean(CONFIG_SECTION_CLOUD, "conversation log", CONFIG_BOOLEAN_NO); if (aclklog_enabled) { snprintfz(filename, FILENAME_MAX, "%s/aclk.log", netdata_configured_log_dir); nd_log_set_user_settings(NDLS_ACLK, config_get(CONFIG_SECTION_CLOUD, "conversation log file", filename)); } -#endif + + aclk_config_get_query_scope(); } -char *initialize_lock_directory_path(char *prefix) -{ +static char *get_varlib_subdir_from_config(const char *prefix, const char *dir) { char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s/lock", prefix); - - return config_get(CONFIG_SECTION_DIRECTORIES, "lock", filename); + snprintfz(filename, FILENAME_MAX, "%s/%s", prefix, dir); + return config_get(CONFIG_SECTION_DIRECTORIES, dir, filename); } static void backwards_compatible_config() { @@ -1175,7 +1169,8 @@ static void get_netdata_configured_variables() netdata_configured_cache_dir = config_get(CONFIG_SECTION_DIRECTORIES, "cache", netdata_configured_cache_dir); netdata_configured_varlib_dir = config_get(CONFIG_SECTION_DIRECTORIES, "lib", netdata_configured_varlib_dir); - 
netdata_configured_lock_dir = initialize_lock_directory_path(netdata_configured_varlib_dir); + netdata_configured_lock_dir = get_varlib_subdir_from_config(netdata_configured_varlib_dir, "lock"); + netdata_configured_cloud_dir = get_varlib_subdir_from_config(netdata_configured_varlib_dir, "cloud.d"); { pluginsd_initialize_plugin_directories(); @@ -1309,14 +1304,14 @@ static bool load_netdata_conf(char *filename, char overwrite_used, char **user) netdata_log_error("CONFIG: cannot load config file '%s'.", filename); } else { - filename = strdupz_path_subpath(netdata_configured_user_config_dir, "netdata.conf"); + filename = filename_from_path_entry_strdupz(netdata_configured_user_config_dir, "netdata.conf"); ret = config_load(filename, overwrite_used, NULL); if(!ret) { netdata_log_info("CONFIG: cannot load user config '%s'. Will try the stock version.", filename); freez(filename); - filename = strdupz_path_subpath(netdata_configured_stock_config_dir, "netdata.conf"); + filename = filename_from_path_entry_strdupz(netdata_configured_stock_config_dir, "netdata.conf"); ret = config_load(filename, overwrite_used, NULL); if(!ret) netdata_log_info("CONFIG: cannot load stock config '%s'. Running with internal defaults.", filename); @@ -1351,7 +1346,7 @@ int get_system_info(struct rrdhost_system_info *system_info) { char line[200 + 1]; // Removed the double strlens, if the Coverity tainted string warning reappears I'll revert. // One time init code, but I'm curious about the warning... 
- while (fgets(line, 200, instance->child_stdout_fp) != NULL) { + while (fgets(line, 200, spawn_popen_stdout(instance)) != NULL) { char *value=line; while (*value && *value != '=') value++; if (*value=='=') { @@ -1366,7 +1361,7 @@ int get_system_info(struct rrdhost_system_info *system_info) { if(unlikely(rrdhost_set_system_info_variable(system_info, line, value))) { netdata_log_error("Unexpected environment variable %s=%s", line, value); } else { - setenv(line, value, 1); + nd_setenv(line, value, 1); } } } @@ -1405,6 +1400,7 @@ int unittest_rrdpush_compressions(void); int uuid_unittest(void); int progress_unittest(void); int dyncfg_unittest(void); +bool netdata_random_session_id_generate(void); #ifdef OS_WINDOWS int windows_perflib_dump(const char *key); @@ -1455,6 +1451,8 @@ int netdata_main(int argc, char **argv) { // set the name for logging program_name = "netdata"; + curl_global_init(CURL_GLOBAL_ALL); + // parse options { int num_opts = sizeof(option_definitions) / sizeof(struct option_def); @@ -1483,7 +1481,7 @@ int netdata_main(int argc, char **argv) { } else { netdata_log_debug(D_OPTIONS, "Configuration loaded from %s.", optarg); - load_cloud_conf(1); + cloud_conf_load(1); config_loaded = 1; } break; @@ -1499,8 +1497,7 @@ int netdata_main(int argc, char **argv) { config_set(CONFIG_SECTION_WEB, "bind to", optarg); break; case 'P': - strncpy(pidfile, optarg, FILENAME_MAX); - pidfile[FILENAME_MAX] = '\0'; + pidfile = strdupz(optarg); break; case 'p': config_set(CONFIG_SECTION_GLOBAL, "default port", optarg); @@ -1522,7 +1519,6 @@ int netdata_main(int argc, char **argv) { { char* stacksize_string = "stacksize="; char* debug_flags_string = "debug_flags="; - char* claim_string = "claim"; #ifdef ENABLE_DBENGINE char* createdataset_string = "createdataset="; char* stresstest_string = "stresstest="; @@ -1870,7 +1866,7 @@ int netdata_main(int argc, char **argv) { if(!config_loaded) { fprintf(stderr, "warning: no configuration file has been loaded. 
Use -c CONFIG_FILE, before -W get. Using default config.\n"); load_netdata_conf(NULL, 0, &user); - load_cloud_conf(1); + cloud_conf_load(1); } get_netdata_configured_variables(); @@ -1884,10 +1880,6 @@ int netdata_main(int argc, char **argv) { printf("%s\n", value); return 0; } - else if(strncmp(optarg, claim_string, strlen(claim_string)) == 0) { - /* will trigger a claiming attempt when the agent is initialized */ - claiming_pending_arguments = optarg + strlen(claim_string); - } else if(strcmp(optarg, "buildinfo") == 0) { print_build_info(); return 0; @@ -1919,12 +1911,12 @@ int netdata_main(int argc, char **argv) { if (close_open_fds == true) { // close all open file descriptors, except the standard ones // the caller may have left open files (lxc-attach has this issue) - os_close_all_non_std_open_fds_except(NULL, 0); + os_close_all_non_std_open_fds_except(NULL, 0, 0); } if(!config_loaded) { load_netdata_conf(NULL, 0, &user); - load_cloud_conf(0); + cloud_conf_load(0); } // ------------------------------------------------------------------------ @@ -1970,7 +1962,8 @@ int netdata_main(int argc, char **argv) { // prepare configuration environment variables for the plugins get_netdata_configured_variables(); - set_global_environment(); + set_environment_for_plugins_and_scripts(); + analytics_reset(); // work while we are cd into config_dir // to allow the plugins refer to their config @@ -1987,7 +1980,7 @@ int netdata_main(int argc, char **argv) { // get the debugging flags from the configuration file char *flags = config_get(CONFIG_SECTION_LOGS, "debug flags", "0x0000000000000000"); - setenv("NETDATA_DEBUG_FLAGS", flags, 1); + nd_setenv("NETDATA_DEBUG_FLAGS", flags, 1); debug_flags = strtoull(flags, NULL, 0); netdata_log_debug(D_OPTIONS, "Debug flags set to '0x%" PRIX64 "'.", debug_flags); @@ -2021,8 +2014,6 @@ int netdata_main(int argc, char **argv) { get_system_timezone(); - bearer_tokens_init(); - replication_initialize(); rrd_functions_inflight_init(); @@ 
-2030,9 +2021,7 @@ int netdata_main(int argc, char **argv) { // -------------------------------------------------------------------- // get the certificate and start security -#ifdef ENABLE_HTTPS security_init(); -#endif // -------------------------------------------------------------------- // This is the safest place to start the SILENCERS structure @@ -2053,8 +2042,7 @@ int netdata_main(int argc, char **argv) { // this causes the threads to block signals. delta_startup_time("initialize signals"); - signals_block(); - signals_init(); // setup the signals we want to use + nd_initialize_signals(); // setup the signals we want to use // -------------------------------------------------------------------- // check which threads are enabled and initialize them @@ -2086,7 +2074,7 @@ int netdata_main(int argc, char **argv) { st->init_routine(); if(st->env_name) - setenv(st->env_name, st->enabled?"YES":"NO", 1); + nd_setenv(st->env_name, st->enabled?"YES":"NO", 1); if(st->global_variable) *st->global_variable = (st->enabled) ? 
true : false; @@ -2097,7 +2085,7 @@ int netdata_main(int argc, char **argv) { delta_startup_time("initialize web server"); - web_client_api_v1_init(); + nd_web_api_init(); web_server_threading_selection(); if(web_server_mode != WEB_SERVER_MODE_NONE) { @@ -2165,7 +2153,7 @@ int netdata_main(int argc, char **argv) { netdata_configured_home_dir = config_get(CONFIG_SECTION_DIRECTORIES, "home", pw->pw_dir); } - setenv("HOME", netdata_configured_home_dir, 1); + nd_setenv("HOME", netdata_configured_home_dir, 1); dyncfg_init(true); @@ -2178,6 +2166,7 @@ int netdata_main(int argc, char **argv) { // initialize internal registry delta_startup_time("initialize registry"); registry_init(); + cloud_conf_init_after_registry(); netdata_random_session_id_generate(); // ------------------------------------------------------------------------ @@ -2203,7 +2192,7 @@ int netdata_main(int argc, char **argv) { delta_startup_time("initialize RRD structures"); if(rrd_init(netdata_configured_hostname, system_info, false)) { - set_late_global_environment(system_info); + set_late_analytics_variables(system_info); fatal("Cannot initialize localhost instance with name '%s'.", netdata_configured_hostname); } @@ -2219,15 +2208,10 @@ int netdata_main(int argc, char **argv) { if (fd >= 0) close(fd); - // ------------------------------------------------------------------------ // Claim netdata agent to a cloud endpoint delta_startup_time("collect claiming info"); - - if (claiming_pending_arguments) - claim_agent(claiming_pending_arguments, false, NULL); - load_claiming_state(); // ------------------------------------------------------------------------ @@ -2242,11 +2226,13 @@ int netdata_main(int argc, char **argv) { // ------------------------------------------------------------------------ // spawn the threads + bearer_tokens_init(); + delta_startup_time("start the static threads"); web_server_config_options(); - set_late_global_environment(system_info); + set_late_analytics_variables(system_info); 
for (i = 0; static_threads[i].name != NULL ; i++) { struct netdata_static_thread *st = &static_threads[i]; @@ -2295,28 +2281,7 @@ int netdata_main(int argc, char **argv) { } } - // ------------------------------------------------------------------------ - // Report ACLK build failure -#ifndef ENABLE_ACLK - netdata_log_error("This agent doesn't have ACLK."); - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s/.aclk_report_sent", netdata_configured_varlib_dir); - if (netdata_anonymous_statistics_enabled > 0 && access(filename, F_OK)) { // -1 -> not initialized - analytics_statistic_t statistic = { "ACLK_DISABLED", "-", "-" }; - analytics_statistic_send(&statistic); - - int fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC, 444); - if (fd == -1) - netdata_log_error("Cannot create file '%s'. Please fix this.", filename); - else - close(fd); - } -#endif - webrtc_initialize(); - - signals_unblock(); - return 10; } @@ -2327,7 +2292,7 @@ int main(int argc, char *argv[]) if (rc != 10) return rc; - signals_handle(); + nd_process_signals(); return 1; } #endif diff --git a/src/daemon/service.c b/src/daemon/service.c index ead6334450e8d2..e3968fd8f58e04 100644 --- a/src/daemon/service.c +++ b/src/daemon/service.c @@ -203,7 +203,7 @@ static void svc_rrd_cleanup_obsolete_charts_from_all_hosts() { if (host == localhost) continue; - netdata_mutex_lock(&host->receiver_lock); + spinlock_lock(&host->receiver_lock); time_t now = now_realtime_sec(); @@ -215,7 +215,7 @@ static void svc_rrd_cleanup_obsolete_charts_from_all_hosts() { host->trigger_chart_obsoletion_check = 0; } - netdata_mutex_unlock(&host->receiver_lock); + spinlock_unlock(&host->receiver_lock); } rrd_rdunlock(); @@ -247,14 +247,12 @@ static void svc_rrdhost_cleanup_orphan_hosts(RRDHOST *protected_host) { } worker_is_busy(WORKER_JOB_FREE_HOST); -#ifdef ENABLE_ACLK // in case we have cloud connection we inform cloud // a child disconnected - if (netdata_cloud_enabled && force) { + if (force) { 
aclk_host_state_update(host, 0, 0); unregister_node(host->machine_guid); } -#endif rrdhost_free___while_having_rrd_wrlock(host, force); goto restart_after_removal; } diff --git a/src/daemon/signals.c b/src/daemon/signals.c index 4e4d7c4d4156e6..163f92ad8bc29d 100644 --- a/src/daemon/signals.c +++ b/src/daemon/signals.c @@ -2,12 +2,6 @@ #include "common.h" -/* - * IMPORTANT: Libuv uv_spawn() uses SIGCHLD internally: - * https://github.com/libuv/libuv/blob/cc51217a317e96510fbb284721d5e6bc2af31e33/src/unix/process.c#L485 - * Extreme care is needed when mixing and matching POSIX and libuv. - */ - typedef enum signal_action { NETDATA_SIGNAL_END_OF_LIST, NETDATA_SIGNAL_IGNORE, @@ -56,24 +50,33 @@ static void signal_handler(int signo) { } } -void signals_block(void) { +// Mask all signals, to ensure they will only be unmasked at the threads that can handle them. +// This means that all third party libraries (including libuv) cannot use signals anymore. +// The signals they are interested must be unblocked at their corresponding event loops. +static void posix_mask_all_signals(void) { sigset_t sigset; sigfillset(&sigset); - if(pthread_sigmask(SIG_BLOCK, &sigset, NULL) == -1) - netdata_log_error("SIGNAL: Could not block signals for threads"); + if(pthread_sigmask(SIG_BLOCK, &sigset, NULL) != 0) + netdata_log_error("SIGNAL: cannot mask all signals"); } -void signals_unblock(void) { +// Unmask all signals the netdata main signal handler uses. +// All other signals remain masked. 
+static void posix_unmask_my_signals(void) { sigset_t sigset; - sigfillset(&sigset); + sigemptyset(&sigset); - if(pthread_sigmask(SIG_UNBLOCK, &sigset, NULL) == -1) { - netdata_log_error("SIGNAL: Could not unblock signals for threads"); - } + for (int i = 0; signals_waiting[i].action != NETDATA_SIGNAL_END_OF_LIST; i++) + sigaddset(&sigset, signals_waiting[i].signo); + + if (pthread_sigmask(SIG_UNBLOCK, &sigset, NULL) != 0) + netdata_log_error("SIGNAL: cannot unmask netdata signals"); } -void signals_init(void) { +void nd_initialize_signals(void) { + posix_mask_all_signals(); // block all signals for all threads + // Catch signals which we want to use struct sigaction sa; sa.sa_flags = 0; @@ -97,22 +100,10 @@ void signals_init(void) { } } -void signals_reset(void) { - struct sigaction sa; - sigemptyset(&sa.sa_mask); - sa.sa_handler = SIG_DFL; - sa.sa_flags = 0; - - int i; - for (i = 0; signals_waiting[i].action != NETDATA_SIGNAL_END_OF_LIST; i++) { - if(sigaction(signals_waiting[i].signo, &sa, NULL) == -1) - netdata_log_error("SIGNAL: Failed to reset signal handler for: %s", signals_waiting[i].name); - } -} +void nd_process_signals(void) { + posix_unmask_my_signals(); -void signals_handle(void) { while(1) { - // pause() causes the calling process (or thread) to sleep until a signal // is delivered that either terminates the process or causes the invocation // of a signal-catching function. 
diff --git a/src/daemon/signals.h b/src/daemon/signals.h index 26dbc6dcdc5776..897b2b7f0c5620 100644 --- a/src/daemon/signals.h +++ b/src/daemon/signals.h @@ -3,10 +3,7 @@ #ifndef NETDATA_SIGNALS_H #define NETDATA_SIGNALS_H 1 -void signals_init(void); -void signals_block(void); -void signals_unblock(void); -void signals_reset(void); -void signals_handle(void) NORETURN; +void nd_initialize_signals(void); +void nd_process_signals(void) NORETURN; #endif //NETDATA_SIGNALS_H diff --git a/src/daemon/static_threads.c b/src/daemon/static_threads.c index c6ec799560a693..3e5b7e3502e867 100644 --- a/src/daemon/static_threads.c +++ b/src/daemon/static_threads.c @@ -133,7 +133,6 @@ const struct netdata_static_thread static_threads_common[] = { }, #endif -#ifdef ENABLE_ACLK { .name = "ACLK_MAIN", .config_section = NULL, @@ -143,7 +142,6 @@ const struct netdata_static_thread static_threads_common[] = { .init_routine = NULL, .start_routine = aclk_main }, -#endif { .name = "RRDCONTEXT", diff --git a/src/daemon/unit_test.c b/src/daemon/unit_test.c index 0f15f67d7656f6..46166d673e05f0 100644 --- a/src/daemon/unit_test.c +++ b/src/daemon/unit_test.c @@ -1437,8 +1437,8 @@ int check_strdupz_path_subpath() { size_t i; for(i = 0; checks[i].result ; i++) { - char *s = strdupz_path_subpath(checks[i].path, checks[i].subpath); - fprintf(stderr, "strdupz_path_subpath(\"%s\", \"%s\") = \"%s\": ", checks[i].path, checks[i].subpath, s); + char *s = filename_from_path_entry_strdupz(checks[i].path, checks[i].subpath); + fprintf(stderr, "filename_from_path_entry_strdupz(\"%s\", \"%s\") = \"%s\": ", checks[i].path, checks[i].subpath, s); if(!s || strcmp(s, checks[i].result) != 0) { freez(s); fprintf(stderr, "FAILED\n"); diff --git a/src/daemon/winsvc.cc b/src/daemon/winsvc.cc index 9c5eb49ff98773..23ade2895e780d 100644 --- a/src/daemon/winsvc.cc +++ b/src/daemon/winsvc.cc @@ -4,7 +4,7 @@ extern "C" { #include "libnetdata/libnetdata.h" int netdata_main(int argc, char *argv[]); -void 
signals_handle(void); +void nd_process_signals(void); } @@ -231,7 +231,7 @@ int main(int argc, char *argv[]) if (rc != 10) return rc; - signals_handle(); + nd_process_signals(); return 1; } else diff --git a/src/database/contexts/api_v1.c b/src/database/contexts/api_v1_contexts.c similarity index 98% rename from src/database/contexts/api_v1.c rename to src/database/contexts/api_v1_contexts.c index 355aaf91a360f1..d9bb21db3dc4cd 100644 --- a/src/database/contexts/api_v1.c +++ b/src/database/contexts/api_v1_contexts.c @@ -399,8 +399,8 @@ int rrdcontexts_to_json(RRDHOST *host, BUFFER *wb, time_t after, time_t before, char node_uuid[UUID_STR_LEN] = ""; - if(host->node_id) - uuid_unparse(*host->node_id, node_uuid); + if(!uuid_is_null(host->node_id)) + uuid_unparse_lower(host->node_id, node_uuid); if(after != 0 && before != 0) rrdr_relative_window_to_absolute_query(&after, &before, NULL, false); @@ -409,7 +409,8 @@ int rrdcontexts_to_json(RRDHOST *host, BUFFER *wb, time_t after, time_t before, buffer_json_member_add_string(wb, "hostname", rrdhost_hostname(host)); buffer_json_member_add_string(wb, "machine_guid", host->machine_guid); buffer_json_member_add_string(wb, "node_id", node_uuid); - buffer_json_member_add_string(wb, "claim_id", host->aclk_state.claimed_id ? 
host->aclk_state.claimed_id : ""); + CLAIM_ID claim_id = rrdhost_claim_id_get(host); + buffer_json_member_add_string(wb, "claim_id", claim_id.str); if(options & RRDCONTEXT_OPTION_SHOW_LABELS) { buffer_json_member_add_object(wb, "host_labels"); diff --git a/src/database/contexts/api_v2.c b/src/database/contexts/api_v2.c deleted file mode 100644 index 07cd3ac8385232..00000000000000 --- a/src/database/contexts/api_v2.c +++ /dev/null @@ -1,2454 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "internal.h" - -#include "aclk/aclk_capas.h" - -// ---------------------------------------------------------------------------- -// /api/v2/contexts API - -struct alert_transitions_facets alert_transition_facets[] = { - [ATF_STATUS] = { - .id = "f_status", - .name = "Alert Status", - .query_param = "f_status", - .order = 1, - }, - [ATF_TYPE] = { - .id = "f_type", - .name = "Alert Type", - .query_param = "f_type", - .order = 2, - }, - [ATF_ROLE] = { - .id = "f_role", - .name = "Recipient Role", - .query_param = "f_role", - .order = 3, - }, - [ATF_CLASS] = { - .id = "f_class", - .name = "Alert Class", - .query_param = "f_class", - .order = 4, - }, - [ATF_COMPONENT] = { - .id = "f_component", - .name = "Alert Component", - .query_param = "f_component", - .order = 5, - }, - [ATF_NODE] = { - .id = "f_node", - .name = "Alert Node", - .query_param = "f_node", - .order = 6, - }, - [ATF_ALERT_NAME] = { - .id = "f_alert", - .name = "Alert Name", - .query_param = "f_alert", - .order = 7, - }, - [ATF_CHART_NAME] = { - .id = "f_instance", - .name = "Instance Name", - .query_param = "f_instance", - .order = 8, - }, - [ATF_CONTEXT] = { - .id = "f_context", - .name = "Context", - .query_param = "f_context", - .order = 9, - }, - - // terminator - [ATF_TOTAL_ENTRIES] = { - .id = NULL, - .name = NULL, - .query_param = NULL, - .order = 9999, - } -}; - -struct facet_entry { - uint32_t count; -}; - -struct alert_transitions_callback_data { - struct rrdcontext_to_json_v2_data *ctl; - 
BUFFER *wb; - bool debug; - bool only_one_config; - - struct { - SIMPLE_PATTERN *pattern; - DICTIONARY *dict; - } facets[ATF_TOTAL_ENTRIES]; - - uint32_t max_items_to_return; - uint32_t items_to_return; - - uint32_t items_evaluated; - uint32_t items_matched; - - - struct sql_alert_transition_fixed_size *base; // double linked list - last item is base->prev - struct sql_alert_transition_fixed_size *last_added; // the last item added, not the last of the list - - struct { - size_t first; - size_t skips_before; - size_t skips_after; - size_t backwards; - size_t forwards; - size_t prepend; - size_t append; - size_t shifts; - } operations; - - uint32_t configs_added; -}; - -typedef enum __attribute__ ((__packed__)) { - FTS_MATCHED_NONE = 0, - FTS_MATCHED_HOST, - FTS_MATCHED_CONTEXT, - FTS_MATCHED_INSTANCE, - FTS_MATCHED_DIMENSION, - FTS_MATCHED_LABEL, - FTS_MATCHED_ALERT, - FTS_MATCHED_ALERT_INFO, - FTS_MATCHED_FAMILY, - FTS_MATCHED_TITLE, - FTS_MATCHED_UNITS, -} FTS_MATCH; - -static const char *fts_match_to_string(FTS_MATCH match) { - switch(match) { - case FTS_MATCHED_HOST: - return "HOST"; - - case FTS_MATCHED_CONTEXT: - return "CONTEXT"; - - case FTS_MATCHED_INSTANCE: - return "INSTANCE"; - - case FTS_MATCHED_DIMENSION: - return "DIMENSION"; - - case FTS_MATCHED_ALERT: - return "ALERT"; - - case FTS_MATCHED_ALERT_INFO: - return "ALERT_INFO"; - - case FTS_MATCHED_LABEL: - return "LABEL"; - - case FTS_MATCHED_FAMILY: - return "FAMILY"; - - case FTS_MATCHED_TITLE: - return "TITLE"; - - case FTS_MATCHED_UNITS: - return "UNITS"; - - default: - return "NONE"; - } -} - -struct function_v2_entry { - size_t size; - size_t used; - size_t *node_ids; - STRING *help; - STRING *tags; - HTTP_ACCESS access; - int priority; -}; - -struct context_v2_entry { - size_t count; - STRING *id; - STRING *family; - uint32_t priority; - time_t first_time_s; - time_t last_time_s; - RRD_FLAGS flags; - FTS_MATCH match; -}; - -struct alert_counts { - size_t critical; - size_t warning; - size_t 
clear; - size_t error; -}; - -struct alert_v2_entry { - RRDCALC *tmp; - - STRING *name; - STRING *summary; - RRDLABELS *recipient; - RRDLABELS *classification; - RRDLABELS *context; - RRDLABELS *component; - RRDLABELS *type; - - size_t ati; - - struct alert_counts counts; - - size_t instances; - DICTIONARY *nodes; - DICTIONARY *configs; -}; - -struct alert_by_x_entry { - struct { - struct alert_counts counts; - size_t silent; - size_t total; - } running; - - struct { - size_t available; - } prototypes; -}; - -typedef struct full_text_search_index { - size_t searches; - size_t string_searches; - size_t char_searches; -} FTS_INDEX; - -static inline bool full_text_search_string(FTS_INDEX *fts, SIMPLE_PATTERN *q, STRING *ptr) { - fts->searches++; - fts->string_searches++; - return simple_pattern_matches_string(q, ptr); -} - -static inline bool full_text_search_char(FTS_INDEX *fts, SIMPLE_PATTERN *q, char *ptr) { - fts->searches++; - fts->char_searches++; - return simple_pattern_matches(q, ptr); -} - -struct contexts_v2_node { - size_t ni; - RRDHOST *host; -}; - -struct rrdcontext_to_json_v2_data { - time_t now; - - BUFFER *wb; - struct api_v2_contexts_request *request; - - CONTEXTS_V2_MODE mode; - CONTEXTS_V2_OPTIONS options; - struct query_versions versions; - - struct { - SIMPLE_PATTERN *scope_pattern; - SIMPLE_PATTERN *pattern; - size_t ni; - DICTIONARY *dict; // the result set - } nodes; - - struct { - SIMPLE_PATTERN *scope_pattern; - SIMPLE_PATTERN *pattern; - size_t ci; - DICTIONARY *dict; // the result set - } contexts; - - struct { - SIMPLE_PATTERN *alert_name_pattern; - time_t alarm_id_filter; - - size_t ati; - - DICTIONARY *summary; - DICTIONARY *alert_instances; - - DICTIONARY *by_type; - DICTIONARY *by_component; - DICTIONARY *by_classification; - DICTIONARY *by_recipient; - DICTIONARY *by_module; - } alerts; - - struct { - FTS_MATCH host_match; - char host_node_id_str[UUID_STR_LEN]; - SIMPLE_PATTERN *pattern; - FTS_INDEX fts; - } q; - - struct { - 
DICTIONARY *dict; // the result set - } functions; - - struct { - bool enabled; - bool relative; - time_t after; - time_t before; - } window; - - struct query_timings timings; -}; - -static void alert_counts_add(struct alert_counts *t, RRDCALC *rc) { - switch(rc->status) { - case RRDCALC_STATUS_CRITICAL: - t->critical++; - break; - - case RRDCALC_STATUS_WARNING: - t->warning++; - break; - - case RRDCALC_STATUS_CLEAR: - t->clear++; - break; - - case RRDCALC_STATUS_REMOVED: - case RRDCALC_STATUS_UNINITIALIZED: - break; - - case RRDCALC_STATUS_UNDEFINED: - default: - if(!netdata_double_isnumber(rc->value)) - t->error++; - - break; - } -} - -static void alerts_v2_add(struct alert_v2_entry *t, RRDCALC *rc) { - t->instances++; - - alert_counts_add(&t->counts, rc); - - dictionary_set(t->nodes, rc->rrdset->rrdhost->machine_guid, NULL, 0); - - char key[UUID_STR_LEN + 1]; - uuid_unparse_lower(rc->config.hash_id, key); - dictionary_set(t->configs, key, NULL, 0); -} - -static void alerts_by_x_insert_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data) { - static STRING *silent = NULL; - if(unlikely(!silent)) silent = string_strdupz("silent"); - - struct alert_by_x_entry *b = value; - RRDCALC *rc = data; - if(!rc) { - // prototype - b->prototypes.available++; - } - else { - alert_counts_add(&b->running.counts, rc); - - b->running.total++; - - if (rc->config.recipient == silent) - b->running.silent++; - } -} - -static bool alerts_by_x_conflict_callback(const DICTIONARY_ITEM *item __maybe_unused, void *old_value, void *new_value __maybe_unused, void *data __maybe_unused) { - alerts_by_x_insert_callback(item, old_value, data); - return false; -} - -static void alerts_v2_insert_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data) { - struct rrdcontext_to_json_v2_data *ctl = data; - struct alert_v2_entry *t = value; - RRDCALC *rc = t->tmp; - t->name = rc->config.name; - t->summary = rc->config.summary; // the original summary - 
t->context = rrdlabels_create(); - t->recipient = rrdlabels_create(); - t->classification = rrdlabels_create(); - t->component = rrdlabels_create(); - t->type = rrdlabels_create(); - if (string_strlen(rc->rrdset->context)) - rrdlabels_add(t->context, string2str(rc->rrdset->context), "yes", RRDLABEL_SRC_AUTO); - if (string_strlen(rc->config.recipient)) - rrdlabels_add(t->recipient, string2str(rc->config.recipient), "yes", RRDLABEL_SRC_AUTO); - if (string_strlen(rc->config.classification)) - rrdlabels_add(t->classification, string2str(rc->config.classification), "yes", RRDLABEL_SRC_AUTO); - if (string_strlen(rc->config.component)) - rrdlabels_add(t->component, string2str(rc->config.component), "yes", RRDLABEL_SRC_AUTO); - if (string_strlen(rc->config.type)) - rrdlabels_add(t->type, string2str(rc->config.type), "yes", RRDLABEL_SRC_AUTO); - t->ati = ctl->alerts.ati++; - - t->nodes = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_VALUE_LINK_DONT_CLONE|DICT_OPTION_NAME_LINK_DONT_CLONE); - t->configs = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_VALUE_LINK_DONT_CLONE|DICT_OPTION_NAME_LINK_DONT_CLONE); - - alerts_v2_add(t, rc); -} - -static bool alerts_v2_conflict_callback(const DICTIONARY_ITEM *item __maybe_unused, void *old_value, void *new_value, void *data __maybe_unused) { - struct alert_v2_entry *t = old_value, *n = new_value; - RRDCALC *rc = n->tmp; - if (string_strlen(rc->rrdset->context)) - rrdlabels_add(t->context, string2str(rc->rrdset->context), "yes", RRDLABEL_SRC_AUTO); - if (string_strlen(rc->config.recipient)) - rrdlabels_add(t->recipient, string2str(rc->config.recipient), "yes", RRDLABEL_SRC_AUTO); - if (string_strlen(rc->config.classification)) - rrdlabels_add(t->classification, string2str(rc->config.classification), "yes", RRDLABEL_SRC_AUTO); - if (string_strlen(rc->config.component)) - rrdlabels_add(t->component, string2str(rc->config.component), "yes", RRDLABEL_SRC_AUTO); - if (string_strlen(rc->config.type)) - 
rrdlabels_add(t->type, string2str(rc->config.type), "yes", RRDLABEL_SRC_AUTO); - alerts_v2_add(t, rc); - return true; -} - -static void alerts_v2_delete_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { - struct alert_v2_entry *t = value; - - rrdlabels_destroy(t->context); - rrdlabels_destroy(t->recipient); - rrdlabels_destroy(t->classification); - rrdlabels_destroy(t->component); - rrdlabels_destroy(t->type); - - dictionary_destroy(t->nodes); - dictionary_destroy(t->configs); -} - -static void alert_instances_v2_insert_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data) { - struct rrdcontext_to_json_v2_data *ctl = data; - struct sql_alert_instance_v2_entry *t = value; - RRDCALC *rc = t->tmp; - - t->context = rc->rrdset->context; - t->chart_id = rc->rrdset->id; - t->chart_name = rc->rrdset->name; - t->family = rc->rrdset->family; - t->units = rc->config.units; - t->classification = rc->config.classification; - t->type = rc->config.type; - t->recipient = rc->config.recipient; - t->component = rc->config.component; - t->name = rc->config.name; - t->source = rc->config.source; - t->status = rc->status; - t->flags = rc->run_flags; - t->info = rc->config.info; - t->summary = rc->summary; - t->value = rc->value; - t->last_updated = rc->last_updated; - t->last_status_change = rc->last_status_change; - t->last_status_change_value = rc->last_status_change_value; - t->host = rc->rrdset->rrdhost; - t->alarm_id = rc->id; - t->ni = ctl->nodes.ni; - - uuid_copy(t->config_hash_id, rc->config.hash_id); - health_alarm_log_get_global_id_and_transition_id_for_rrdcalc(rc, &t->global_id, &t->last_transition_id); -} - -static bool alert_instances_v2_conflict_callback(const DICTIONARY_ITEM *item __maybe_unused, void *old_value __maybe_unused, void *new_value __maybe_unused, void *data __maybe_unused) { - internal_fatal(true, "This should never happen!"); - return true; -} - -static void 
alert_instances_delete_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value __maybe_unused, void *data __maybe_unused) { - ; -} - -static FTS_MATCH rrdcontext_to_json_v2_full_text_search(struct rrdcontext_to_json_v2_data *ctl, RRDCONTEXT *rc, SIMPLE_PATTERN *q) { - if(unlikely(full_text_search_string(&ctl->q.fts, q, rc->id) || - full_text_search_string(&ctl->q.fts, q, rc->family))) - return FTS_MATCHED_CONTEXT; - - if(unlikely(full_text_search_string(&ctl->q.fts, q, rc->title))) - return FTS_MATCHED_TITLE; - - if(unlikely(full_text_search_string(&ctl->q.fts, q, rc->units))) - return FTS_MATCHED_UNITS; - - FTS_MATCH matched = FTS_MATCHED_NONE; - RRDINSTANCE *ri; - dfe_start_read(rc->rrdinstances, ri) { - if(matched) break; - - if(ctl->window.enabled && !query_matches_retention(ctl->window.after, ctl->window.before, ri->first_time_s, (ri->flags & RRD_FLAG_COLLECTED) ? ctl->now : ri->last_time_s, 0)) - continue; - - if(unlikely(full_text_search_string(&ctl->q.fts, q, ri->id)) || - (ri->name != ri->id && full_text_search_string(&ctl->q.fts, q, ri->name))) { - matched = FTS_MATCHED_INSTANCE; - break; - } - - RRDMETRIC *rm; - dfe_start_read(ri->rrdmetrics, rm) { - if(ctl->window.enabled && !query_matches_retention(ctl->window.after, ctl->window.before, rm->first_time_s, (rm->flags & RRD_FLAG_COLLECTED) ? 
ctl->now : rm->last_time_s, 0)) - continue; - - if(unlikely(full_text_search_string(&ctl->q.fts, q, rm->id)) || - (rm->name != rm->id && full_text_search_string(&ctl->q.fts, q, rm->name))) { - matched = FTS_MATCHED_DIMENSION; - break; - } - } - dfe_done(rm); - - size_t label_searches = 0; - if(unlikely(ri->rrdlabels && rrdlabels_entries(ri->rrdlabels) && - rrdlabels_match_simple_pattern_parsed(ri->rrdlabels, q, ':', &label_searches) == SP_MATCHED_POSITIVE)) { - ctl->q.fts.searches += label_searches; - ctl->q.fts.char_searches += label_searches; - matched = FTS_MATCHED_LABEL; - break; - } - ctl->q.fts.searches += label_searches; - ctl->q.fts.char_searches += label_searches; - - if(ri->rrdset) { - RRDSET *st = ri->rrdset; - rw_spinlock_read_lock(&st->alerts.spinlock); - for (RRDCALC *rcl = st->alerts.base; rcl; rcl = rcl->next) { - if(unlikely(full_text_search_string(&ctl->q.fts, q, rcl->config.name))) { - matched = FTS_MATCHED_ALERT; - break; - } - - if(unlikely(full_text_search_string(&ctl->q.fts, q, rcl->config.info))) { - matched = FTS_MATCHED_ALERT_INFO; - break; - } - } - rw_spinlock_read_unlock(&st->alerts.spinlock); - } - } - dfe_done(ri); - return matched; -} - -static bool rrdcontext_matches_alert(struct rrdcontext_to_json_v2_data *ctl, RRDCONTEXT *rc) { - size_t matches = 0; - RRDINSTANCE *ri; - dfe_start_read(rc->rrdinstances, ri) { - if(ri->rrdset) { - RRDSET *st = ri->rrdset; - rw_spinlock_read_lock(&st->alerts.spinlock); - for (RRDCALC *rcl = st->alerts.base; rcl; rcl = rcl->next) { - if(ctl->alerts.alert_name_pattern && !simple_pattern_matches_string(ctl->alerts.alert_name_pattern, rcl->config.name)) - continue; - - if(ctl->alerts.alarm_id_filter && ctl->alerts.alarm_id_filter != rcl->id) - continue; - - size_t m = ctl->request->alerts.status & CONTEXTS_V2_ALERT_STATUSES ? 
0 : 1; - - if (!m) { - if ((ctl->request->alerts.status & CONTEXT_V2_ALERT_UNINITIALIZED) && - rcl->status == RRDCALC_STATUS_UNINITIALIZED) - m++; - - if ((ctl->request->alerts.status & CONTEXT_V2_ALERT_UNDEFINED) && - rcl->status == RRDCALC_STATUS_UNDEFINED) - m++; - - if ((ctl->request->alerts.status & CONTEXT_V2_ALERT_CLEAR) && - rcl->status == RRDCALC_STATUS_CLEAR) - m++; - - if ((ctl->request->alerts.status & CONTEXT_V2_ALERT_RAISED) && - rcl->status >= RRDCALC_STATUS_RAISED) - m++; - - if ((ctl->request->alerts.status & CONTEXT_V2_ALERT_WARNING) && - rcl->status == RRDCALC_STATUS_WARNING) - m++; - - if ((ctl->request->alerts.status & CONTEXT_V2_ALERT_CRITICAL) && - rcl->status == RRDCALC_STATUS_CRITICAL) - m++; - - if(!m) - continue; - } - - struct alert_v2_entry t = { - .tmp = rcl, - }; - struct alert_v2_entry *a2e = - dictionary_set(ctl->alerts.summary, string2str(rcl->config.name), - &t, sizeof(struct alert_v2_entry)); - size_t ati = a2e->ati; - matches++; - - dictionary_set_advanced(ctl->alerts.by_type, - string2str(rcl->config.type), - (ssize_t)string_strlen(rcl->config.type), - NULL, - sizeof(struct alert_by_x_entry), - rcl); - - dictionary_set_advanced(ctl->alerts.by_component, - string2str(rcl->config.component), - (ssize_t)string_strlen(rcl->config.component), - NULL, - sizeof(struct alert_by_x_entry), - rcl); - - dictionary_set_advanced(ctl->alerts.by_classification, - string2str(rcl->config.classification), - (ssize_t)string_strlen(rcl->config.classification), - NULL, - sizeof(struct alert_by_x_entry), - rcl); - - dictionary_set_advanced(ctl->alerts.by_recipient, - string2str(rcl->config.recipient), - (ssize_t)string_strlen(rcl->config.recipient), - NULL, - sizeof(struct alert_by_x_entry), - rcl); - - char *module = NULL; - rrdlabels_get_value_strdup_or_null(st->rrdlabels, &module, "_collect_module"); - if(!module || !*module) module = "[unset]"; - - dictionary_set_advanced(ctl->alerts.by_module, - module, - -1, - NULL, - sizeof(struct 
alert_by_x_entry), - rcl); - - if (ctl->options & (CONTEXT_V2_OPTION_ALERTS_WITH_INSTANCES | CONTEXT_V2_OPTION_ALERTS_WITH_VALUES)) { - char key[20 + 1]; - snprintfz(key, sizeof(key) - 1, "%p", rcl); - - struct sql_alert_instance_v2_entry z = { - .ati = ati, - .tmp = rcl, - }; - dictionary_set(ctl->alerts.alert_instances, key, &z, sizeof(z)); - } - } - rw_spinlock_read_unlock(&st->alerts.spinlock); - } - } - dfe_done(ri); - - return matches != 0; -} - - -static ssize_t rrdcontext_to_json_v2_add_context(void *data, RRDCONTEXT_ACQUIRED *rca, bool queryable_context __maybe_unused) { - struct rrdcontext_to_json_v2_data *ctl = data; - - RRDCONTEXT *rc = rrdcontext_acquired_value(rca); - - if(ctl->window.enabled && !query_matches_retention(ctl->window.after, ctl->window.before, rc->first_time_s, (rc->flags & RRD_FLAG_COLLECTED) ? ctl->now : rc->last_time_s, 0)) - return 0; // continue to next context - - FTS_MATCH match = ctl->q.host_match; - if((ctl->mode & CONTEXTS_V2_SEARCH) && ctl->q.pattern) { - match = rrdcontext_to_json_v2_full_text_search(ctl, rc, ctl->q.pattern); - - if(match == FTS_MATCHED_NONE) - return 0; // continue to next context - } - - if(ctl->mode & CONTEXTS_V2_ALERTS) { - if(!rrdcontext_matches_alert(ctl, rc)) - return 0; // continue to next context - } - - if(ctl->contexts.dict) { - struct context_v2_entry t = { - .count = 1, - .id = rc->id, - .family = string_dup(rc->family), - .priority = rc->priority, - .first_time_s = rc->first_time_s, - .last_time_s = rc->last_time_s, - .flags = rc->flags, - .match = match, - }; - - dictionary_set(ctl->contexts.dict, string2str(rc->id), &t, sizeof(struct context_v2_entry)); - } - - return 1; -} - -void buffer_json_agent_status_id(BUFFER *wb, size_t ai, usec_t duration_ut) { - buffer_json_member_add_object(wb, "st"); - { - buffer_json_member_add_uint64(wb, "ai", ai); - buffer_json_member_add_uint64(wb, "code", 200); - buffer_json_member_add_string(wb, "msg", ""); - if (duration_ut) - 
buffer_json_member_add_double(wb, "ms", (NETDATA_DOUBLE) duration_ut / 1000.0); - } - buffer_json_object_close(wb); -} - -void buffer_json_node_add_v2(BUFFER *wb, RRDHOST *host, size_t ni, usec_t duration_ut, bool status) { - buffer_json_member_add_string(wb, "mg", host->machine_guid); - - if(host->node_id) - buffer_json_member_add_uuid(wb, "nd", host->node_id); - buffer_json_member_add_string(wb, "nm", rrdhost_hostname(host)); - buffer_json_member_add_uint64(wb, "ni", ni); - - if(status) - buffer_json_agent_status_id(wb, 0, duration_ut); -} - -static void rrdhost_receiver_to_json(BUFFER *wb, RRDHOST_STATUS *s, const char *key) { - buffer_json_member_add_object(wb, key); - { - buffer_json_member_add_uint64(wb, "id", s->ingest.id); - buffer_json_member_add_uint64(wb, "hops", s->ingest.hops); - buffer_json_member_add_string(wb, "type", rrdhost_ingest_type_to_string(s->ingest.type)); - buffer_json_member_add_string(wb, "status", rrdhost_ingest_status_to_string(s->ingest.status)); - buffer_json_member_add_time_t(wb, "since", s->ingest.since); - buffer_json_member_add_time_t(wb, "age", s->now - s->ingest.since); - - if(s->ingest.type == RRDHOST_INGEST_TYPE_CHILD) { - if(s->ingest.status == RRDHOST_INGEST_STATUS_OFFLINE) - buffer_json_member_add_string(wb, "reason", stream_handshake_error_to_string(s->ingest.reason)); - - if(s->ingest.status == RRDHOST_INGEST_STATUS_REPLICATING) { - buffer_json_member_add_object(wb, "replication"); - { - buffer_json_member_add_boolean(wb, "in_progress", s->ingest.replication.in_progress); - buffer_json_member_add_double(wb, "completion", s->ingest.replication.completion); - buffer_json_member_add_uint64(wb, "instances", s->ingest.replication.instances); - } - buffer_json_object_close(wb); // replication - } - - if(s->ingest.status == RRDHOST_INGEST_STATUS_REPLICATING || s->ingest.status == RRDHOST_INGEST_STATUS_ONLINE) { - buffer_json_member_add_object(wb, "source"); - { - char buf[1024 + 1]; - snprintfz(buf, sizeof(buf) - 1, 
"[%s]:%d%s", s->ingest.peers.local.ip, s->ingest.peers.local.port, s->ingest.ssl ? ":SSL" : ""); - buffer_json_member_add_string(wb, "local", buf); - - snprintfz(buf, sizeof(buf) - 1, "[%s]:%d%s", s->ingest.peers.peer.ip, s->ingest.peers.peer.port, s->ingest.ssl ? ":SSL" : ""); - buffer_json_member_add_string(wb, "remote", buf); - - stream_capabilities_to_json_array(wb, s->ingest.capabilities, "capabilities"); - } - buffer_json_object_close(wb); // source - } - } - } - buffer_json_object_close(wb); // collection -} - -static void rrdhost_sender_to_json(BUFFER *wb, RRDHOST_STATUS *s, const char *key) { - if(s->stream.status == RRDHOST_STREAM_STATUS_DISABLED) - return; - - buffer_json_member_add_object(wb, key); - { - buffer_json_member_add_uint64(wb, "id", s->stream.id); - buffer_json_member_add_uint64(wb, "hops", s->stream.hops); - buffer_json_member_add_string(wb, "status", rrdhost_streaming_status_to_string(s->stream.status)); - buffer_json_member_add_time_t(wb, "since", s->stream.since); - buffer_json_member_add_time_t(wb, "age", s->now - s->stream.since); - - if (s->stream.status == RRDHOST_STREAM_STATUS_OFFLINE) - buffer_json_member_add_string(wb, "reason", stream_handshake_error_to_string(s->stream.reason)); - - if (s->stream.status == RRDHOST_STREAM_STATUS_REPLICATING) { - buffer_json_member_add_object(wb, "replication"); - { - buffer_json_member_add_boolean(wb, "in_progress", s->stream.replication.in_progress); - buffer_json_member_add_double(wb, "completion", s->stream.replication.completion); - buffer_json_member_add_uint64(wb, "instances", s->stream.replication.instances); - } - buffer_json_object_close(wb); - } - - buffer_json_member_add_object(wb, "destination"); - { - char buf[1024 + 1]; - snprintfz(buf, sizeof(buf) - 1, "[%s]:%d%s", s->stream.peers.local.ip, s->stream.peers.local.port, s->stream.ssl ? 
":SSL" : ""); - buffer_json_member_add_string(wb, "local", buf); - - snprintfz(buf, sizeof(buf) - 1, "[%s]:%d%s", s->stream.peers.peer.ip, s->stream.peers.peer.port, s->stream.ssl ? ":SSL" : ""); - buffer_json_member_add_string(wb, "remote", buf); - - stream_capabilities_to_json_array(wb, s->stream.capabilities, "capabilities"); - - buffer_json_member_add_object(wb, "traffic"); - { - buffer_json_member_add_boolean(wb, "compression", s->stream.compression); - buffer_json_member_add_uint64(wb, "data", s->stream.sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_DATA]); - buffer_json_member_add_uint64(wb, "metadata", s->stream.sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_METADATA]); - buffer_json_member_add_uint64(wb, "functions", s->stream.sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_FUNCTIONS]); - buffer_json_member_add_uint64(wb, "replication", s->stream.sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_REPLICATION]); - buffer_json_member_add_uint64(wb, "dyncfg", s->stream.sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_DYNCFG]); - } - buffer_json_object_close(wb); // traffic - - buffer_json_member_add_array(wb, "candidates"); - struct rrdpush_destinations *d; - for (d = s->host->destinations; d; d = d->next) { - buffer_json_add_array_item_object(wb); - buffer_json_member_add_uint64(wb, "attempts", d->attempts); - { - - if (d->ssl) { - snprintfz(buf, sizeof(buf) - 1, "%s:SSL", string2str(d->destination)); - buffer_json_member_add_string(wb, "destination", buf); - } - else - buffer_json_member_add_string(wb, "destination", string2str(d->destination)); - - buffer_json_member_add_time_t(wb, "since", d->since); - buffer_json_member_add_time_t(wb, "age", s->now - d->since); - buffer_json_member_add_string(wb, "last_handshake", stream_handshake_error_to_string(d->reason)); - if(d->postpone_reconnection_until > s->now) { - buffer_json_member_add_time_t(wb, "next_check", d->postpone_reconnection_until); - 
buffer_json_member_add_time_t(wb, "next_in", d->postpone_reconnection_until - s->now); - } - } - buffer_json_object_close(wb); // each candidate - } - buffer_json_array_close(wb); // candidates - } - buffer_json_object_close(wb); // destination - } - buffer_json_object_close(wb); // streaming -} - -static void agent_capabilities_to_json(BUFFER *wb, RRDHOST *host, const char *key) { - buffer_json_member_add_array(wb, key); - - struct capability *capas = aclk_get_node_instance_capas(host); - for(struct capability *capa = capas; capa->name ;capa++) { - buffer_json_add_array_item_object(wb); - { - buffer_json_member_add_string(wb, "name", capa->name); - buffer_json_member_add_uint64(wb, "version", capa->version); - buffer_json_member_add_boolean(wb, "enabled", capa->enabled); - } - buffer_json_object_close(wb); - } - buffer_json_array_close(wb); - freez(capas); -} - -static inline void host_dyncfg_to_json_v2(BUFFER *wb, const char *key, RRDHOST_STATUS *s) { - buffer_json_member_add_object(wb, key); - { - buffer_json_member_add_string(wb, "status", rrdhost_dyncfg_status_to_string(s->dyncfg.status)); - } - buffer_json_object_close(wb); // health - -} - -static inline void rrdhost_health_to_json_v2(BUFFER *wb, const char *key, RRDHOST_STATUS *s) { - buffer_json_member_add_object(wb, key); - { - buffer_json_member_add_string(wb, "status", rrdhost_health_status_to_string(s->health.status)); - if (s->health.status == RRDHOST_HEALTH_STATUS_RUNNING) { - buffer_json_member_add_object(wb, "alerts"); - { - buffer_json_member_add_uint64(wb, "critical", s->health.alerts.critical); - buffer_json_member_add_uint64(wb, "warning", s->health.alerts.warning); - buffer_json_member_add_uint64(wb, "clear", s->health.alerts.clear); - buffer_json_member_add_uint64(wb, "undefined", s->health.alerts.undefined); - buffer_json_member_add_uint64(wb, "uninitialized", s->health.alerts.uninitialized); - } - buffer_json_object_close(wb); // alerts - } - } - buffer_json_object_close(wb); // health -} - 
-static void rrdcontext_to_json_v2_rrdhost(BUFFER *wb, RRDHOST *host, struct rrdcontext_to_json_v2_data *ctl, size_t node_id) { - buffer_json_add_array_item_object(wb); // this node - buffer_json_node_add_v2(wb, host, node_id, 0, - (ctl->mode & CONTEXTS_V2_AGENTS) && !(ctl->mode & CONTEXTS_V2_NODE_INSTANCES)); - - if(ctl->mode & (CONTEXTS_V2_NODES_INFO | CONTEXTS_V2_NODE_INSTANCES)) { - RRDHOST_STATUS s; - rrdhost_status(host, ctl->now, &s); - - if (ctl->mode & (CONTEXTS_V2_NODES_INFO)) { - buffer_json_member_add_string(wb, "v", rrdhost_program_version(host)); - - host_labels2json(host, wb, "labels"); - - if (host->system_info) { - buffer_json_member_add_object(wb, "hw"); - { - buffer_json_member_add_string_or_empty(wb, "architecture", host->system_info->architecture); - buffer_json_member_add_string_or_empty(wb, "cpu_frequency", host->system_info->host_cpu_freq); - buffer_json_member_add_string_or_empty(wb, "cpus", host->system_info->host_cores); - buffer_json_member_add_string_or_empty(wb, "memory", host->system_info->host_ram_total); - buffer_json_member_add_string_or_empty(wb, "disk_space", host->system_info->host_disk_space); - buffer_json_member_add_string_or_empty(wb, "virtualization", host->system_info->virtualization); - buffer_json_member_add_string_or_empty(wb, "container", host->system_info->container); - } - buffer_json_object_close(wb); - - buffer_json_member_add_object(wb, "os"); - { - buffer_json_member_add_string_or_empty(wb, "id", host->system_info->host_os_id); - buffer_json_member_add_string_or_empty(wb, "nm", host->system_info->host_os_name); - buffer_json_member_add_string_or_empty(wb, "v", host->system_info->host_os_version); - buffer_json_member_add_object(wb, "kernel"); - buffer_json_member_add_string_or_empty(wb, "nm", host->system_info->kernel_name); - buffer_json_member_add_string_or_empty(wb, "v", host->system_info->kernel_version); - buffer_json_object_close(wb); - } - buffer_json_object_close(wb); - } - - // created - the node is 
created but never connected to cloud - // unreachable - not currently connected - // stale - connected but not having live data - // reachable - connected with live data - // pruned - not connected for some time and has been removed - buffer_json_member_add_string(wb, "state", rrdhost_state_cloud_emulation(host) ? "reachable" : "stale"); - - rrdhost_health_to_json_v2(wb, "health", &s); - agent_capabilities_to_json(wb, host, "capabilities"); - } - - if (ctl->mode & (CONTEXTS_V2_NODE_INSTANCES)) { - buffer_json_member_add_array(wb, "instances"); - buffer_json_add_array_item_object(wb); // this instance - { - buffer_json_agent_status_id(wb, 0, 0); - - buffer_json_member_add_object(wb, "db"); - { - buffer_json_member_add_string(wb, "status", rrdhost_db_status_to_string(s.db.status)); - buffer_json_member_add_string(wb, "liveness", rrdhost_db_liveness_to_string(s.db.liveness)); - buffer_json_member_add_string(wb, "mode", rrd_memory_mode_name(s.db.mode)); - buffer_json_member_add_time_t(wb, "first_time", s.db.first_time_s); - buffer_json_member_add_time_t(wb, "last_time", s.db.last_time_s); - buffer_json_member_add_uint64(wb, "metrics", s.db.metrics); - buffer_json_member_add_uint64(wb, "instances", s.db.instances); - buffer_json_member_add_uint64(wb, "contexts", s.db.contexts); - } - buffer_json_object_close(wb); - - rrdhost_receiver_to_json(wb, &s, "ingest"); - rrdhost_sender_to_json(wb, &s, "stream"); - - buffer_json_member_add_object(wb, "ml"); - buffer_json_member_add_string(wb, "status", rrdhost_ml_status_to_string(s.ml.status)); - buffer_json_member_add_string(wb, "type", rrdhost_ml_type_to_string(s.ml.type)); - if (s.ml.status == RRDHOST_ML_STATUS_RUNNING) { - buffer_json_member_add_object(wb, "metrics"); - { - buffer_json_member_add_uint64(wb, "anomalous", s.ml.metrics.anomalous); - buffer_json_member_add_uint64(wb, "normal", s.ml.metrics.normal); - buffer_json_member_add_uint64(wb, "trained", s.ml.metrics.trained); - buffer_json_member_add_uint64(wb, "pending", 
s.ml.metrics.pending); - buffer_json_member_add_uint64(wb, "silenced", s.ml.metrics.silenced); - } - buffer_json_object_close(wb); // metrics - } - buffer_json_object_close(wb); // ml - - rrdhost_health_to_json_v2(wb, "health", &s); - - host_functions2json(host, wb); // functions - agent_capabilities_to_json(wb, host, "capabilities"); - - host_dyncfg_to_json_v2(wb, "dyncfg", &s); - } - buffer_json_object_close(wb); // this instance - buffer_json_array_close(wb); // instances - } - } - buffer_json_object_close(wb); // this node -} - -static ssize_t rrdcontext_to_json_v2_add_host(void *data, RRDHOST *host, bool queryable_host) { - if(!queryable_host || !host->rrdctx.contexts) - // the host matches the 'scope_host' but does not match the 'host' patterns - // or the host does not have any contexts - return 0; // continue to next host - - struct rrdcontext_to_json_v2_data *ctl = data; - - if(ctl->window.enabled && !rrdhost_matches_window(host, ctl->window.after, ctl->window.before, ctl->now)) - // the host does not have data in the requested window - return 0; // continue to next host - - if(ctl->request->timeout_ms && now_monotonic_usec() > ctl->timings.received_ut + ctl->request->timeout_ms * USEC_PER_MS) - // timed out - return -2; // stop the query - - if(ctl->request->interrupt_callback && ctl->request->interrupt_callback(ctl->request->interrupt_callback_data)) - // interrupted - return -1; // stop the query - - bool host_matched = (ctl->mode & CONTEXTS_V2_NODES); - bool do_contexts = (ctl->mode & (CONTEXTS_V2_CONTEXTS | CONTEXTS_V2_ALERTS)); - - ctl->q.host_match = FTS_MATCHED_NONE; - if((ctl->mode & CONTEXTS_V2_SEARCH)) { - // check if we match the host itself - if(ctl->q.pattern && ( - full_text_search_string(&ctl->q.fts, ctl->q.pattern, host->hostname) || - full_text_search_char(&ctl->q.fts, ctl->q.pattern, host->machine_guid) || - (ctl->q.pattern && full_text_search_char(&ctl->q.fts, ctl->q.pattern, ctl->q.host_node_id_str)))) { - ctl->q.host_match = 
FTS_MATCHED_HOST; - do_contexts = true; - } - } - - if(do_contexts) { - // save it - SIMPLE_PATTERN *old_q = ctl->q.pattern; - - if(ctl->q.host_match == FTS_MATCHED_HOST) - // do not do pattern matching on contexts - we matched the host itself - ctl->q.pattern = NULL; - - ssize_t added = query_scope_foreach_context( - host, ctl->request->scope_contexts, - ctl->contexts.scope_pattern, ctl->contexts.pattern, - rrdcontext_to_json_v2_add_context, queryable_host, ctl); - - // restore it - ctl->q.pattern = old_q; - - if(unlikely(added < 0)) - return -1; // stop the query - - if(added) - host_matched = true; - } - - if(!host_matched) - return 0; - - if(ctl->mode & CONTEXTS_V2_FUNCTIONS) { - struct function_v2_entry t = { - .used = 1, - .size = 1, - .node_ids = &ctl->nodes.ni, - .help = NULL, - .tags = NULL, - .access = HTTP_ACCESS_ALL, - .priority = RRDFUNCTIONS_PRIORITY_DEFAULT, - }; - host_functions_to_dict(host, ctl->functions.dict, &t, sizeof(t), &t.help, &t.tags, &t.access, &t.priority); - } - - if(ctl->mode & CONTEXTS_V2_NODES) { - struct contexts_v2_node t = { - .ni = ctl->nodes.ni++, - .host = host, - }; - - dictionary_set(ctl->nodes.dict, host->machine_guid, &t, sizeof(struct contexts_v2_node)); - } - - return 1; -} - -static void buffer_json_contexts_v2_mode_to_array(BUFFER *wb, const char *key, CONTEXTS_V2_MODE mode) { - buffer_json_member_add_array(wb, key); - - if(mode & CONTEXTS_V2_VERSIONS) - buffer_json_add_array_item_string(wb, "versions"); - - if(mode & CONTEXTS_V2_AGENTS) - buffer_json_add_array_item_string(wb, "agents"); - - if(mode & CONTEXTS_V2_AGENTS_INFO) - buffer_json_add_array_item_string(wb, "agents-info"); - - if(mode & CONTEXTS_V2_NODES) - buffer_json_add_array_item_string(wb, "nodes"); - - if(mode & CONTEXTS_V2_NODES_INFO) - buffer_json_add_array_item_string(wb, "nodes-info"); - - if(mode & CONTEXTS_V2_NODE_INSTANCES) - buffer_json_add_array_item_string(wb, "nodes-instances"); - - if(mode & CONTEXTS_V2_CONTEXTS) - 
buffer_json_add_array_item_string(wb, "contexts"); - - if(mode & CONTEXTS_V2_SEARCH) - buffer_json_add_array_item_string(wb, "search"); - - if(mode & CONTEXTS_V2_ALERTS) - buffer_json_add_array_item_string(wb, "alerts"); - - if(mode & CONTEXTS_V2_ALERT_TRANSITIONS) - buffer_json_add_array_item_string(wb, "alert_transitions"); - - buffer_json_array_close(wb); -} - -void buffer_json_query_timings(BUFFER *wb, const char *key, struct query_timings *timings) { - timings->finished_ut = now_monotonic_usec(); - if(!timings->executed_ut) - timings->executed_ut = timings->finished_ut; - if(!timings->preprocessed_ut) - timings->preprocessed_ut = timings->received_ut; - buffer_json_member_add_object(wb, key); - buffer_json_member_add_double(wb, "prep_ms", (NETDATA_DOUBLE)(timings->preprocessed_ut - timings->received_ut) / USEC_PER_MS); - buffer_json_member_add_double(wb, "query_ms", (NETDATA_DOUBLE)(timings->executed_ut - timings->preprocessed_ut) / USEC_PER_MS); - buffer_json_member_add_double(wb, "output_ms", (NETDATA_DOUBLE)(timings->finished_ut - timings->executed_ut) / USEC_PER_MS); - buffer_json_member_add_double(wb, "total_ms", (NETDATA_DOUBLE)(timings->finished_ut - timings->received_ut) / USEC_PER_MS); - buffer_json_member_add_double(wb, "cloud_ms", (NETDATA_DOUBLE)(timings->finished_ut - timings->received_ut) / USEC_PER_MS); - buffer_json_object_close(wb); -} - -void build_info_to_json_object(BUFFER *b); - -static void convert_seconds_to_dhms(time_t seconds, char *result, int result_size) { - int days, hours, minutes; - - days = (int) (seconds / (24 * 3600)); - seconds = (int) (seconds % (24 * 3600)); - hours = (int) (seconds / 3600); - seconds %= 3600; - minutes = (int) (seconds / 60); - seconds %= 60; - - // Format the result into the provided string buffer - BUFFER *buf = buffer_create(128, NULL); - if (days) - buffer_sprintf(buf,"%d day%s%s", days, days==1 ? "" : "s", hours || minutes ? 
", " : ""); - if (hours) - buffer_sprintf(buf,"%d hour%s%s", hours, hours==1 ? "" : "s", minutes ? ", " : ""); - if (minutes) - buffer_sprintf(buf,"%d minute%s%s", minutes, minutes==1 ? "" : "s", seconds ? ", " : ""); - if (seconds) - buffer_sprintf(buf,"%d second%s", (int) seconds, seconds==1 ? "" : "s"); - strncpyz(result, buffer_tostring(buf), result_size); - buffer_free(buf); -} - -void buffer_json_agents_v2(BUFFER *wb, struct query_timings *timings, time_t now_s, bool info, bool array) { - if(!now_s) - now_s = now_realtime_sec(); - - if(array) { - buffer_json_member_add_array(wb, "agents"); - buffer_json_add_array_item_object(wb); - } - else - buffer_json_member_add_object(wb, "agent"); - - buffer_json_member_add_string(wb, "mg", localhost->machine_guid); - buffer_json_member_add_uuid(wb, "nd", localhost->node_id); - buffer_json_member_add_string(wb, "nm", rrdhost_hostname(localhost)); - buffer_json_member_add_time_t(wb, "now", now_s); - - if(array) - buffer_json_member_add_uint64(wb, "ai", 0); - - if(info) { - buffer_json_member_add_object(wb, "application"); - build_info_to_json_object(wb); - buffer_json_object_close(wb); // netdata - - buffer_json_cloud_status(wb, now_s); - - buffer_json_member_add_array(wb, "db_size"); - size_t group_seconds = localhost->rrd_update_every; - for (size_t tier = 0; tier < storage_tiers; tier++) { - STORAGE_ENGINE *eng = localhost->db[tier].eng; - if (!eng) continue; - - group_seconds *= storage_tiers_grouping_iterations[tier]; - uint64_t max = storage_engine_disk_space_max(eng->seb, localhost->db[tier].si); - uint64_t used = storage_engine_disk_space_used(eng->seb, localhost->db[tier].si); -#ifdef ENABLE_DBENGINE - if (!max && eng->seb == STORAGE_ENGINE_BACKEND_DBENGINE) { - max = get_directory_free_bytes_space(multidb_ctx[tier]); - max += used; - } -#endif - time_t first_time_s = storage_engine_global_first_time_s(eng->seb, localhost->db[tier].si); - size_t currently_collected_metrics = 
storage_engine_collected_metrics(eng->seb, localhost->db[tier].si); - - NETDATA_DOUBLE percent; - if (used && max) - percent = (NETDATA_DOUBLE) used * 100.0 / (NETDATA_DOUBLE) max; - else - percent = 0.0; - - buffer_json_add_array_item_object(wb); - buffer_json_member_add_uint64(wb, "tier", tier); - char human_retention[128]; - convert_seconds_to_dhms((time_t) group_seconds, human_retention, sizeof(human_retention) - 1); - buffer_json_member_add_string(wb, "point_every", human_retention); - - buffer_json_member_add_uint64(wb, "metrics", storage_engine_metrics(eng->seb, localhost->db[tier].si)); - buffer_json_member_add_uint64(wb, "samples", storage_engine_samples(eng->seb, localhost->db[tier].si)); - - if(used || max) { - buffer_json_member_add_uint64(wb, "disk_used", used); - buffer_json_member_add_uint64(wb, "disk_max", max); - buffer_json_member_add_double(wb, "disk_percent", percent); - } - - if(first_time_s) { - time_t retention = now_s - first_time_s; - - buffer_json_member_add_time_t(wb, "from", first_time_s); - buffer_json_member_add_time_t(wb, "to", now_s); - buffer_json_member_add_time_t(wb, "retention", retention); - - convert_seconds_to_dhms(retention, human_retention, sizeof(human_retention) - 1); - buffer_json_member_add_string(wb, "retention_human", human_retention); - - if(used || max) { // we have disk space information - time_t time_retention = 0; -#ifdef ENABLE_DBENGINE - time_retention = multidb_ctx[tier]->config.max_retention_s; -#endif - time_t space_retention = (time_t)((NETDATA_DOUBLE)(now_s - first_time_s) * 100.0 / percent); - time_t actual_retention = MIN(space_retention, time_retention ? 
time_retention : space_retention); - - if (time_retention) { - convert_seconds_to_dhms(time_retention, human_retention, sizeof(human_retention) - 1); - buffer_json_member_add_time_t(wb, "requested_retention", time_retention); - buffer_json_member_add_string(wb, "requested_retention_human", human_retention); - } - - convert_seconds_to_dhms(actual_retention, human_retention, sizeof(human_retention) - 1); - buffer_json_member_add_time_t(wb, "expected_retention", actual_retention); - buffer_json_member_add_string(wb, "expected_retention_human", human_retention); - } - } - - if(currently_collected_metrics) - buffer_json_member_add_uint64(wb, "currently_collected_metrics", currently_collected_metrics); - - buffer_json_object_close(wb); - } - buffer_json_array_close(wb); // db_size - } - - if(timings) - buffer_json_query_timings(wb, "timings", timings); - - buffer_json_object_close(wb); - - if(array) - buffer_json_array_close(wb); -} - -void buffer_json_cloud_timings(BUFFER *wb, const char *key, struct query_timings *timings) { - if(!timings->finished_ut) - timings->finished_ut = now_monotonic_usec(); - - buffer_json_member_add_object(wb, key); - buffer_json_member_add_double(wb, "routing_ms", 0.0); - buffer_json_member_add_double(wb, "node_max_ms", 0.0); - buffer_json_member_add_double(wb, "total_ms", (NETDATA_DOUBLE)(timings->finished_ut - timings->received_ut) / USEC_PER_MS); - buffer_json_object_close(wb); -} - -static void functions_insert_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { - struct function_v2_entry *t = value; - - // it is initialized with a static reference - we need to mallocz() the array - size_t *v = t->node_ids; - t->node_ids = mallocz(sizeof(size_t)); - *t->node_ids = *v; - t->size = 1; - t->used = 1; -} - -static bool functions_conflict_callback(const DICTIONARY_ITEM *item __maybe_unused, void *old_value, void *new_value, void *data __maybe_unused) { - struct function_v2_entry *t = old_value, *n = 
new_value; - size_t *v = n->node_ids; - - if(t->used >= t->size) { - t->node_ids = reallocz(t->node_ids, t->size * 2 * sizeof(size_t)); - t->size *= 2; - } - - t->node_ids[t->used++] = *v; - - return true; -} - -static void functions_delete_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { - struct function_v2_entry *t = value; - freez(t->node_ids); -} - -static bool contexts_conflict_callback(const DICTIONARY_ITEM *item __maybe_unused, void *old_value, void *new_value, void *data __maybe_unused) { - struct context_v2_entry *o = old_value; - struct context_v2_entry *n = new_value; - - o->count++; - - if(o->family != n->family) { - if((o->flags & RRD_FLAG_COLLECTED) && !(n->flags & RRD_FLAG_COLLECTED)) - // keep old - ; - else if(!(o->flags & RRD_FLAG_COLLECTED) && (n->flags & RRD_FLAG_COLLECTED)) { - // keep new - string_freez(o->family); - o->family = string_dup(n->family); - } - else { - // merge - STRING *old_family = o->family; - o->family = string_2way_merge(o->family, n->family); - string_freez(old_family); - } - } - - if(o->priority != n->priority) { - if((o->flags & RRD_FLAG_COLLECTED) && !(n->flags & RRD_FLAG_COLLECTED)) - // keep o - ; - else if(!(o->flags & RRD_FLAG_COLLECTED) && (n->flags & RRD_FLAG_COLLECTED)) - // keep n - o->priority = n->priority; - else - // keep the min - o->priority = MIN(o->priority, n->priority); - } - - if(o->first_time_s && n->first_time_s) - o->first_time_s = MIN(o->first_time_s, n->first_time_s); - else if(!o->first_time_s) - o->first_time_s = n->first_time_s; - - if(o->last_time_s && n->last_time_s) - o->last_time_s = MAX(o->last_time_s, n->last_time_s); - else if(!o->last_time_s) - o->last_time_s = n->last_time_s; - - o->flags |= n->flags; - o->match = MIN(o->match, n->match); - - string_freez(n->family); - - return true; -} - -static void contexts_delete_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { - struct context_v2_entry *z 
= value; - string_freez(z->family); -} - -static void rrdcontext_v2_set_transition_filter(const char *machine_guid, const char *context, time_t alarm_id, void *data) { - struct rrdcontext_to_json_v2_data *ctl = data; - - if(machine_guid && *machine_guid) { - if(ctl->nodes.scope_pattern) - simple_pattern_free(ctl->nodes.scope_pattern); - - if(ctl->nodes.pattern) - simple_pattern_free(ctl->nodes.pattern); - - ctl->nodes.scope_pattern = string_to_simple_pattern(machine_guid); - ctl->nodes.pattern = NULL; - } - - if(context && *context) { - if(ctl->contexts.scope_pattern) - simple_pattern_free(ctl->contexts.scope_pattern); - - if(ctl->contexts.pattern) - simple_pattern_free(ctl->contexts.pattern); - - ctl->contexts.scope_pattern = string_to_simple_pattern(context); - ctl->contexts.pattern = NULL; - } - - ctl->alerts.alarm_id_filter = alarm_id; -} - -struct alert_instances_callback_data { - BUFFER *wb; - struct rrdcontext_to_json_v2_data *ctl; - bool debug; -}; - -static void contexts_v2_alert_config_to_json_from_sql_alert_config_data(struct sql_alert_config_data *t, void *data) { - struct alert_transitions_callback_data *d = data; - BUFFER *wb = d->wb; - bool debug = d->debug; - d->configs_added++; - - if(d->only_one_config) - buffer_json_add_array_item_object(wb); // alert config - - { - buffer_json_member_add_string(wb, "name", t->name); - buffer_json_member_add_uuid(wb, "config_hash_id", t->config_hash_id); - - buffer_json_member_add_object(wb, "selectors"); - { - bool is_template = t->selectors.on_template && *t->selectors.on_template ? true : false; - buffer_json_member_add_string(wb, "type", is_template ? "template" : "alarm"); - buffer_json_member_add_string(wb, "on", is_template ? 
t->selectors.on_template : t->selectors.on_key); - - buffer_json_member_add_string(wb, "families", t->selectors.families); - buffer_json_member_add_string(wb, "host_labels", t->selectors.host_labels); - buffer_json_member_add_string(wb, "chart_labels", t->selectors.chart_labels); - } - buffer_json_object_close(wb); // selectors - - buffer_json_member_add_object(wb, "value"); // value - { - // buffer_json_member_add_string(wb, "every", t->value.every); // does not exist in Netdata Cloud - buffer_json_member_add_string(wb, "units", t->value.units); - buffer_json_member_add_uint64(wb, "update_every", t->value.update_every); - - if (t->value.db.after || debug) { - buffer_json_member_add_object(wb, "db"); - { - // buffer_json_member_add_string(wb, "lookup", t->value.db.lookup); // does not exist in Netdata Cloud - - buffer_json_member_add_time_t(wb, "after", t->value.db.after); - buffer_json_member_add_time_t(wb, "before", t->value.db.before); - buffer_json_member_add_string(wb, "time_group_condition", alerts_group_conditions_id2txt(t->value.db.time_group_condition)); - buffer_json_member_add_double(wb, "time_group_value", t->value.db.time_group_value); - buffer_json_member_add_string(wb, "dims_group", alerts_dims_grouping_id2group(t->value.db.dims_group)); - buffer_json_member_add_string(wb, "data_source", alerts_data_source_id2source(t->value.db.data_source)); - buffer_json_member_add_string(wb, "method", t->value.db.method); - buffer_json_member_add_string(wb, "dimensions", t->value.db.dimensions); - rrdr_options_to_buffer_json_array(wb, "options", (RRDR_OPTIONS)t->value.db.options); - } - buffer_json_object_close(wb); // db - } - - if (t->value.calc || debug) - buffer_json_member_add_string(wb, "calc", t->value.calc); - } - buffer_json_object_close(wb); // value - - if (t->status.warn || t->status.crit || debug) { - buffer_json_member_add_object(wb, "status"); // status - { - NETDATA_DOUBLE green = t->status.green ? 
str2ndd(t->status.green, NULL) : NAN; - NETDATA_DOUBLE red = t->status.red ? str2ndd(t->status.red, NULL) : NAN; - - if (!isnan(green) || debug) - buffer_json_member_add_double(wb, "green", green); - - if (!isnan(red) || debug) - buffer_json_member_add_double(wb, "red", red); - - if (t->status.warn || debug) - buffer_json_member_add_string(wb, "warn", t->status.warn); - - if (t->status.crit || debug) - buffer_json_member_add_string(wb, "crit", t->status.crit); - } - buffer_json_object_close(wb); // status - } - - buffer_json_member_add_object(wb, "notification"); - { - buffer_json_member_add_string(wb, "type", "agent"); - buffer_json_member_add_string(wb, "exec", t->notification.exec ? t->notification.exec : NULL); - buffer_json_member_add_string(wb, "to", t->notification.to_key ? t->notification.to_key : string2str(localhost->health.health_default_recipient)); - buffer_json_member_add_string(wb, "delay", t->notification.delay); - buffer_json_member_add_string(wb, "repeat", t->notification.repeat); - buffer_json_member_add_string(wb, "options", t->notification.options); - } - buffer_json_object_close(wb); // notification - - buffer_json_member_add_string(wb, "class", t->classification); - buffer_json_member_add_string(wb, "component", t->component); - buffer_json_member_add_string(wb, "type", t->type); - buffer_json_member_add_string(wb, "info", t->info); - buffer_json_member_add_string(wb, "summary", t->summary); - // buffer_json_member_add_string(wb, "source", t->source); // moved to alert instance - } - - if(d->only_one_config) - buffer_json_object_close(wb); -} - -int contexts_v2_alert_config_to_json(struct web_client *w, const char *config_hash_id) { - struct alert_transitions_callback_data data = { - .wb = w->response.data, - .debug = false, - .only_one_config = false, - }; - DICTIONARY *configs = dictionary_create(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE); - dictionary_set(configs, config_hash_id, NULL, 0); - - 
buffer_flush(w->response.data); - - buffer_json_initialize(w->response.data, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); - - int added = sql_get_alert_configuration(configs, contexts_v2_alert_config_to_json_from_sql_alert_config_data, &data, false); - buffer_json_finalize(w->response.data); - - int ret = HTTP_RESP_OK; - - if(added <= 0) { - buffer_flush(w->response.data); - w->response.data->content_type = CT_TEXT_PLAIN; - if(added < 0) { - buffer_strcat(w->response.data, "Failed to execute SQL query."); - ret = HTTP_RESP_INTERNAL_SERVER_ERROR; - } - else { - buffer_strcat(w->response.data, "Config is not found."); - ret = HTTP_RESP_NOT_FOUND; - } - } - - return ret; -} - -static int contexts_v2_alert_instance_to_json_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data) { - struct sql_alert_instance_v2_entry *t = value; - struct alert_instances_callback_data *d = data; - struct rrdcontext_to_json_v2_data *ctl = d->ctl; (void)ctl; - bool debug = d->debug; (void)debug; - BUFFER *wb = d->wb; - - buffer_json_add_array_item_object(wb); - { - buffer_json_member_add_uint64(wb, "ni", t->ni); - - buffer_json_member_add_string(wb, "nm", string2str(t->name)); - buffer_json_member_add_string(wb, "ch", string2str(t->chart_id)); - buffer_json_member_add_string(wb, "ch_n", string2str(t->chart_name)); - - if(ctl->request->options & CONTEXT_V2_OPTION_ALERTS_WITH_SUMMARY) - buffer_json_member_add_uint64(wb, "ati", t->ati); - - if(ctl->request->options & CONTEXT_V2_OPTION_ALERTS_WITH_INSTANCES) { - buffer_json_member_add_string(wb, "units", string2str(t->units)); - buffer_json_member_add_string(wb, "fami", string2str(t->family)); - buffer_json_member_add_string(wb, "info", string2str(t->info)); - buffer_json_member_add_string(wb, "sum", string2str(t->summary)); - buffer_json_member_add_string(wb, "ctx", string2str(t->context)); - buffer_json_member_add_string(wb, "st", rrdcalc_status2string(t->status)); - buffer_json_member_add_uuid(wb, "tr_i", 
&t->last_transition_id); - buffer_json_member_add_double(wb, "tr_v", t->last_status_change_value); - buffer_json_member_add_time_t(wb, "tr_t", t->last_status_change); - buffer_json_member_add_uuid(wb, "cfg", &t->config_hash_id); - buffer_json_member_add_string(wb, "src", string2str(t->source)); - - buffer_json_member_add_string(wb, "to", string2str(t->recipient)); - buffer_json_member_add_string(wb, "tp", string2str(t->type)); - buffer_json_member_add_string(wb, "cm", string2str(t->component)); - buffer_json_member_add_string(wb, "cl", string2str(t->classification)); - - // Agent specific fields - buffer_json_member_add_uint64(wb, "gi", t->global_id); - // rrdcalc_flags_to_json_array (wb, "flags", t->flags); - } - - if(ctl->request->options & CONTEXT_V2_OPTION_ALERTS_WITH_VALUES) { - // Netdata Cloud fetched these by querying the agents - buffer_json_member_add_double(wb, "v", t->value); - buffer_json_member_add_time_t(wb, "t", t->last_updated); - } - } - buffer_json_object_close(wb); // alert instance - - return 1; -} - -static void contexts_v2_alerts_by_x_update_prototypes(void *data, STRING *type, STRING *component, STRING *classification, STRING *recipient) { - struct rrdcontext_to_json_v2_data *ctl = data; - - dictionary_set_advanced(ctl->alerts.by_type, string2str(type), (ssize_t)string_strlen(type), NULL, sizeof(struct alert_by_x_entry), NULL); - dictionary_set_advanced(ctl->alerts.by_component, string2str(component), (ssize_t)string_strlen(component), NULL, sizeof(struct alert_by_x_entry), NULL); - dictionary_set_advanced(ctl->alerts.by_classification, string2str(classification), (ssize_t)string_strlen(classification), NULL, sizeof(struct alert_by_x_entry), NULL); - dictionary_set_advanced(ctl->alerts.by_recipient, string2str(recipient), (ssize_t)string_strlen(recipient), NULL, sizeof(struct alert_by_x_entry), NULL); -} - -static void contexts_v2_alerts_by_x_to_json(BUFFER *wb, DICTIONARY *dict, const char *key) { - buffer_json_member_add_array(wb, key); - 
{ - struct alert_by_x_entry *b; - dfe_start_read(dict, b) { - buffer_json_add_array_item_object(wb); - { - buffer_json_member_add_string(wb, "name", b_dfe.name); - buffer_json_member_add_uint64(wb, "cr", b->running.counts.critical); - buffer_json_member_add_uint64(wb, "wr", b->running.counts.warning); - buffer_json_member_add_uint64(wb, "cl", b->running.counts.clear); - buffer_json_member_add_uint64(wb, "er", b->running.counts.error); - buffer_json_member_add_uint64(wb, "running", b->running.total); - - buffer_json_member_add_uint64(wb, "running_silent", b->running.silent); - - if(b->prototypes.available) - buffer_json_member_add_uint64(wb, "available", b->prototypes.available); - } - buffer_json_object_close(wb); - } - dfe_done(b); - } - buffer_json_array_close(wb); -} - -static void contexts_v2_alert_instances_to_json(BUFFER *wb, const char *key, struct rrdcontext_to_json_v2_data *ctl, bool debug) { - buffer_json_member_add_array(wb, key); - { - struct alert_instances_callback_data data = { - .wb = wb, - .ctl = ctl, - .debug = debug, - }; - dictionary_walkthrough_rw(ctl->alerts.alert_instances, DICTIONARY_LOCK_READ, - contexts_v2_alert_instance_to_json_callback, &data); - } - buffer_json_array_close(wb); // alerts_instances -} - -static void contexts_v2_alerts_to_json(BUFFER *wb, struct rrdcontext_to_json_v2_data *ctl, bool debug) { - if(ctl->request->options & CONTEXT_V2_OPTION_ALERTS_WITH_SUMMARY) { - buffer_json_member_add_array(wb, "alerts"); - { - struct alert_v2_entry *t; - dfe_start_read(ctl->alerts.summary, t) - { - buffer_json_add_array_item_object(wb); - { - buffer_json_member_add_uint64(wb, "ati", t->ati); - - buffer_json_member_add_array(wb, "ni"); - void *host_guid; - dfe_start_read(t->nodes, host_guid) { - struct contexts_v2_node *cn = dictionary_get(ctl->nodes.dict,host_guid_dfe.name); - buffer_json_add_array_item_int64(wb, (int64_t) cn->ni); - } - dfe_done(host_guid); - buffer_json_array_close(wb); - - buffer_json_member_add_string(wb, "nm", 
string2str(t->name)); - buffer_json_member_add_string(wb, "sum", string2str(t->summary)); - - buffer_json_member_add_uint64(wb, "cr", t->counts.critical); - buffer_json_member_add_uint64(wb, "wr", t->counts.warning); - buffer_json_member_add_uint64(wb, "cl", t->counts.clear); - buffer_json_member_add_uint64(wb, "er", t->counts.error); - - buffer_json_member_add_uint64(wb, "in", t->instances); - buffer_json_member_add_uint64(wb, "nd", dictionary_entries(t->nodes)); - buffer_json_member_add_uint64(wb, "cfg", dictionary_entries(t->configs)); - - buffer_json_member_add_array(wb, "ctx"); - rrdlabels_key_to_buffer_array_item(t->context, wb); - buffer_json_array_close(wb); // ctx - - buffer_json_member_add_array(wb, "cls"); - rrdlabels_key_to_buffer_array_item(t->classification, wb); - buffer_json_array_close(wb); // classification - - - buffer_json_member_add_array(wb, "cp"); - rrdlabels_key_to_buffer_array_item(t->component, wb); - buffer_json_array_close(wb); // component - - buffer_json_member_add_array(wb, "ty"); - rrdlabels_key_to_buffer_array_item(t->type, wb); - buffer_json_array_close(wb); // type - - buffer_json_member_add_array(wb, "to"); - rrdlabels_key_to_buffer_array_item(t->recipient, wb); - buffer_json_array_close(wb); // recipient - } - buffer_json_object_close(wb); // alert name - } - dfe_done(t); - } - buffer_json_array_close(wb); // alerts - - health_prototype_metadata_foreach(ctl, contexts_v2_alerts_by_x_update_prototypes); - contexts_v2_alerts_by_x_to_json(wb, ctl->alerts.by_type, "alerts_by_type"); - contexts_v2_alerts_by_x_to_json(wb, ctl->alerts.by_component, "alerts_by_component"); - contexts_v2_alerts_by_x_to_json(wb, ctl->alerts.by_classification, "alerts_by_classification"); - contexts_v2_alerts_by_x_to_json(wb, ctl->alerts.by_recipient, "alerts_by_recipient"); - contexts_v2_alerts_by_x_to_json(wb, ctl->alerts.by_module, "alerts_by_module"); - } - - if(ctl->request->options & 
(CONTEXT_V2_OPTION_ALERTS_WITH_INSTANCES|CONTEXT_V2_OPTION_ALERTS_WITH_VALUES)) { - contexts_v2_alert_instances_to_json(wb, "alert_instances", ctl, debug); - } -} - -#define SQL_TRANSITION_DATA_SMALL_STRING (6 * 8) -#define SQL_TRANSITION_DATA_MEDIUM_STRING (12 * 8) -#define SQL_TRANSITION_DATA_BIG_STRING 512 - -struct sql_alert_transition_fixed_size { - usec_t global_id; - nd_uuid_t transition_id; - nd_uuid_t host_id; - nd_uuid_t config_hash_id; - uint32_t alarm_id; - char alert_name[SQL_TRANSITION_DATA_SMALL_STRING]; - char chart[RRD_ID_LENGTH_MAX]; - char chart_name[RRD_ID_LENGTH_MAX]; - char chart_context[SQL_TRANSITION_DATA_MEDIUM_STRING]; - char family[SQL_TRANSITION_DATA_SMALL_STRING]; - char recipient[SQL_TRANSITION_DATA_MEDIUM_STRING]; - char units[SQL_TRANSITION_DATA_SMALL_STRING]; - char exec[SQL_TRANSITION_DATA_BIG_STRING]; - char info[SQL_TRANSITION_DATA_BIG_STRING]; - char summary[SQL_TRANSITION_DATA_BIG_STRING]; - char classification[SQL_TRANSITION_DATA_SMALL_STRING]; - char type[SQL_TRANSITION_DATA_SMALL_STRING]; - char component[SQL_TRANSITION_DATA_SMALL_STRING]; - time_t when_key; - time_t duration; - time_t non_clear_duration; - uint64_t flags; - time_t delay_up_to_timestamp; - time_t exec_run_timestamp; - int exec_code; - int new_status; - int old_status; - int delay; - time_t last_repeat; - NETDATA_DOUBLE new_value; - NETDATA_DOUBLE old_value; - - char machine_guid[UUID_STR_LEN]; - struct sql_alert_transition_fixed_size *next; - struct sql_alert_transition_fixed_size *prev; -}; - -static struct sql_alert_transition_fixed_size *contexts_v2_alert_transition_dup(struct sql_alert_transition_data *t, const char *machine_guid, struct sql_alert_transition_fixed_size *dst) { - struct sql_alert_transition_fixed_size *n = dst ? 
dst : mallocz(sizeof(*n)); - - n->global_id = t->global_id; - uuid_copy(n->transition_id, *t->transition_id); - uuid_copy(n->host_id, *t->host_id); - uuid_copy(n->config_hash_id, *t->config_hash_id); - n->alarm_id = t->alarm_id; - strncpyz(n->alert_name, t->alert_name ? t->alert_name : "", sizeof(n->alert_name) - 1); - strncpyz(n->chart, t->chart ? t->chart : "", sizeof(n->chart) - 1); - strncpyz(n->chart_name, t->chart_name ? t->chart_name : n->chart, sizeof(n->chart_name) - 1); - strncpyz(n->chart_context, t->chart_context ? t->chart_context : "", sizeof(n->chart_context) - 1); - strncpyz(n->family, t->family ? t->family : "", sizeof(n->family) - 1); - strncpyz(n->recipient, t->recipient ? t->recipient : "", sizeof(n->recipient) - 1); - strncpyz(n->units, t->units ? t->units : "", sizeof(n->units) - 1); - strncpyz(n->exec, t->exec ? t->exec : "", sizeof(n->exec) - 1); - strncpyz(n->info, t->info ? t->info : "", sizeof(n->info) - 1); - strncpyz(n->summary, t->summary ? t->summary : "", sizeof(n->summary) - 1); - strncpyz(n->classification, t->classification ? t->classification : "", sizeof(n->classification) - 1); - strncpyz(n->type, t->type ? t->type : "", sizeof(n->type) - 1); - strncpyz(n->component, t->component ? 
t->component : "", sizeof(n->component) - 1); - n->when_key = t->when_key; - n->duration = t->duration; - n->non_clear_duration = t->non_clear_duration; - n->flags = t->flags; - n->delay_up_to_timestamp = t->delay_up_to_timestamp; - n->exec_run_timestamp = t->exec_run_timestamp; - n->exec_code = t->exec_code; - n->new_status = t->new_status; - n->old_status = t->old_status; - n->delay = t->delay; - n->last_repeat = t->last_repeat; - n->new_value = t->new_value; - n->old_value = t->old_value; - - memcpy(n->machine_guid, machine_guid, sizeof(n->machine_guid)); - n->next = n->prev = NULL; - - return n; -} - -static void contexts_v2_alert_transition_free(struct sql_alert_transition_fixed_size *t) { - freez(t); -} - -static inline void contexts_v2_alert_transition_keep(struct alert_transitions_callback_data *d, struct sql_alert_transition_data *t, const char *machine_guid) { - d->items_matched++; - - if(unlikely(t->global_id <= d->ctl->request->alerts.global_id_anchor)) { - // this is in our past, we are not interested - d->operations.skips_before++; - return; - } - - if(unlikely(!d->base)) { - d->last_added = contexts_v2_alert_transition_dup(t, machine_guid, NULL); - DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(d->base, d->last_added, prev, next); - d->items_to_return++; - d->operations.first++; - return; - } - - struct sql_alert_transition_fixed_size *last = d->last_added; - while(last->prev != d->base->prev && t->global_id > last->prev->global_id) { - last = last->prev; - d->operations.backwards++; - } - - while(last->next && t->global_id < last->next->global_id) { - last = last->next; - d->operations.forwards++; - } - - if(d->items_to_return >= d->max_items_to_return) { - if(last == d->base->prev && t->global_id < last->global_id) { - d->operations.skips_after++; - return; - } - } - - d->items_to_return++; - - if(t->global_id > last->global_id) { - if(d->items_to_return > d->max_items_to_return) { - d->items_to_return--; - d->operations.shifts++; - d->last_added = 
d->base->prev; - DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(d->base, d->last_added, prev, next); - d->last_added = contexts_v2_alert_transition_dup(t, machine_guid, d->last_added); - } - DOUBLE_LINKED_LIST_PREPEND_ITEM_UNSAFE(d->base, d->last_added, prev, next); - d->operations.prepend++; - } - else { - d->last_added = contexts_v2_alert_transition_dup(t, machine_guid, NULL); - DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(d->base, d->last_added, prev, next); - d->operations.append++; - } - - while(d->items_to_return > d->max_items_to_return) { - // we have to remove something - - struct sql_alert_transition_fixed_size *tmp = d->base->prev; - DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(d->base, tmp, prev, next); - d->items_to_return--; - - if(unlikely(d->last_added == tmp)) - d->last_added = d->base; - - contexts_v2_alert_transition_free(tmp); - - d->operations.shifts++; - } -} - -static void contexts_v2_alert_transition_callback(struct sql_alert_transition_data *t, void *data) { - struct alert_transitions_callback_data *d = data; - d->items_evaluated++; - - char machine_guid[UUID_STR_LEN] = ""; - uuid_unparse_lower(*t->host_id, machine_guid); - - const char *facets[ATF_TOTAL_ENTRIES] = { - [ATF_STATUS] = rrdcalc_status2string(t->new_status), - [ATF_CLASS] = t->classification, - [ATF_TYPE] = t->type, - [ATF_COMPONENT] = t->component, - [ATF_ROLE] = t->recipient && *t->recipient ? 
t->recipient : string2str(localhost->health.health_default_recipient), - [ATF_NODE] = machine_guid, - [ATF_ALERT_NAME] = t->alert_name, - [ATF_CHART_NAME] = t->chart_name, - [ATF_CONTEXT] = t->chart_context, - }; - - for(size_t i = 0; i < ATF_TOTAL_ENTRIES ;i++) { - if (!facets[i] || !*facets[i]) facets[i] = "unknown"; - - struct facet_entry tmp = { - .count = 0, - }; - dictionary_set(d->facets[i].dict, facets[i], &tmp, sizeof(tmp)); - } - - bool selected[ATF_TOTAL_ENTRIES] = { 0 }; - - uint32_t selected_by = 0; - for(size_t i = 0; i < ATF_TOTAL_ENTRIES ;i++) { - selected[i] = !d->facets[i].pattern || simple_pattern_matches(d->facets[i].pattern, facets[i]); - if(selected[i]) - selected_by++; - } - - if(selected_by == ATF_TOTAL_ENTRIES) { - // this item is selected by all facets - // put it in our result (if it fits) - contexts_v2_alert_transition_keep(d, t, machine_guid); - } - - if(selected_by >= ATF_TOTAL_ENTRIES - 1) { - // this item is selected by all, or all except one facet - // in both cases we need to add it to our counters - - for (size_t i = 0; i < ATF_TOTAL_ENTRIES; i++) { - uint32_t counted_by = selected_by; - - if (counted_by != ATF_TOTAL_ENTRIES) { - counted_by = 0; - for (size_t j = 0; j < ATF_TOTAL_ENTRIES; j++) { - if (i == j || selected[j]) - counted_by++; - } - } - - if (counted_by == ATF_TOTAL_ENTRIES) { - // we need to count it on this facet - struct facet_entry *x = dictionary_get(d->facets[i].dict, facets[i]); - internal_fatal(!x, "facet is not found"); - if(x) - x->count++; - } - } - } -} - -static void contexts_v2_alert_transitions_to_json(BUFFER *wb, struct rrdcontext_to_json_v2_data *ctl, bool debug) { - struct alert_transitions_callback_data data = { - .wb = wb, - .ctl = ctl, - .debug = debug, - .only_one_config = true, - .max_items_to_return = ctl->request->alerts.last, - .items_to_return = 0, - .base = NULL, - }; - - for(size_t i = 0; i < ATF_TOTAL_ENTRIES ;i++) { - data.facets[i].dict = 
dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_FIXED_SIZE | DICT_OPTION_DONT_OVERWRITE_VALUE, NULL, sizeof(struct facet_entry)); - if(ctl->request->alerts.facets[i]) - data.facets[i].pattern = simple_pattern_create(ctl->request->alerts.facets[i], ",|", SIMPLE_PATTERN_EXACT, false); - } - - sql_alert_transitions( - ctl->nodes.dict, - ctl->window.after, - ctl->window.before, - ctl->request->contexts, - ctl->request->alerts.alert, - ctl->request->alerts.transition, - contexts_v2_alert_transition_callback, - &data, - debug); - - buffer_json_member_add_array(wb, "facets"); - for (size_t i = 0; i < ATF_TOTAL_ENTRIES; i++) { - buffer_json_add_array_item_object(wb); - { - buffer_json_member_add_string(wb, "id", alert_transition_facets[i].id); - buffer_json_member_add_string(wb, "name", alert_transition_facets[i].name); - buffer_json_member_add_uint64(wb, "order", alert_transition_facets[i].order); - buffer_json_member_add_array(wb, "options"); - { - struct facet_entry *x; - dfe_start_read(data.facets[i].dict, x) { - buffer_json_add_array_item_object(wb); - { - buffer_json_member_add_string(wb, "id", x_dfe.name); - if (i == ATF_NODE) { - RRDHOST *host = rrdhost_find_by_guid(x_dfe.name); - if (host) - buffer_json_member_add_string(wb, "name", rrdhost_hostname(host)); - else - buffer_json_member_add_string(wb, "name", x_dfe.name); - } else - buffer_json_member_add_string(wb, "name", x_dfe.name); - buffer_json_member_add_uint64(wb, "count", x->count); - } - buffer_json_object_close(wb); - } - dfe_done(x); - } - buffer_json_array_close(wb); // options - } - buffer_json_object_close(wb); // facet - } - buffer_json_array_close(wb); // facets - - buffer_json_member_add_array(wb, "transitions"); - for(struct sql_alert_transition_fixed_size *t = data.base; t ; t = t->next) { - buffer_json_add_array_item_object(wb); - { - RRDHOST *host = rrdhost_find_by_guid(t->machine_guid); - - buffer_json_member_add_uint64(wb, "gi", t->global_id); - 
buffer_json_member_add_uuid(wb, "transition_id", &t->transition_id); - buffer_json_member_add_uuid(wb, "config_hash_id", &t->config_hash_id); - buffer_json_member_add_string(wb, "machine_guid", t->machine_guid); - - if(host) { - buffer_json_member_add_string(wb, "hostname", rrdhost_hostname(host)); - - if(host->node_id) - buffer_json_member_add_uuid(wb, "node_id", host->node_id); - } - - buffer_json_member_add_string(wb, "alert", *t->alert_name ? t->alert_name : NULL); - buffer_json_member_add_string(wb, "instance", *t->chart ? t->chart : NULL); - buffer_json_member_add_string(wb, "instance_n", *t->chart_name ? t->chart_name : NULL); - buffer_json_member_add_string(wb, "context", *t->chart_context ? t->chart_context : NULL); - // buffer_json_member_add_string(wb, "family", *t->family ? t->family : NULL); - buffer_json_member_add_string(wb, "component", *t->component ? t->component : NULL); - buffer_json_member_add_string(wb, "classification", *t->classification ? t->classification : NULL); - buffer_json_member_add_string(wb, "type", *t->type ? t->type : NULL); - - buffer_json_member_add_time_t(wb, "when", t->when_key); - buffer_json_member_add_string(wb, "info", *t->info ? t->info : ""); - buffer_json_member_add_string(wb, "summary", *t->summary ? t->summary : ""); - buffer_json_member_add_string(wb, "units", *t->units ? 
t->units : NULL); - buffer_json_member_add_object(wb, "new"); - { - buffer_json_member_add_string(wb, "status", rrdcalc_status2string(t->new_status)); - buffer_json_member_add_double(wb, "value", t->new_value); - } - buffer_json_object_close(wb); // new - buffer_json_member_add_object(wb, "old"); - { - buffer_json_member_add_string(wb, "status", rrdcalc_status2string(t->old_status)); - buffer_json_member_add_double(wb, "value", t->old_value); - buffer_json_member_add_time_t(wb, "duration", t->duration); - buffer_json_member_add_time_t(wb, "raised_duration", t->non_clear_duration); - } - buffer_json_object_close(wb); // old - - buffer_json_member_add_object(wb, "notification"); - { - buffer_json_member_add_time_t(wb, "when", t->exec_run_timestamp); - buffer_json_member_add_time_t(wb, "delay", t->delay); - buffer_json_member_add_time_t(wb, "delay_up_to_time", t->delay_up_to_timestamp); - health_entry_flags_to_json_array(wb, "flags", t->flags); - buffer_json_member_add_string(wb, "exec", *t->exec ? t->exec : string2str(localhost->health.health_default_exec)); - buffer_json_member_add_uint64(wb, "exec_code", t->exec_code); - buffer_json_member_add_string(wb, "to", *t->recipient ? 
t->recipient : string2str(localhost->health.health_default_recipient)); - } - buffer_json_object_close(wb); // notification - } - buffer_json_object_close(wb); // a transition - } - buffer_json_array_close(wb); // all transitions - - if(ctl->options & CONTEXT_V2_OPTION_ALERTS_WITH_CONFIGURATIONS) { - DICTIONARY *configs = dictionary_create(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE); - - for(struct sql_alert_transition_fixed_size *t = data.base; t ; t = t->next) { - char guid[UUID_STR_LEN]; - uuid_unparse_lower(t->config_hash_id, guid); - dictionary_set(configs, guid, NULL, 0); - } - - buffer_json_member_add_array(wb, "configurations"); - sql_get_alert_configuration(configs, contexts_v2_alert_config_to_json_from_sql_alert_config_data, &data, debug); - buffer_json_array_close(wb); - - dictionary_destroy(configs); - } - - while(data.base) { - struct sql_alert_transition_fixed_size *t = data.base; - DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(data.base, t, prev, next); - contexts_v2_alert_transition_free(t); - } - - for(size_t i = 0; i < ATF_TOTAL_ENTRIES ;i++) { - dictionary_destroy(data.facets[i].dict); - simple_pattern_free(data.facets[i].pattern); - } - - buffer_json_member_add_object(wb, "items"); - { - // all the items in the window, under the scope_nodes, ignoring the facets (filters) - buffer_json_member_add_uint64(wb, "evaluated", data.items_evaluated); - - // all the items matching the query (if you didn't put anchor_gi and last, these are all the items you would get back) - buffer_json_member_add_uint64(wb, "matched", data.items_matched); - - // the items included in this response - buffer_json_member_add_uint64(wb, "returned", data.items_to_return); - - // same as last=X parameter - buffer_json_member_add_uint64(wb, "max_to_return", data.max_items_to_return); - - // items before the first returned, this should be 0 if anchor_gi is not set - buffer_json_member_add_uint64(wb, "before", data.operations.skips_before); - - // items after the last 
returned, when this is zero there aren't any items after the current list - buffer_json_member_add_uint64(wb, "after", data.operations.skips_after + data.operations.shifts); - } - buffer_json_object_close(wb); // items - - if(debug) { - buffer_json_member_add_object(wb, "stats"); - { - buffer_json_member_add_uint64(wb, "first", data.operations.first); - buffer_json_member_add_uint64(wb, "prepend", data.operations.prepend); - buffer_json_member_add_uint64(wb, "append", data.operations.append); - buffer_json_member_add_uint64(wb, "backwards", data.operations.backwards); - buffer_json_member_add_uint64(wb, "forwards", data.operations.forwards); - buffer_json_member_add_uint64(wb, "shifts", data.operations.shifts); - buffer_json_member_add_uint64(wb, "skips_before", data.operations.skips_before); - buffer_json_member_add_uint64(wb, "skips_after", data.operations.skips_after); - } - buffer_json_object_close(wb); - } -} - -int rrdcontext_to_json_v2(BUFFER *wb, struct api_v2_contexts_request *req, CONTEXTS_V2_MODE mode) { - int resp = HTTP_RESP_OK; - bool run = true; - - if(mode & CONTEXTS_V2_SEARCH) - mode |= CONTEXTS_V2_CONTEXTS; - - if(mode & (CONTEXTS_V2_AGENTS_INFO)) - mode |= CONTEXTS_V2_AGENTS; - - if(mode & (CONTEXTS_V2_FUNCTIONS | CONTEXTS_V2_CONTEXTS | CONTEXTS_V2_SEARCH | CONTEXTS_V2_NODES_INFO | CONTEXTS_V2_NODE_INSTANCES)) - mode |= CONTEXTS_V2_NODES; - - if(mode & CONTEXTS_V2_ALERTS) { - mode |= CONTEXTS_V2_NODES; - req->options &= ~CONTEXT_V2_OPTION_ALERTS_WITH_CONFIGURATIONS; - - if(!(req->options & (CONTEXT_V2_OPTION_ALERTS_WITH_SUMMARY|CONTEXT_V2_OPTION_ALERTS_WITH_INSTANCES|CONTEXT_V2_OPTION_ALERTS_WITH_VALUES))) - req->options |= CONTEXT_V2_OPTION_ALERTS_WITH_SUMMARY; - } - - if(mode & CONTEXTS_V2_ALERT_TRANSITIONS) { - mode |= CONTEXTS_V2_NODES; - req->options &= ~CONTEXT_V2_OPTION_ALERTS_WITH_INSTANCES; - } - - struct rrdcontext_to_json_v2_data ctl = { - .wb = wb, - .request = req, - .mode = mode, - .options = req->options, - .versions = { 0 }, - 
.nodes.scope_pattern = string_to_simple_pattern(req->scope_nodes), - .nodes.pattern = string_to_simple_pattern(req->nodes), - .contexts.pattern = string_to_simple_pattern(req->contexts), - .contexts.scope_pattern = string_to_simple_pattern(req->scope_contexts), - .q.pattern = string_to_simple_pattern_nocase(req->q), - .alerts.alert_name_pattern = string_to_simple_pattern(req->alerts.alert), - .window = { - .enabled = false, - .relative = false, - .after = req->after, - .before = req->before, - }, - .timings = { - .received_ut = now_monotonic_usec(), - } - }; - - bool debug = ctl.options & CONTEXT_V2_OPTION_DEBUG; - - if(mode & CONTEXTS_V2_NODES) { - ctl.nodes.dict = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, - NULL, sizeof(struct contexts_v2_node)); - } - - if(mode & CONTEXTS_V2_CONTEXTS) { - ctl.contexts.dict = dictionary_create_advanced( - DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, NULL, - sizeof(struct context_v2_entry)); - - dictionary_register_conflict_callback(ctl.contexts.dict, contexts_conflict_callback, &ctl); - dictionary_register_delete_callback(ctl.contexts.dict, contexts_delete_callback, &ctl); - } - - if(mode & CONTEXTS_V2_FUNCTIONS) { - ctl.functions.dict = dictionary_create_advanced( - DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, NULL, - sizeof(struct function_v2_entry)); - - dictionary_register_insert_callback(ctl.functions.dict, functions_insert_callback, &ctl); - dictionary_register_conflict_callback(ctl.functions.dict, functions_conflict_callback, &ctl); - dictionary_register_delete_callback(ctl.functions.dict, functions_delete_callback, &ctl); - } - - if(mode & CONTEXTS_V2_ALERTS) { - if(req->alerts.transition) { - ctl.options |= CONTEXT_V2_OPTION_ALERTS_WITH_INSTANCES|CONTEXT_V2_OPTION_ALERTS_WITH_VALUES; - run = sql_find_alert_transition(req->alerts.transition, 
rrdcontext_v2_set_transition_filter, &ctl); - if(!run) { - resp = HTTP_RESP_NOT_FOUND; - goto cleanup; - } - } - - ctl.alerts.summary = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, - NULL, sizeof(struct alert_v2_entry)); - - dictionary_register_insert_callback(ctl.alerts.summary, alerts_v2_insert_callback, &ctl); - dictionary_register_conflict_callback(ctl.alerts.summary, alerts_v2_conflict_callback, &ctl); - dictionary_register_delete_callback(ctl.alerts.summary, alerts_v2_delete_callback, &ctl); - - ctl.alerts.by_type = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, - NULL, sizeof(struct alert_by_x_entry)); - - dictionary_register_insert_callback(ctl.alerts.by_type, alerts_by_x_insert_callback, NULL); - dictionary_register_conflict_callback(ctl.alerts.by_type, alerts_by_x_conflict_callback, NULL); - - ctl.alerts.by_component = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, - NULL, sizeof(struct alert_by_x_entry)); - - dictionary_register_insert_callback(ctl.alerts.by_component, alerts_by_x_insert_callback, NULL); - dictionary_register_conflict_callback(ctl.alerts.by_component, alerts_by_x_conflict_callback, NULL); - - ctl.alerts.by_classification = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, - NULL, sizeof(struct alert_by_x_entry)); - - dictionary_register_insert_callback(ctl.alerts.by_classification, alerts_by_x_insert_callback, NULL); - dictionary_register_conflict_callback(ctl.alerts.by_classification, alerts_by_x_conflict_callback, NULL); - - ctl.alerts.by_recipient = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, - NULL, sizeof(struct alert_by_x_entry)); - - 
dictionary_register_insert_callback(ctl.alerts.by_recipient, alerts_by_x_insert_callback, NULL); - dictionary_register_conflict_callback(ctl.alerts.by_recipient, alerts_by_x_conflict_callback, NULL); - - ctl.alerts.by_module = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, - NULL, sizeof(struct alert_by_x_entry)); - - dictionary_register_insert_callback(ctl.alerts.by_module, alerts_by_x_insert_callback, NULL); - dictionary_register_conflict_callback(ctl.alerts.by_module, alerts_by_x_conflict_callback, NULL); - - if(ctl.options & (CONTEXT_V2_OPTION_ALERTS_WITH_INSTANCES | CONTEXT_V2_OPTION_ALERTS_WITH_VALUES)) { - ctl.alerts.alert_instances = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, - NULL, sizeof(struct sql_alert_instance_v2_entry)); - - dictionary_register_insert_callback(ctl.alerts.alert_instances, alert_instances_v2_insert_callback, &ctl); - dictionary_register_conflict_callback(ctl.alerts.alert_instances, alert_instances_v2_conflict_callback, &ctl); - dictionary_register_delete_callback(ctl.alerts.alert_instances, alert_instances_delete_callback, &ctl); - } - } - - if(req->after || req->before) { - ctl.window.relative = rrdr_relative_window_to_absolute_query(&ctl.window.after, &ctl.window.before, &ctl.now - , false - ); - ctl.window.enabled = !(mode & CONTEXTS_V2_ALERT_TRANSITIONS); - } - else - ctl.now = now_realtime_sec(); - - buffer_json_initialize(wb, "\"", "\"", 0, true, - ((req->options & CONTEXT_V2_OPTION_MINIFY) && !(req->options & CONTEXT_V2_OPTION_DEBUG)) ? 
BUFFER_JSON_OPTIONS_MINIFY : BUFFER_JSON_OPTIONS_DEFAULT); - - buffer_json_member_add_uint64(wb, "api", 2); - - if(req->options & CONTEXT_V2_OPTION_DEBUG) { - buffer_json_member_add_object(wb, "request"); - { - buffer_json_contexts_v2_mode_to_array(wb, "mode", mode); - web_client_api_request_v2_contexts_options_to_buffer_json_array(wb, "options", req->options); - - buffer_json_member_add_object(wb, "scope"); - { - buffer_json_member_add_string(wb, "scope_nodes", req->scope_nodes); - if (mode & (CONTEXTS_V2_CONTEXTS | CONTEXTS_V2_SEARCH | CONTEXTS_V2_ALERTS)) - buffer_json_member_add_string(wb, "scope_contexts", req->scope_contexts); - } - buffer_json_object_close(wb); - - buffer_json_member_add_object(wb, "selectors"); - { - buffer_json_member_add_string(wb, "nodes", req->nodes); - - if (mode & (CONTEXTS_V2_CONTEXTS | CONTEXTS_V2_SEARCH | CONTEXTS_V2_ALERTS)) - buffer_json_member_add_string(wb, "contexts", req->contexts); - - if(mode & (CONTEXTS_V2_ALERTS | CONTEXTS_V2_ALERT_TRANSITIONS)) { - buffer_json_member_add_object(wb, "alerts"); - - if(mode & CONTEXTS_V2_ALERTS) - web_client_api_request_v2_contexts_alerts_status_to_buffer_json_array(wb, "status", req->alerts.status); - - if(mode & CONTEXTS_V2_ALERT_TRANSITIONS) { - buffer_json_member_add_string(wb, "context", req->contexts); - buffer_json_member_add_uint64(wb, "anchor_gi", req->alerts.global_id_anchor); - buffer_json_member_add_uint64(wb, "last", req->alerts.last); - } - - buffer_json_member_add_string(wb, "alert", req->alerts.alert); - buffer_json_member_add_string(wb, "transition", req->alerts.transition); - buffer_json_object_close(wb); // alerts - } - } - buffer_json_object_close(wb); // selectors - - buffer_json_member_add_object(wb, "filters"); - { - if (mode & CONTEXTS_V2_SEARCH) - buffer_json_member_add_string(wb, "q", req->q); - - buffer_json_member_add_time_t(wb, "after", req->after); - buffer_json_member_add_time_t(wb, "before", req->before); - } - buffer_json_object_close(wb); // filters - - 
if(mode & CONTEXTS_V2_ALERT_TRANSITIONS) { - buffer_json_member_add_object(wb, "facets"); - { - for (int i = 0; i < ATF_TOTAL_ENTRIES; i++) { - buffer_json_member_add_string(wb, alert_transition_facets[i].query_param, req->alerts.facets[i]); - } - } - buffer_json_object_close(wb); // facets - } - } - buffer_json_object_close(wb); - } - - ssize_t ret = 0; - if(run) - ret = query_scope_foreach_host(ctl.nodes.scope_pattern, ctl.nodes.pattern, - rrdcontext_to_json_v2_add_host, &ctl, - &ctl.versions, ctl.q.host_node_id_str); - - if(unlikely(ret < 0)) { - buffer_flush(wb); - - if(ret == -2) { - buffer_strcat(wb, "query timeout"); - resp = HTTP_RESP_GATEWAY_TIMEOUT; - } - else { - buffer_strcat(wb, "query interrupted"); - resp = HTTP_RESP_CLIENT_CLOSED_REQUEST; - } - goto cleanup; - } - - ctl.timings.executed_ut = now_monotonic_usec(); - - if(mode & CONTEXTS_V2_ALERT_TRANSITIONS) { - contexts_v2_alert_transitions_to_json(wb, &ctl, debug); - } - else { - if (mode & CONTEXTS_V2_NODES) { - buffer_json_member_add_array(wb, "nodes"); - struct contexts_v2_node *t; - dfe_start_read(ctl.nodes.dict, t) { - rrdcontext_to_json_v2_rrdhost(wb, t->host, &ctl, t->ni); - } - dfe_done(t); - buffer_json_array_close(wb); - } - - if (mode & CONTEXTS_V2_FUNCTIONS) { - buffer_json_member_add_array(wb, "functions"); - { - struct function_v2_entry *t; - dfe_start_read(ctl.functions.dict, t) { - buffer_json_add_array_item_object(wb); - { - buffer_json_member_add_string(wb, "name", t_dfe.name); - buffer_json_member_add_string(wb, "help", string2str(t->help)); - buffer_json_member_add_array(wb, "ni"); - { - for (size_t i = 0; i < t->used; i++) - buffer_json_add_array_item_uint64(wb, t->node_ids[i]); - } - buffer_json_array_close(wb); - buffer_json_member_add_string(wb, "tags", string2str(t->tags)); - http_access2buffer_json_array(wb, "access", t->access); - buffer_json_member_add_uint64(wb, "priority", t->priority); - } - buffer_json_object_close(wb); - } - dfe_done(t); - } - 
buffer_json_array_close(wb); - } - - if (mode & CONTEXTS_V2_CONTEXTS) { - buffer_json_member_add_object(wb, "contexts"); - { - struct context_v2_entry *z; - dfe_start_read(ctl.contexts.dict, z) { - bool collected = z->flags & RRD_FLAG_COLLECTED; - - buffer_json_member_add_object(wb, string2str(z->id)); - { - buffer_json_member_add_string(wb, "family", string2str(z->family)); - buffer_json_member_add_uint64(wb, "priority", z->priority); - buffer_json_member_add_time_t(wb, "first_entry", z->first_time_s); - buffer_json_member_add_time_t(wb, "last_entry", collected ? ctl.now : z->last_time_s); - buffer_json_member_add_boolean(wb, "live", collected); - if (mode & CONTEXTS_V2_SEARCH) - buffer_json_member_add_string(wb, "match", fts_match_to_string(z->match)); - } - buffer_json_object_close(wb); - } - dfe_done(z); - } - buffer_json_object_close(wb); // contexts - } - - if (mode & CONTEXTS_V2_ALERTS) - contexts_v2_alerts_to_json(wb, &ctl, debug); - - if (mode & CONTEXTS_V2_SEARCH) { - buffer_json_member_add_object(wb, "searches"); - { - buffer_json_member_add_uint64(wb, "strings", ctl.q.fts.string_searches); - buffer_json_member_add_uint64(wb, "char", ctl.q.fts.char_searches); - buffer_json_member_add_uint64(wb, "total", ctl.q.fts.searches); - } - buffer_json_object_close(wb); - } - - if (mode & (CONTEXTS_V2_VERSIONS)) - version_hashes_api_v2(wb, &ctl.versions); - - if (mode & CONTEXTS_V2_AGENTS) - buffer_json_agents_v2(wb, &ctl.timings, ctl.now, mode & (CONTEXTS_V2_AGENTS_INFO), true); - } - - buffer_json_cloud_timings(wb, "timings", &ctl.timings); - - buffer_json_finalize(wb); - -cleanup: - dictionary_destroy(ctl.nodes.dict); - dictionary_destroy(ctl.contexts.dict); - dictionary_destroy(ctl.functions.dict); - dictionary_destroy(ctl.alerts.summary); - dictionary_destroy(ctl.alerts.alert_instances); - dictionary_destroy(ctl.alerts.by_type); - dictionary_destroy(ctl.alerts.by_component); - dictionary_destroy(ctl.alerts.by_classification); - 
dictionary_destroy(ctl.alerts.by_recipient); - dictionary_destroy(ctl.alerts.by_module); - simple_pattern_free(ctl.nodes.scope_pattern); - simple_pattern_free(ctl.nodes.pattern); - simple_pattern_free(ctl.contexts.pattern); - simple_pattern_free(ctl.contexts.scope_pattern); - simple_pattern_free(ctl.q.pattern); - simple_pattern_free(ctl.alerts.alert_name_pattern); - - return resp; -} diff --git a/src/database/contexts/api_v2_contexts.c b/src/database/contexts/api_v2_contexts.c new file mode 100644 index 00000000000000..000f6b78496ae4 --- /dev/null +++ b/src/database/contexts/api_v2_contexts.c @@ -0,0 +1,1001 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_contexts.h" + +#include "aclk/aclk_capas.h" + +// ---------------------------------------------------------------------------- +// /api/v2/contexts API + +static const char *fts_match_to_string(FTS_MATCH match) { + switch(match) { + case FTS_MATCHED_HOST: + return "HOST"; + + case FTS_MATCHED_CONTEXT: + return "CONTEXT"; + + case FTS_MATCHED_INSTANCE: + return "INSTANCE"; + + case FTS_MATCHED_DIMENSION: + return "DIMENSION"; + + case FTS_MATCHED_ALERT: + return "ALERT"; + + case FTS_MATCHED_ALERT_INFO: + return "ALERT_INFO"; + + case FTS_MATCHED_LABEL: + return "LABEL"; + + case FTS_MATCHED_FAMILY: + return "FAMILY"; + + case FTS_MATCHED_TITLE: + return "TITLE"; + + case FTS_MATCHED_UNITS: + return "UNITS"; + + default: + return "NONE"; + } +} + +struct function_v2_entry { + size_t size; + size_t used; + size_t *node_ids; + STRING *help; + STRING *tags; + HTTP_ACCESS access; + int priority; +}; + +struct context_v2_entry { + size_t count; + STRING *id; + STRING *family; + uint32_t priority; + time_t first_time_s; + time_t last_time_s; + RRD_FLAGS flags; + FTS_MATCH match; +}; + +static inline bool full_text_search_string(FTS_INDEX *fts, SIMPLE_PATTERN *q, STRING *ptr) { + fts->searches++; + fts->string_searches++; + return simple_pattern_matches_string(q, ptr); +} + +static inline bool 
full_text_search_char(FTS_INDEX *fts, SIMPLE_PATTERN *q, char *ptr) { + fts->searches++; + fts->char_searches++; + return simple_pattern_matches(q, ptr); +} + +static FTS_MATCH rrdcontext_to_json_v2_full_text_search(struct rrdcontext_to_json_v2_data *ctl, RRDCONTEXT *rc, SIMPLE_PATTERN *q) { + if(unlikely(full_text_search_string(&ctl->q.fts, q, rc->id) || + full_text_search_string(&ctl->q.fts, q, rc->family))) + return FTS_MATCHED_CONTEXT; + + if(unlikely(full_text_search_string(&ctl->q.fts, q, rc->title))) + return FTS_MATCHED_TITLE; + + if(unlikely(full_text_search_string(&ctl->q.fts, q, rc->units))) + return FTS_MATCHED_UNITS; + + FTS_MATCH matched = FTS_MATCHED_NONE; + RRDINSTANCE *ri; + dfe_start_read(rc->rrdinstances, ri) { + if(matched) break; + + if(ctl->window.enabled && !query_matches_retention(ctl->window.after, ctl->window.before, ri->first_time_s, (ri->flags & RRD_FLAG_COLLECTED) ? ctl->now : ri->last_time_s, 0)) + continue; + + if(unlikely(full_text_search_string(&ctl->q.fts, q, ri->id)) || + (ri->name != ri->id && full_text_search_string(&ctl->q.fts, q, ri->name))) { + matched = FTS_MATCHED_INSTANCE; + break; + } + + RRDMETRIC *rm; + dfe_start_read(ri->rrdmetrics, rm) { + if(ctl->window.enabled && !query_matches_retention(ctl->window.after, ctl->window.before, rm->first_time_s, (rm->flags & RRD_FLAG_COLLECTED) ? 
ctl->now : rm->last_time_s, 0)) + continue; + + if(unlikely(full_text_search_string(&ctl->q.fts, q, rm->id)) || + (rm->name != rm->id && full_text_search_string(&ctl->q.fts, q, rm->name))) { + matched = FTS_MATCHED_DIMENSION; + break; + } + } + dfe_done(rm); + + size_t label_searches = 0; + if(unlikely(ri->rrdlabels && rrdlabels_entries(ri->rrdlabels) && + rrdlabels_match_simple_pattern_parsed(ri->rrdlabels, q, ':', &label_searches) == SP_MATCHED_POSITIVE)) { + ctl->q.fts.searches += label_searches; + ctl->q.fts.char_searches += label_searches; + matched = FTS_MATCHED_LABEL; + break; + } + ctl->q.fts.searches += label_searches; + ctl->q.fts.char_searches += label_searches; + + if(ri->rrdset) { + RRDSET *st = ri->rrdset; + rw_spinlock_read_lock(&st->alerts.spinlock); + for (RRDCALC *rcl = st->alerts.base; rcl; rcl = rcl->next) { + if(unlikely(full_text_search_string(&ctl->q.fts, q, rcl->config.name))) { + matched = FTS_MATCHED_ALERT; + break; + } + + if(unlikely(full_text_search_string(&ctl->q.fts, q, rcl->config.info))) { + matched = FTS_MATCHED_ALERT_INFO; + break; + } + } + rw_spinlock_read_unlock(&st->alerts.spinlock); + } + } + dfe_done(ri); + return matched; +} + +static ssize_t rrdcontext_to_json_v2_add_context(void *data, RRDCONTEXT_ACQUIRED *rca, bool queryable_context __maybe_unused) { + struct rrdcontext_to_json_v2_data *ctl = data; + + RRDCONTEXT *rc = rrdcontext_acquired_value(rca); + + if(ctl->window.enabled && !query_matches_retention(ctl->window.after, ctl->window.before, rc->first_time_s, (rc->flags & RRD_FLAG_COLLECTED) ? 
ctl->now : rc->last_time_s, 0)) + return 0; // continue to next context + + FTS_MATCH match = ctl->q.host_match; + if((ctl->mode & CONTEXTS_V2_SEARCH) && ctl->q.pattern) { + match = rrdcontext_to_json_v2_full_text_search(ctl, rc, ctl->q.pattern); + + if(match == FTS_MATCHED_NONE) + return 0; // continue to next context + } + + if(ctl->mode & CONTEXTS_V2_ALERTS) { + if(!rrdcontext_matches_alert(ctl, rc)) + return 0; // continue to next context + } + + if(ctl->contexts.dict) { + struct context_v2_entry t = { + .count = 1, + .id = rc->id, + .family = string_dup(rc->family), + .priority = rc->priority, + .first_time_s = rc->first_time_s, + .last_time_s = rc->last_time_s, + .flags = rc->flags, + .match = match, + }; + + dictionary_set(ctl->contexts.dict, string2str(rc->id), &t, sizeof(struct context_v2_entry)); + } + + return 1; +} + +void buffer_json_agent_status_id(BUFFER *wb, size_t ai, usec_t duration_ut) { + buffer_json_member_add_object(wb, "st"); + { + buffer_json_member_add_uint64(wb, "ai", ai); + buffer_json_member_add_uint64(wb, "code", 200); + buffer_json_member_add_string(wb, "msg", ""); + if (duration_ut) + buffer_json_member_add_double(wb, "ms", (NETDATA_DOUBLE) duration_ut / 1000.0); + } + buffer_json_object_close(wb); +} + +void buffer_json_node_add_v2(BUFFER *wb, RRDHOST *host, size_t ni, usec_t duration_ut, bool status) { + buffer_json_member_add_string(wb, "mg", host->machine_guid); + + if(!uuid_is_null(host->node_id)) + buffer_json_member_add_uuid(wb, "nd", host->node_id); + buffer_json_member_add_string(wb, "nm", rrdhost_hostname(host)); + buffer_json_member_add_uint64(wb, "ni", ni); + + if(status) + buffer_json_agent_status_id(wb, 0, duration_ut); +} + +static void rrdhost_receiver_to_json(BUFFER *wb, RRDHOST_STATUS *s, const char *key) { + buffer_json_member_add_object(wb, key); + { + buffer_json_member_add_uint64(wb, "id", s->ingest.id); + buffer_json_member_add_uint64(wb, "hops", s->ingest.hops); + buffer_json_member_add_string(wb, "type", 
rrdhost_ingest_type_to_string(s->ingest.type)); + buffer_json_member_add_string(wb, "status", rrdhost_ingest_status_to_string(s->ingest.status)); + buffer_json_member_add_time_t(wb, "since", s->ingest.since); + buffer_json_member_add_time_t(wb, "age", s->now - s->ingest.since); + + if(s->ingest.type == RRDHOST_INGEST_TYPE_CHILD) { + if(s->ingest.status == RRDHOST_INGEST_STATUS_OFFLINE) + buffer_json_member_add_string(wb, "reason", stream_handshake_error_to_string(s->ingest.reason)); + + if(s->ingest.status == RRDHOST_INGEST_STATUS_REPLICATING) { + buffer_json_member_add_object(wb, "replication"); + { + buffer_json_member_add_boolean(wb, "in_progress", s->ingest.replication.in_progress); + buffer_json_member_add_double(wb, "completion", s->ingest.replication.completion); + buffer_json_member_add_uint64(wb, "instances", s->ingest.replication.instances); + } + buffer_json_object_close(wb); // replication + } + + if(s->ingest.status == RRDHOST_INGEST_STATUS_REPLICATING || s->ingest.status == RRDHOST_INGEST_STATUS_ONLINE) { + buffer_json_member_add_object(wb, "source"); + { + char buf[1024 + 1]; + snprintfz(buf, sizeof(buf) - 1, "[%s]:%d%s", s->ingest.peers.local.ip, s->ingest.peers.local.port, s->ingest.ssl ? ":SSL" : ""); + buffer_json_member_add_string(wb, "local", buf); + + snprintfz(buf, sizeof(buf) - 1, "[%s]:%d%s", s->ingest.peers.peer.ip, s->ingest.peers.peer.port, s->ingest.ssl ? 
":SSL" : ""); + buffer_json_member_add_string(wb, "remote", buf); + + stream_capabilities_to_json_array(wb, s->ingest.capabilities, "capabilities"); + } + buffer_json_object_close(wb); // source + } + } + } + buffer_json_object_close(wb); // collection +} + +static void rrdhost_sender_to_json(BUFFER *wb, RRDHOST_STATUS *s, const char *key) { + if(s->stream.status == RRDHOST_STREAM_STATUS_DISABLED) + return; + + buffer_json_member_add_object(wb, key); + { + buffer_json_member_add_uint64(wb, "id", s->stream.id); + buffer_json_member_add_uint64(wb, "hops", s->stream.hops); + buffer_json_member_add_string(wb, "status", rrdhost_streaming_status_to_string(s->stream.status)); + buffer_json_member_add_time_t(wb, "since", s->stream.since); + buffer_json_member_add_time_t(wb, "age", s->now - s->stream.since); + + if (s->stream.status == RRDHOST_STREAM_STATUS_OFFLINE) + buffer_json_member_add_string(wb, "reason", stream_handshake_error_to_string(s->stream.reason)); + + if (s->stream.status == RRDHOST_STREAM_STATUS_REPLICATING) { + buffer_json_member_add_object(wb, "replication"); + { + buffer_json_member_add_boolean(wb, "in_progress", s->stream.replication.in_progress); + buffer_json_member_add_double(wb, "completion", s->stream.replication.completion); + buffer_json_member_add_uint64(wb, "instances", s->stream.replication.instances); + } + buffer_json_object_close(wb); + } + + buffer_json_member_add_object(wb, "destination"); + { + char buf[1024 + 1]; + snprintfz(buf, sizeof(buf) - 1, "[%s]:%d%s", s->stream.peers.local.ip, s->stream.peers.local.port, s->stream.ssl ? ":SSL" : ""); + buffer_json_member_add_string(wb, "local", buf); + + snprintfz(buf, sizeof(buf) - 1, "[%s]:%d%s", s->stream.peers.peer.ip, s->stream.peers.peer.port, s->stream.ssl ? 
":SSL" : ""); + buffer_json_member_add_string(wb, "remote", buf); + + stream_capabilities_to_json_array(wb, s->stream.capabilities, "capabilities"); + + buffer_json_member_add_object(wb, "traffic"); + { + buffer_json_member_add_boolean(wb, "compression", s->stream.compression); + buffer_json_member_add_uint64(wb, "data", s->stream.sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_DATA]); + buffer_json_member_add_uint64(wb, "metadata", s->stream.sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_METADATA]); + buffer_json_member_add_uint64(wb, "functions", s->stream.sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_FUNCTIONS]); + buffer_json_member_add_uint64(wb, "replication", s->stream.sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_REPLICATION]); + buffer_json_member_add_uint64(wb, "dyncfg", s->stream.sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_DYNCFG]); + } + buffer_json_object_close(wb); // traffic + + buffer_json_member_add_array(wb, "candidates"); + struct rrdpush_destinations *d; + for (d = s->host->destinations; d; d = d->next) { + buffer_json_add_array_item_object(wb); + buffer_json_member_add_uint64(wb, "attempts", d->attempts); + { + + if (d->ssl) { + snprintfz(buf, sizeof(buf) - 1, "%s:SSL", string2str(d->destination)); + buffer_json_member_add_string(wb, "destination", buf); + } + else + buffer_json_member_add_string(wb, "destination", string2str(d->destination)); + + buffer_json_member_add_time_t(wb, "since", d->since); + buffer_json_member_add_time_t(wb, "age", s->now - d->since); + buffer_json_member_add_string(wb, "last_handshake", stream_handshake_error_to_string(d->reason)); + if(d->postpone_reconnection_until > s->now) { + buffer_json_member_add_time_t(wb, "next_check", d->postpone_reconnection_until); + buffer_json_member_add_time_t(wb, "next_in", d->postpone_reconnection_until - s->now); + } + } + buffer_json_object_close(wb); // each candidate + } + buffer_json_array_close(wb); // candidates 
+ } + buffer_json_object_close(wb); // destination + } + buffer_json_object_close(wb); // streaming +} + +void agent_capabilities_to_json(BUFFER *wb, RRDHOST *host, const char *key) { + buffer_json_member_add_array(wb, key); + + struct capability *capas = aclk_get_node_instance_capas(host); + for(struct capability *capa = capas; capa->name ;capa++) { + buffer_json_add_array_item_object(wb); + { + buffer_json_member_add_string(wb, "name", capa->name); + buffer_json_member_add_uint64(wb, "version", capa->version); + buffer_json_member_add_boolean(wb, "enabled", capa->enabled); + } + buffer_json_object_close(wb); + } + buffer_json_array_close(wb); + freez(capas); +} + +static inline void host_dyncfg_to_json_v2(BUFFER *wb, const char *key, RRDHOST_STATUS *s) { + buffer_json_member_add_object(wb, key); + { + buffer_json_member_add_string(wb, "status", rrdhost_dyncfg_status_to_string(s->dyncfg.status)); + } + buffer_json_object_close(wb); // health + +} + +static inline void rrdhost_health_to_json_v2(BUFFER *wb, const char *key, RRDHOST_STATUS *s) { + buffer_json_member_add_object(wb, key); + { + buffer_json_member_add_string(wb, "status", rrdhost_health_status_to_string(s->health.status)); + if (s->health.status == RRDHOST_HEALTH_STATUS_RUNNING) { + buffer_json_member_add_object(wb, "alerts"); + { + buffer_json_member_add_uint64(wb, "critical", s->health.alerts.critical); + buffer_json_member_add_uint64(wb, "warning", s->health.alerts.warning); + buffer_json_member_add_uint64(wb, "clear", s->health.alerts.clear); + buffer_json_member_add_uint64(wb, "undefined", s->health.alerts.undefined); + buffer_json_member_add_uint64(wb, "uninitialized", s->health.alerts.uninitialized); + } + buffer_json_object_close(wb); // alerts + } + } + buffer_json_object_close(wb); // health +} + +static void rrdcontext_to_json_v2_rrdhost(BUFFER *wb, RRDHOST *host, struct rrdcontext_to_json_v2_data *ctl, size_t node_id) { + buffer_json_add_array_item_object(wb); // this node + 
buffer_json_node_add_v2(wb, host, node_id, 0, + (ctl->mode & CONTEXTS_V2_AGENTS) && !(ctl->mode & CONTEXTS_V2_NODE_INSTANCES)); + + if(ctl->mode & (CONTEXTS_V2_NODES_INFO | CONTEXTS_V2_NODE_INSTANCES)) { + RRDHOST_STATUS s; + rrdhost_status(host, ctl->now, &s); + + if (ctl->mode & (CONTEXTS_V2_NODES_INFO)) { + buffer_json_member_add_string(wb, "v", rrdhost_program_version(host)); + + host_labels2json(host, wb, "labels"); + + if (host->system_info) { + buffer_json_member_add_object(wb, "hw"); + { + buffer_json_member_add_string_or_empty(wb, "architecture", host->system_info->architecture); + buffer_json_member_add_string_or_empty(wb, "cpu_frequency", host->system_info->host_cpu_freq); + buffer_json_member_add_string_or_empty(wb, "cpus", host->system_info->host_cores); + buffer_json_member_add_string_or_empty(wb, "memory", host->system_info->host_ram_total); + buffer_json_member_add_string_or_empty(wb, "disk_space", host->system_info->host_disk_space); + buffer_json_member_add_string_or_empty(wb, "virtualization", host->system_info->virtualization); + buffer_json_member_add_string_or_empty(wb, "container", host->system_info->container); + } + buffer_json_object_close(wb); + + buffer_json_member_add_object(wb, "os"); + { + buffer_json_member_add_string_or_empty(wb, "id", host->system_info->host_os_id); + buffer_json_member_add_string_or_empty(wb, "nm", host->system_info->host_os_name); + buffer_json_member_add_string_or_empty(wb, "v", host->system_info->host_os_version); + buffer_json_member_add_object(wb, "kernel"); + buffer_json_member_add_string_or_empty(wb, "nm", host->system_info->kernel_name); + buffer_json_member_add_string_or_empty(wb, "v", host->system_info->kernel_version); + buffer_json_object_close(wb); + } + buffer_json_object_close(wb); + } + + // created - the node is created but never connected to cloud + // unreachable - not currently connected + // stale - connected but not having live data + // reachable - connected with live data + // pruned - not 
connected for some time and has been removed + buffer_json_member_add_string(wb, "state", rrdhost_state_cloud_emulation(host) ? "reachable" : "stale"); + + rrdhost_health_to_json_v2(wb, "health", &s); + agent_capabilities_to_json(wb, host, "capabilities"); + } + + if (ctl->mode & (CONTEXTS_V2_NODE_INSTANCES)) { + buffer_json_member_add_array(wb, "instances"); + buffer_json_add_array_item_object(wb); // this instance + { + buffer_json_agent_status_id(wb, 0, 0); + + buffer_json_member_add_object(wb, "db"); + { + buffer_json_member_add_string(wb, "status", rrdhost_db_status_to_string(s.db.status)); + buffer_json_member_add_string(wb, "liveness", rrdhost_db_liveness_to_string(s.db.liveness)); + buffer_json_member_add_string(wb, "mode", rrd_memory_mode_name(s.db.mode)); + buffer_json_member_add_time_t(wb, "first_time", s.db.first_time_s); + buffer_json_member_add_time_t(wb, "last_time", s.db.last_time_s); + buffer_json_member_add_uint64(wb, "metrics", s.db.metrics); + buffer_json_member_add_uint64(wb, "instances", s.db.instances); + buffer_json_member_add_uint64(wb, "contexts", s.db.contexts); + } + buffer_json_object_close(wb); + + rrdhost_receiver_to_json(wb, &s, "ingest"); + rrdhost_sender_to_json(wb, &s, "stream"); + + buffer_json_member_add_object(wb, "ml"); + buffer_json_member_add_string(wb, "status", rrdhost_ml_status_to_string(s.ml.status)); + buffer_json_member_add_string(wb, "type", rrdhost_ml_type_to_string(s.ml.type)); + if (s.ml.status == RRDHOST_ML_STATUS_RUNNING) { + buffer_json_member_add_object(wb, "metrics"); + { + buffer_json_member_add_uint64(wb, "anomalous", s.ml.metrics.anomalous); + buffer_json_member_add_uint64(wb, "normal", s.ml.metrics.normal); + buffer_json_member_add_uint64(wb, "trained", s.ml.metrics.trained); + buffer_json_member_add_uint64(wb, "pending", s.ml.metrics.pending); + buffer_json_member_add_uint64(wb, "silenced", s.ml.metrics.silenced); + } + buffer_json_object_close(wb); // metrics + } + buffer_json_object_close(wb); // ml + + 
rrdhost_health_to_json_v2(wb, "health", &s); + + host_functions2json(host, wb); // functions + agent_capabilities_to_json(wb, host, "capabilities"); + + host_dyncfg_to_json_v2(wb, "dyncfg", &s); + } + buffer_json_object_close(wb); // this instance + buffer_json_array_close(wb); // instances + } + } + buffer_json_object_close(wb); // this node +} + +static ssize_t rrdcontext_to_json_v2_add_host(void *data, RRDHOST *host, bool queryable_host) { + if(!queryable_host || !host->rrdctx.contexts) + // the host matches the 'scope_host' but does not match the 'host' patterns + // or the host does not have any contexts + return 0; // continue to next host + + struct rrdcontext_to_json_v2_data *ctl = data; + + if(ctl->window.enabled && !rrdhost_matches_window(host, ctl->window.after, ctl->window.before, ctl->now)) + // the host does not have data in the requested window + return 0; // continue to next host + + if(ctl->request->timeout_ms && now_monotonic_usec() > ctl->timings.received_ut + ctl->request->timeout_ms * USEC_PER_MS) + // timed out + return -2; // stop the query + + if(ctl->request->interrupt_callback && ctl->request->interrupt_callback(ctl->request->interrupt_callback_data)) + // interrupted + return -1; // stop the query + + bool host_matched = (ctl->mode & CONTEXTS_V2_NODES); + bool do_contexts = (ctl->mode & (CONTEXTS_V2_CONTEXTS | CONTEXTS_V2_ALERTS)); + + ctl->q.host_match = FTS_MATCHED_NONE; + if((ctl->mode & CONTEXTS_V2_SEARCH)) { + // check if we match the host itself + if(ctl->q.pattern && ( + full_text_search_string(&ctl->q.fts, ctl->q.pattern, host->hostname) || + full_text_search_char(&ctl->q.fts, ctl->q.pattern, host->machine_guid) || + (ctl->q.pattern && full_text_search_char(&ctl->q.fts, ctl->q.pattern, ctl->q.host_node_id_str)))) { + ctl->q.host_match = FTS_MATCHED_HOST; + do_contexts = true; + } + } + + if(do_contexts) { + // save it + SIMPLE_PATTERN *old_q = ctl->q.pattern; + + if(ctl->q.host_match == FTS_MATCHED_HOST) + // do not do pattern 
matching on contexts - we matched the host itself + ctl->q.pattern = NULL; + + ssize_t added = query_scope_foreach_context( + host, ctl->request->scope_contexts, + ctl->contexts.scope_pattern, ctl->contexts.pattern, + rrdcontext_to_json_v2_add_context, queryable_host, ctl); + + // restore it + ctl->q.pattern = old_q; + + if(unlikely(added < 0)) + return -1; // stop the query + + if(added) + host_matched = true; + } + + if(!host_matched) + return 0; + + if(ctl->mode & CONTEXTS_V2_FUNCTIONS) { + struct function_v2_entry t = { + .used = 1, + .size = 1, + .node_ids = &ctl->nodes.ni, + .help = NULL, + .tags = NULL, + .access = HTTP_ACCESS_ALL, + .priority = RRDFUNCTIONS_PRIORITY_DEFAULT, + }; + host_functions_to_dict(host, ctl->functions.dict, &t, sizeof(t), &t.help, &t.tags, &t.access, &t.priority); + } + + if(ctl->mode & CONTEXTS_V2_NODES) { + struct contexts_v2_node t = { + .ni = ctl->nodes.ni++, + .host = host, + }; + + dictionary_set(ctl->nodes.dict, host->machine_guid, &t, sizeof(struct contexts_v2_node)); + } + + return 1; +} + +static void buffer_json_contexts_v2_mode_to_array(BUFFER *wb, const char *key, CONTEXTS_V2_MODE mode) { + buffer_json_member_add_array(wb, key); + + if(mode & CONTEXTS_V2_VERSIONS) + buffer_json_add_array_item_string(wb, "versions"); + + if(mode & CONTEXTS_V2_AGENTS) + buffer_json_add_array_item_string(wb, "agents"); + + if(mode & CONTEXTS_V2_AGENTS_INFO) + buffer_json_add_array_item_string(wb, "agents-info"); + + if(mode & CONTEXTS_V2_NODES) + buffer_json_add_array_item_string(wb, "nodes"); + + if(mode & CONTEXTS_V2_NODES_INFO) + buffer_json_add_array_item_string(wb, "nodes-info"); + + if(mode & CONTEXTS_V2_NODE_INSTANCES) + buffer_json_add_array_item_string(wb, "nodes-instances"); + + if(mode & CONTEXTS_V2_CONTEXTS) + buffer_json_add_array_item_string(wb, "contexts"); + + if(mode & CONTEXTS_V2_SEARCH) + buffer_json_add_array_item_string(wb, "search"); + + if(mode & CONTEXTS_V2_ALERTS) + buffer_json_add_array_item_string(wb, "alerts"); + 
+ if(mode & CONTEXTS_V2_ALERT_TRANSITIONS) + buffer_json_add_array_item_string(wb, "alert_transitions"); + + buffer_json_array_close(wb); +} + +void buffer_json_query_timings(BUFFER *wb, const char *key, struct query_timings *timings) { + timings->finished_ut = now_monotonic_usec(); + if(!timings->executed_ut) + timings->executed_ut = timings->finished_ut; + if(!timings->preprocessed_ut) + timings->preprocessed_ut = timings->received_ut; + buffer_json_member_add_object(wb, key); + buffer_json_member_add_double(wb, "prep_ms", (NETDATA_DOUBLE)(timings->preprocessed_ut - timings->received_ut) / USEC_PER_MS); + buffer_json_member_add_double(wb, "query_ms", (NETDATA_DOUBLE)(timings->executed_ut - timings->preprocessed_ut) / USEC_PER_MS); + buffer_json_member_add_double(wb, "output_ms", (NETDATA_DOUBLE)(timings->finished_ut - timings->executed_ut) / USEC_PER_MS); + buffer_json_member_add_double(wb, "total_ms", (NETDATA_DOUBLE)(timings->finished_ut - timings->received_ut) / USEC_PER_MS); + buffer_json_member_add_double(wb, "cloud_ms", (NETDATA_DOUBLE)(timings->finished_ut - timings->received_ut) / USEC_PER_MS); + buffer_json_object_close(wb); +} + +void buffer_json_cloud_timings(BUFFER *wb, const char *key, struct query_timings *timings) { + if(!timings->finished_ut) + timings->finished_ut = now_monotonic_usec(); + + buffer_json_member_add_object(wb, key); + buffer_json_member_add_double(wb, "routing_ms", 0.0); + buffer_json_member_add_double(wb, "node_max_ms", 0.0); + buffer_json_member_add_double(wb, "total_ms", (NETDATA_DOUBLE)(timings->finished_ut - timings->received_ut) / USEC_PER_MS); + buffer_json_object_close(wb); +} + +static void functions_insert_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { + struct function_v2_entry *t = value; + + // it is initialized with a static reference - we need to mallocz() the array + size_t *v = t->node_ids; + t->node_ids = mallocz(sizeof(size_t)); + *t->node_ids = *v; + t->size = 1; + 
t->used = 1; +} + +static bool functions_conflict_callback(const DICTIONARY_ITEM *item __maybe_unused, void *old_value, void *new_value, void *data __maybe_unused) { + struct function_v2_entry *t = old_value, *n = new_value; + size_t *v = n->node_ids; + + if(t->used >= t->size) { + t->node_ids = reallocz(t->node_ids, t->size * 2 * sizeof(size_t)); + t->size *= 2; + } + + t->node_ids[t->used++] = *v; + + return true; +} + +static void functions_delete_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { + struct function_v2_entry *t = value; + freez(t->node_ids); +} + +static bool contexts_conflict_callback(const DICTIONARY_ITEM *item __maybe_unused, void *old_value, void *new_value, void *data __maybe_unused) { + struct context_v2_entry *o = old_value; + struct context_v2_entry *n = new_value; + + o->count++; + + if(o->family != n->family) { + if((o->flags & RRD_FLAG_COLLECTED) && !(n->flags & RRD_FLAG_COLLECTED)) + // keep old + ; + else if(!(o->flags & RRD_FLAG_COLLECTED) && (n->flags & RRD_FLAG_COLLECTED)) { + // keep new + string_freez(o->family); + o->family = string_dup(n->family); + } + else { + // merge + STRING *old_family = o->family; + o->family = string_2way_merge(o->family, n->family); + string_freez(old_family); + } + } + + if(o->priority != n->priority) { + if((o->flags & RRD_FLAG_COLLECTED) && !(n->flags & RRD_FLAG_COLLECTED)) + // keep o + ; + else if(!(o->flags & RRD_FLAG_COLLECTED) && (n->flags & RRD_FLAG_COLLECTED)) + // keep n + o->priority = n->priority; + else + // keep the min + o->priority = MIN(o->priority, n->priority); + } + + if(o->first_time_s && n->first_time_s) + o->first_time_s = MIN(o->first_time_s, n->first_time_s); + else if(!o->first_time_s) + o->first_time_s = n->first_time_s; + + if(o->last_time_s && n->last_time_s) + o->last_time_s = MAX(o->last_time_s, n->last_time_s); + else if(!o->last_time_s) + o->last_time_s = n->last_time_s; + + o->flags |= n->flags; + o->match = MIN(o->match, 
n->match); + + string_freez(n->family); + + return true; +} + +static void contexts_delete_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { + struct context_v2_entry *z = value; + string_freez(z->family); +} + +int rrdcontext_to_json_v2(BUFFER *wb, struct api_v2_contexts_request *req, CONTEXTS_V2_MODE mode) { + int resp = HTTP_RESP_OK; + bool run = true; + + if(mode & CONTEXTS_V2_SEARCH) + mode |= CONTEXTS_V2_CONTEXTS; + + if(mode & (CONTEXTS_V2_AGENTS_INFO)) + mode |= CONTEXTS_V2_AGENTS; + + if(mode & (CONTEXTS_V2_FUNCTIONS | CONTEXTS_V2_CONTEXTS | CONTEXTS_V2_SEARCH | CONTEXTS_V2_NODES_INFO | CONTEXTS_V2_NODE_INSTANCES)) + mode |= CONTEXTS_V2_NODES; + + if(mode & CONTEXTS_V2_ALERTS) { + mode |= CONTEXTS_V2_NODES; + req->options &= ~CONTEXTS_OPTION_ALERTS_WITH_CONFIGURATIONS; + + if(!(req->options & (CONTEXTS_OPTION_ALERTS_WITH_SUMMARY | CONTEXTS_OPTION_ALERTS_WITH_INSTANCES | + CONTEXTS_OPTION_ALERTS_WITH_VALUES))) + req->options |= CONTEXTS_OPTION_ALERTS_WITH_SUMMARY; + } + + if(mode & CONTEXTS_V2_ALERT_TRANSITIONS) { + mode |= CONTEXTS_V2_NODES; + req->options &= ~CONTEXTS_OPTION_ALERTS_WITH_INSTANCES; + } + + struct rrdcontext_to_json_v2_data ctl = { + .wb = wb, + .request = req, + .mode = mode, + .options = req->options, + .versions = { 0 }, + .nodes.scope_pattern = string_to_simple_pattern(req->scope_nodes), + .nodes.pattern = string_to_simple_pattern(req->nodes), + .contexts.pattern = string_to_simple_pattern(req->contexts), + .contexts.scope_pattern = string_to_simple_pattern(req->scope_contexts), + .q.pattern = string_to_simple_pattern_nocase(req->q), + .alerts.alert_name_pattern = string_to_simple_pattern(req->alerts.alert), + .window = { + .enabled = false, + .relative = false, + .after = req->after, + .before = req->before, + }, + .timings = { + .received_ut = now_monotonic_usec(), + } + }; + + bool debug = ctl.options & CONTEXTS_OPTION_DEBUG; + + if(mode & CONTEXTS_V2_NODES) { + ctl.nodes.dict = 
dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, + NULL, sizeof(struct contexts_v2_node)); + } + + if(mode & CONTEXTS_V2_CONTEXTS) { + ctl.contexts.dict = dictionary_create_advanced( + DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, NULL, + sizeof(struct context_v2_entry)); + + dictionary_register_conflict_callback(ctl.contexts.dict, contexts_conflict_callback, &ctl); + dictionary_register_delete_callback(ctl.contexts.dict, contexts_delete_callback, &ctl); + } + + if(mode & CONTEXTS_V2_FUNCTIONS) { + ctl.functions.dict = dictionary_create_advanced( + DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, NULL, + sizeof(struct function_v2_entry)); + + dictionary_register_insert_callback(ctl.functions.dict, functions_insert_callback, &ctl); + dictionary_register_conflict_callback(ctl.functions.dict, functions_conflict_callback, &ctl); + dictionary_register_delete_callback(ctl.functions.dict, functions_delete_callback, &ctl); + } + + if(mode & CONTEXTS_V2_ALERTS) { + if(!rrdcontexts_v2_init_alert_dictionaries(&ctl, req)) { + resp = HTTP_RESP_NOT_FOUND; + goto cleanup; + } + } + + if(req->after || req->before) { + ctl.window.relative = rrdr_relative_window_to_absolute_query( + &ctl.window.after, &ctl.window.before, &ctl.now, false); + + ctl.window.enabled = !(mode & CONTEXTS_V2_ALERT_TRANSITIONS); + } + else + ctl.now = now_realtime_sec(); + + buffer_json_initialize(wb, "\"", "\"", 0, true, + ((req->options & CONTEXTS_OPTION_MINIFY) && !(req->options & CONTEXTS_OPTION_DEBUG)) ? 
BUFFER_JSON_OPTIONS_MINIFY : BUFFER_JSON_OPTIONS_DEFAULT); + + buffer_json_member_add_uint64(wb, "api", 2); + + if(req->options & CONTEXTS_OPTION_DEBUG) { + buffer_json_member_add_object(wb, "request"); + { + buffer_json_contexts_v2_mode_to_array(wb, "mode", mode); + contexts_options_to_buffer_json_array(wb, "options", req->options); + + buffer_json_member_add_object(wb, "scope"); + { + buffer_json_member_add_string(wb, "scope_nodes", req->scope_nodes); + if (mode & (CONTEXTS_V2_CONTEXTS | CONTEXTS_V2_SEARCH | CONTEXTS_V2_ALERTS)) + buffer_json_member_add_string(wb, "scope_contexts", req->scope_contexts); + } + buffer_json_object_close(wb); + + buffer_json_member_add_object(wb, "selectors"); + { + buffer_json_member_add_string(wb, "nodes", req->nodes); + + if (mode & (CONTEXTS_V2_CONTEXTS | CONTEXTS_V2_SEARCH | CONTEXTS_V2_ALERTS)) + buffer_json_member_add_string(wb, "contexts", req->contexts); + + if(mode & (CONTEXTS_V2_ALERTS | CONTEXTS_V2_ALERT_TRANSITIONS)) { + buffer_json_member_add_object(wb, "alerts"); + + if(mode & CONTEXTS_V2_ALERTS) + contexts_alerts_status_to_buffer_json_array(wb, "status", req->alerts.status); + + if(mode & CONTEXTS_V2_ALERT_TRANSITIONS) { + buffer_json_member_add_string(wb, "context", req->contexts); + buffer_json_member_add_uint64(wb, "anchor_gi", req->alerts.global_id_anchor); + buffer_json_member_add_uint64(wb, "last", req->alerts.last); + } + + buffer_json_member_add_string(wb, "alert", req->alerts.alert); + buffer_json_member_add_string(wb, "transition", req->alerts.transition); + buffer_json_object_close(wb); // alerts + } + } + buffer_json_object_close(wb); // selectors + + buffer_json_member_add_object(wb, "filters"); + { + if (mode & CONTEXTS_V2_SEARCH) + buffer_json_member_add_string(wb, "q", req->q); + + buffer_json_member_add_time_t(wb, "after", req->after); + buffer_json_member_add_time_t(wb, "before", req->before); + } + buffer_json_object_close(wb); // filters + + if(mode & CONTEXTS_V2_ALERT_TRANSITIONS) { + 
buffer_json_member_add_object(wb, "facets"); + { + for (int i = 0; i < ATF_TOTAL_ENTRIES; i++) { + buffer_json_member_add_string(wb, alert_transition_facets[i].query_param, req->alerts.facets[i]); + } + } + buffer_json_object_close(wb); // facets + } + } + buffer_json_object_close(wb); + } + + ssize_t ret = 0; + if(run) + ret = query_scope_foreach_host(ctl.nodes.scope_pattern, ctl.nodes.pattern, + rrdcontext_to_json_v2_add_host, &ctl, + &ctl.versions, ctl.q.host_node_id_str); + + if(unlikely(ret < 0)) { + buffer_flush(wb); + + if(ret == -2) { + buffer_strcat(wb, "query timeout"); + resp = HTTP_RESP_GATEWAY_TIMEOUT; + } + else { + buffer_strcat(wb, "query interrupted"); + resp = HTTP_RESP_CLIENT_CLOSED_REQUEST; + } + goto cleanup; + } + + ctl.timings.executed_ut = now_monotonic_usec(); + + if(mode & CONTEXTS_V2_ALERT_TRANSITIONS) { + contexts_v2_alert_transitions_to_json(wb, &ctl, debug); + } + else { + if (mode & CONTEXTS_V2_NODES) { + buffer_json_member_add_array(wb, "nodes"); + struct contexts_v2_node *t; + dfe_start_read(ctl.nodes.dict, t) { + rrdcontext_to_json_v2_rrdhost(wb, t->host, &ctl, t->ni); + } + dfe_done(t); + buffer_json_array_close(wb); + } + + if (mode & CONTEXTS_V2_FUNCTIONS) { + buffer_json_member_add_array(wb, "functions"); + { + struct function_v2_entry *t; + dfe_start_read(ctl.functions.dict, t) { + buffer_json_add_array_item_object(wb); + { + buffer_json_member_add_string(wb, "name", t_dfe.name); + buffer_json_member_add_string(wb, "help", string2str(t->help)); + buffer_json_member_add_array(wb, "ni"); + { + for (size_t i = 0; i < t->used; i++) + buffer_json_add_array_item_uint64(wb, t->node_ids[i]); + } + buffer_json_array_close(wb); + buffer_json_member_add_string(wb, "tags", string2str(t->tags)); + http_access2buffer_json_array(wb, "access", t->access); + buffer_json_member_add_uint64(wb, "priority", t->priority); + } + buffer_json_object_close(wb); + } + dfe_done(t); + } + buffer_json_array_close(wb); + } + + if (mode & 
CONTEXTS_V2_CONTEXTS) { + buffer_json_member_add_object(wb, "contexts"); + { + struct context_v2_entry *z; + dfe_start_read(ctl.contexts.dict, z) { + bool collected = z->flags & RRD_FLAG_COLLECTED; + + buffer_json_member_add_object(wb, string2str(z->id)); + { + buffer_json_member_add_string(wb, "family", string2str(z->family)); + buffer_json_member_add_uint64(wb, "priority", z->priority); + buffer_json_member_add_time_t(wb, "first_entry", z->first_time_s); + buffer_json_member_add_time_t(wb, "last_entry", collected ? ctl.now : z->last_time_s); + buffer_json_member_add_boolean(wb, "live", collected); + if (mode & CONTEXTS_V2_SEARCH) + buffer_json_member_add_string(wb, "match", fts_match_to_string(z->match)); + } + buffer_json_object_close(wb); + } + dfe_done(z); + } + buffer_json_object_close(wb); // contexts + } + + if (mode & CONTEXTS_V2_ALERTS) + contexts_v2_alerts_to_json(wb, &ctl, debug); + + if (mode & CONTEXTS_V2_SEARCH) { + buffer_json_member_add_object(wb, "searches"); + { + buffer_json_member_add_uint64(wb, "strings", ctl.q.fts.string_searches); + buffer_json_member_add_uint64(wb, "char", ctl.q.fts.char_searches); + buffer_json_member_add_uint64(wb, "total", ctl.q.fts.searches); + } + buffer_json_object_close(wb); + } + + if (mode & (CONTEXTS_V2_VERSIONS)) + version_hashes_api_v2(wb, &ctl.versions); + + if (mode & CONTEXTS_V2_AGENTS) + buffer_json_agents_v2(wb, &ctl.timings, ctl.now, mode & (CONTEXTS_V2_AGENTS_INFO), true); + } + + buffer_json_cloud_timings(wb, "timings", &ctl.timings); + + buffer_json_finalize(wb); + +cleanup: + dictionary_destroy(ctl.nodes.dict); + dictionary_destroy(ctl.contexts.dict); + dictionary_destroy(ctl.functions.dict); + rrdcontexts_v2_alerts_cleanup(&ctl); + simple_pattern_free(ctl.nodes.scope_pattern); + simple_pattern_free(ctl.nodes.pattern); + simple_pattern_free(ctl.contexts.pattern); + simple_pattern_free(ctl.contexts.scope_pattern); + simple_pattern_free(ctl.q.pattern); + 
simple_pattern_free(ctl.alerts.alert_name_pattern); + + return resp; +} diff --git a/src/database/contexts/api_v2_contexts.h b/src/database/contexts/api_v2_contexts.h new file mode 100644 index 00000000000000..3fb5354b9e8722 --- /dev/null +++ b/src/database/contexts/api_v2_contexts.h @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_API_V2_CONTEXTS_H +#define NETDATA_API_V2_CONTEXTS_H + +#include "internal.h" + +typedef enum __attribute__ ((__packed__)) { + FTS_MATCHED_NONE = 0, + FTS_MATCHED_HOST, + FTS_MATCHED_CONTEXT, + FTS_MATCHED_INSTANCE, + FTS_MATCHED_DIMENSION, + FTS_MATCHED_LABEL, + FTS_MATCHED_ALERT, + FTS_MATCHED_ALERT_INFO, + FTS_MATCHED_FAMILY, + FTS_MATCHED_TITLE, + FTS_MATCHED_UNITS, +} FTS_MATCH; + +typedef struct full_text_search_index { + size_t searches; + size_t string_searches; + size_t char_searches; +} FTS_INDEX; + +struct contexts_v2_node { + size_t ni; + RRDHOST *host; +}; + +struct rrdcontext_to_json_v2_data { + time_t now; + + BUFFER *wb; + struct api_v2_contexts_request *request; + + CONTEXTS_V2_MODE mode; + CONTEXTS_OPTIONS options; + struct query_versions versions; + + struct { + SIMPLE_PATTERN *scope_pattern; + SIMPLE_PATTERN *pattern; + size_t ni; + DICTIONARY *dict; // the result set + } nodes; + + struct { + SIMPLE_PATTERN *scope_pattern; + SIMPLE_PATTERN *pattern; + size_t ci; + DICTIONARY *dict; // the result set + } contexts; + + struct { + SIMPLE_PATTERN *alert_name_pattern; + time_t alarm_id_filter; + + size_t ati; + + DICTIONARY *summary; + DICTIONARY *alert_instances; + + DICTIONARY *by_type; + DICTIONARY *by_component; + DICTIONARY *by_classification; + DICTIONARY *by_recipient; + DICTIONARY *by_module; + } alerts; + + struct { + FTS_MATCH host_match; + char host_node_id_str[UUID_STR_LEN]; + SIMPLE_PATTERN *pattern; + FTS_INDEX fts; + } q; + + struct { + DICTIONARY *dict; // the result set + } functions; + + struct { + bool enabled; + bool relative; + time_t after; + time_t before; + } 
window; + + struct query_timings timings; +}; + +void agent_capabilities_to_json(BUFFER *wb, RRDHOST *host, const char *key); + +#include "api_v2_contexts_alerts.h" + +#endif //NETDATA_API_V2_CONTEXTS_H diff --git a/src/database/contexts/api_v2_contexts_agents.c b/src/database/contexts/api_v2_contexts_agents.c new file mode 100644 index 00000000000000..ba9cc450591b21 --- /dev/null +++ b/src/database/contexts/api_v2_contexts_agents.c @@ -0,0 +1,178 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_contexts.h" +#include "aclk/aclk_capas.h" + +void build_info_to_json_object(BUFFER *b); + +static void convert_seconds_to_dhms(time_t seconds, char *result, int result_size) { + int days, hours, minutes; + + days = (int) (seconds / (24 * 3600)); + seconds = (int) (seconds % (24 * 3600)); + hours = (int) (seconds / 3600); + seconds %= 3600; + minutes = (int) (seconds / 60); + seconds %= 60; + + // Format the result into the provided string buffer + BUFFER *buf = buffer_create(128, NULL); + if (days) + buffer_sprintf(buf,"%d day%s%s", days, days==1 ? "" : "s", hours || minutes ? ", " : ""); + if (hours) + buffer_sprintf(buf,"%d hour%s%s", hours, hours==1 ? "" : "s", minutes ? ", " : ""); + if (minutes) + buffer_sprintf(buf,"%d minute%s%s", minutes, minutes==1 ? "" : "s", seconds ? ", " : ""); + if (seconds) + buffer_sprintf(buf,"%d second%s", (int) seconds, seconds==1 ? 
"" : "s"); + strncpyz(result, buffer_tostring(buf), result_size); + buffer_free(buf); +} + +void buffer_json_agents_v2(BUFFER *wb, struct query_timings *timings, time_t now_s, bool info, bool array) { + if(!now_s) + now_s = now_realtime_sec(); + + if(array) { + buffer_json_member_add_array(wb, "agents"); + buffer_json_add_array_item_object(wb); + } + else + buffer_json_member_add_object(wb, "agent"); + + buffer_json_member_add_string(wb, "mg", localhost->machine_guid); + buffer_json_member_add_uuid(wb, "nd", localhost->node_id); + buffer_json_member_add_string(wb, "nm", rrdhost_hostname(localhost)); + buffer_json_member_add_time_t(wb, "now", now_s); + + if(array) + buffer_json_member_add_uint64(wb, "ai", 0); + + if(info) { + buffer_json_member_add_object(wb, "application"); + build_info_to_json_object(wb); + buffer_json_object_close(wb); // application + + buffer_json_cloud_status(wb, now_s); + + buffer_json_member_add_object(wb, "nodes"); + { + size_t receiving = 0, archived = 0, sending = 0, total = 0; + RRDHOST *host; + dfe_start_read(rrdhost_root_index, host) { + total++; + + if(host == localhost) + continue; + + if(rrdhost_state_cloud_emulation(host)) + receiving++; + else + archived++; + + if(rrdhost_flag_check(host, RRDHOST_FLAG_RRDPUSH_SENDER_CONNECTED)) + sending++; + } + dfe_done(host); + + buffer_json_member_add_uint64(wb, "total", total); + buffer_json_member_add_uint64(wb, "receiving", receiving); + buffer_json_member_add_uint64(wb, "sending", sending); + buffer_json_member_add_uint64(wb, "archived", archived); + } + buffer_json_object_close(wb); // nodes + + agent_capabilities_to_json(wb, localhost, "capabilities"); + + buffer_json_member_add_object(wb, "api"); + { + buffer_json_member_add_uint64(wb, "version", aclk_get_http_api_version()); + buffer_json_member_add_boolean(wb, "bearer_protection", netdata_is_protected_by_bearer); + } + buffer_json_object_close(wb); // api + + buffer_json_member_add_array(wb, "db_size"); + size_t group_seconds = 
localhost->rrd_update_every; + for (size_t tier = 0; tier < storage_tiers; tier++) { + STORAGE_ENGINE *eng = localhost->db[tier].eng; + if (!eng) continue; + + group_seconds *= storage_tiers_grouping_iterations[tier]; + uint64_t max = storage_engine_disk_space_max(eng->seb, localhost->db[tier].si); + uint64_t used = storage_engine_disk_space_used(eng->seb, localhost->db[tier].si); +#ifdef ENABLE_DBENGINE + if (!max && eng->seb == STORAGE_ENGINE_BACKEND_DBENGINE) { + max = get_directory_free_bytes_space(multidb_ctx[tier]); + max += used; + } +#endif + time_t first_time_s = storage_engine_global_first_time_s(eng->seb, localhost->db[tier].si); + size_t currently_collected_metrics = storage_engine_collected_metrics(eng->seb, localhost->db[tier].si); + + NETDATA_DOUBLE percent; + if (used && max) + percent = (NETDATA_DOUBLE) used * 100.0 / (NETDATA_DOUBLE) max; + else + percent = 0.0; + + buffer_json_add_array_item_object(wb); + buffer_json_member_add_uint64(wb, "tier", tier); + char human_retention[128]; + convert_seconds_to_dhms((time_t) group_seconds, human_retention, sizeof(human_retention) - 1); + buffer_json_member_add_string(wb, "point_every", human_retention); + + buffer_json_member_add_uint64(wb, "metrics", storage_engine_metrics(eng->seb, localhost->db[tier].si)); + buffer_json_member_add_uint64(wb, "samples", storage_engine_samples(eng->seb, localhost->db[tier].si)); + + if(used || max) { + buffer_json_member_add_uint64(wb, "disk_used", used); + buffer_json_member_add_uint64(wb, "disk_max", max); + buffer_json_member_add_double(wb, "disk_percent", percent); + } + + if(first_time_s) { + time_t retention = now_s - first_time_s; + + buffer_json_member_add_time_t(wb, "from", first_time_s); + buffer_json_member_add_time_t(wb, "to", now_s); + buffer_json_member_add_time_t(wb, "retention", retention); + + convert_seconds_to_dhms(retention, human_retention, sizeof(human_retention) - 1); + buffer_json_member_add_string(wb, "retention_human", human_retention); + + 
if(used || max) { // we have disk space information + time_t time_retention = 0; +#ifdef ENABLE_DBENGINE + time_retention = multidb_ctx[tier]->config.max_retention_s; +#endif + time_t space_retention = (time_t)((NETDATA_DOUBLE)(now_s - first_time_s) * 100.0 / percent); + time_t actual_retention = MIN(space_retention, time_retention ? time_retention : space_retention); + + if (time_retention) { + convert_seconds_to_dhms(time_retention, human_retention, sizeof(human_retention) - 1); + buffer_json_member_add_time_t(wb, "requested_retention", time_retention); + buffer_json_member_add_string(wb, "requested_retention_human", human_retention); + } + + convert_seconds_to_dhms(actual_retention, human_retention, sizeof(human_retention) - 1); + buffer_json_member_add_time_t(wb, "expected_retention", actual_retention); + buffer_json_member_add_string(wb, "expected_retention_human", human_retention); + } + } + + if(currently_collected_metrics) + buffer_json_member_add_uint64(wb, "currently_collected_metrics", currently_collected_metrics); + + buffer_json_object_close(wb); + } + buffer_json_array_close(wb); // db_size + } + + if(timings) + buffer_json_query_timings(wb, "timings", timings); + + buffer_json_object_close(wb); + + if(array) + buffer_json_array_close(wb); +} diff --git a/src/database/contexts/api_v2_contexts_alert_config.c b/src/database/contexts/api_v2_contexts_alert_config.c new file mode 100644 index 00000000000000..cd3d8fc143d2de --- /dev/null +++ b/src/database/contexts/api_v2_contexts_alert_config.c @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_contexts_alerts.h" + +void contexts_v2_alert_config_to_json_from_sql_alert_config_data(struct sql_alert_config_data *t, void *data) { + struct alert_transitions_callback_data *d = data; + BUFFER *wb = d->wb; + bool debug = d->debug; + d->configs_added++; + + if(d->only_one_config) + buffer_json_add_array_item_object(wb); // alert config + + { + buffer_json_member_add_string(wb, 
"name", t->name); + buffer_json_member_add_uuid_ptr(wb, "config_hash_id", t->config_hash_id); + + buffer_json_member_add_object(wb, "selectors"); + { + bool is_template = t->selectors.on_template && *t->selectors.on_template ? true : false; + buffer_json_member_add_string(wb, "type", is_template ? "template" : "alarm"); + buffer_json_member_add_string(wb, "on", is_template ? t->selectors.on_template : t->selectors.on_key); + + buffer_json_member_add_string(wb, "families", t->selectors.families); + buffer_json_member_add_string(wb, "host_labels", t->selectors.host_labels); + buffer_json_member_add_string(wb, "chart_labels", t->selectors.chart_labels); + } + buffer_json_object_close(wb); // selectors + + buffer_json_member_add_object(wb, "value"); // value + { + // buffer_json_member_add_string(wb, "every", t->value.every); // does not exist in Netdata Cloud + buffer_json_member_add_string(wb, "units", t->value.units); + buffer_json_member_add_uint64(wb, "update_every", t->value.update_every); + + if (t->value.db.after || debug) { + buffer_json_member_add_object(wb, "db"); + { + // buffer_json_member_add_string(wb, "lookup", t->value.db.lookup); // does not exist in Netdata Cloud + + buffer_json_member_add_time_t(wb, "after", t->value.db.after); + buffer_json_member_add_time_t(wb, "before", t->value.db.before); + buffer_json_member_add_string(wb, "time_group_condition", alerts_group_conditions_id2txt(t->value.db.time_group_condition)); + buffer_json_member_add_double(wb, "time_group_value", t->value.db.time_group_value); + buffer_json_member_add_string(wb, "dims_group", alerts_dims_grouping_id2group(t->value.db.dims_group)); + buffer_json_member_add_string(wb, "data_source", alerts_data_source_id2source(t->value.db.data_source)); + buffer_json_member_add_string(wb, "method", t->value.db.method); + buffer_json_member_add_string(wb, "dimensions", t->value.db.dimensions); + rrdr_options_to_buffer_json_array(wb, "options", (RRDR_OPTIONS)t->value.db.options); + } + 
buffer_json_object_close(wb); // db + } + + if (t->value.calc || debug) + buffer_json_member_add_string(wb, "calc", t->value.calc); + } + buffer_json_object_close(wb); // value + + if (t->status.warn || t->status.crit || debug) { + buffer_json_member_add_object(wb, "status"); // status + { + NETDATA_DOUBLE green = t->status.green ? str2ndd(t->status.green, NULL) : NAN; + NETDATA_DOUBLE red = t->status.red ? str2ndd(t->status.red, NULL) : NAN; + + if (!isnan(green) || debug) + buffer_json_member_add_double(wb, "green", green); + + if (!isnan(red) || debug) + buffer_json_member_add_double(wb, "red", red); + + if (t->status.warn || debug) + buffer_json_member_add_string(wb, "warn", t->status.warn); + + if (t->status.crit || debug) + buffer_json_member_add_string(wb, "crit", t->status.crit); + } + buffer_json_object_close(wb); // status + } + + buffer_json_member_add_object(wb, "notification"); + { + buffer_json_member_add_string(wb, "type", "agent"); + buffer_json_member_add_string(wb, "exec", t->notification.exec ? t->notification.exec : NULL); + buffer_json_member_add_string(wb, "to", t->notification.to_key ? 
t->notification.to_key : string2str(localhost->health.health_default_recipient)); + buffer_json_member_add_string(wb, "delay", t->notification.delay); + buffer_json_member_add_string(wb, "repeat", t->notification.repeat); + buffer_json_member_add_string(wb, "options", t->notification.options); + } + buffer_json_object_close(wb); // notification + + buffer_json_member_add_string(wb, "class", t->classification); + buffer_json_member_add_string(wb, "component", t->component); + buffer_json_member_add_string(wb, "type", t->type); + buffer_json_member_add_string(wb, "info", t->info); + buffer_json_member_add_string(wb, "summary", t->summary); + // buffer_json_member_add_string(wb, "source", t->source); // moved to alert instance + } + + if(d->only_one_config) + buffer_json_object_close(wb); +} + +int contexts_v2_alert_config_to_json(struct web_client *w, const char *config_hash_id) { + struct alert_transitions_callback_data data = { + .wb = w->response.data, + .debug = false, + .only_one_config = false, + }; + DICTIONARY *configs = dictionary_create(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE); + dictionary_set(configs, config_hash_id, NULL, 0); + + buffer_flush(w->response.data); + + buffer_json_initialize(w->response.data, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); + + int added = sql_get_alert_configuration(configs, contexts_v2_alert_config_to_json_from_sql_alert_config_data, &data, false); + buffer_json_finalize(w->response.data); + + int ret = HTTP_RESP_OK; + + if(added <= 0) { + buffer_flush(w->response.data); + w->response.data->content_type = CT_TEXT_PLAIN; + if(added < 0) { + buffer_strcat(w->response.data, "Failed to execute SQL query."); + ret = HTTP_RESP_INTERNAL_SERVER_ERROR; + } + else { + buffer_strcat(w->response.data, "Config is not found."); + ret = HTTP_RESP_NOT_FOUND; + } + } + + return ret; +} diff --git a/src/database/contexts/api_v2_contexts_alert_transitions.c b/src/database/contexts/api_v2_contexts_alert_transitions.c 
new file mode 100644 index 00000000000000..60ae81035f5d56 --- /dev/null +++ b/src/database/contexts/api_v2_contexts_alert_transitions.c @@ -0,0 +1,487 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_contexts_alerts.h" + +struct alert_transitions_facets alert_transition_facets[] = { + [ATF_STATUS] = { + .id = "f_status", + .name = "Alert Status", + .query_param = "f_status", + .order = 1, + }, + [ATF_TYPE] = { + .id = "f_type", + .name = "Alert Type", + .query_param = "f_type", + .order = 2, + }, + [ATF_ROLE] = { + .id = "f_role", + .name = "Recipient Role", + .query_param = "f_role", + .order = 3, + }, + [ATF_CLASS] = { + .id = "f_class", + .name = "Alert Class", + .query_param = "f_class", + .order = 4, + }, + [ATF_COMPONENT] = { + .id = "f_component", + .name = "Alert Component", + .query_param = "f_component", + .order = 5, + }, + [ATF_NODE] = { + .id = "f_node", + .name = "Alert Node", + .query_param = "f_node", + .order = 6, + }, + [ATF_ALERT_NAME] = { + .id = "f_alert", + .name = "Alert Name", + .query_param = "f_alert", + .order = 7, + }, + [ATF_CHART_NAME] = { + .id = "f_instance", + .name = "Instance Name", + .query_param = "f_instance", + .order = 8, + }, + [ATF_CONTEXT] = { + .id = "f_context", + .name = "Context", + .query_param = "f_context", + .order = 9, + }, + + // terminator + [ATF_TOTAL_ENTRIES] = { + .id = NULL, + .name = NULL, + .query_param = NULL, + .order = 9999, + } +}; + +#define SQL_TRANSITION_DATA_SMALL_STRING (6 * 8) +#define SQL_TRANSITION_DATA_MEDIUM_STRING (12 * 8) +#define SQL_TRANSITION_DATA_BIG_STRING 512 + +struct sql_alert_transition_fixed_size { + usec_t global_id; + nd_uuid_t transition_id; + nd_uuid_t host_id; + nd_uuid_t config_hash_id; + uint32_t alarm_id; + char alert_name[SQL_TRANSITION_DATA_SMALL_STRING]; + char chart[RRD_ID_LENGTH_MAX]; + char chart_name[RRD_ID_LENGTH_MAX]; + char chart_context[SQL_TRANSITION_DATA_MEDIUM_STRING]; + char family[SQL_TRANSITION_DATA_SMALL_STRING]; + char 
recipient[SQL_TRANSITION_DATA_MEDIUM_STRING]; + char units[SQL_TRANSITION_DATA_SMALL_STRING]; + char exec[SQL_TRANSITION_DATA_BIG_STRING]; + char info[SQL_TRANSITION_DATA_BIG_STRING]; + char summary[SQL_TRANSITION_DATA_BIG_STRING]; + char classification[SQL_TRANSITION_DATA_SMALL_STRING]; + char type[SQL_TRANSITION_DATA_SMALL_STRING]; + char component[SQL_TRANSITION_DATA_SMALL_STRING]; + time_t when_key; + time_t duration; + time_t non_clear_duration; + uint64_t flags; + time_t delay_up_to_timestamp; + time_t exec_run_timestamp; + int exec_code; + int new_status; + int old_status; + int delay; + time_t last_repeat; + NETDATA_DOUBLE new_value; + NETDATA_DOUBLE old_value; + + char machine_guid[UUID_STR_LEN]; + struct sql_alert_transition_fixed_size *next; + struct sql_alert_transition_fixed_size *prev; +}; + +struct facet_entry { + uint32_t count; +}; + +static struct sql_alert_transition_fixed_size *contexts_v2_alert_transition_dup(struct sql_alert_transition_data *t, const char *machine_guid, struct sql_alert_transition_fixed_size *dst) { + struct sql_alert_transition_fixed_size *n = dst ? dst : mallocz(sizeof(*n)); + + n->global_id = t->global_id; + uuid_copy(n->transition_id, *t->transition_id); + uuid_copy(n->host_id, *t->host_id); + uuid_copy(n->config_hash_id, *t->config_hash_id); + n->alarm_id = t->alarm_id; + strncpyz(n->alert_name, t->alert_name ? t->alert_name : "", sizeof(n->alert_name) - 1); + strncpyz(n->chart, t->chart ? t->chart : "", sizeof(n->chart) - 1); + strncpyz(n->chart_name, t->chart_name ? t->chart_name : n->chart, sizeof(n->chart_name) - 1); + strncpyz(n->chart_context, t->chart_context ? t->chart_context : "", sizeof(n->chart_context) - 1); + strncpyz(n->family, t->family ? t->family : "", sizeof(n->family) - 1); + strncpyz(n->recipient, t->recipient ? t->recipient : "", sizeof(n->recipient) - 1); + strncpyz(n->units, t->units ? t->units : "", sizeof(n->units) - 1); + strncpyz(n->exec, t->exec ? 
t->exec : "", sizeof(n->exec) - 1); + strncpyz(n->info, t->info ? t->info : "", sizeof(n->info) - 1); + strncpyz(n->summary, t->summary ? t->summary : "", sizeof(n->summary) - 1); + strncpyz(n->classification, t->classification ? t->classification : "", sizeof(n->classification) - 1); + strncpyz(n->type, t->type ? t->type : "", sizeof(n->type) - 1); + strncpyz(n->component, t->component ? t->component : "", sizeof(n->component) - 1); + n->when_key = t->when_key; + n->duration = t->duration; + n->non_clear_duration = t->non_clear_duration; + n->flags = t->flags; + n->delay_up_to_timestamp = t->delay_up_to_timestamp; + n->exec_run_timestamp = t->exec_run_timestamp; + n->exec_code = t->exec_code; + n->new_status = t->new_status; + n->old_status = t->old_status; + n->delay = t->delay; + n->last_repeat = t->last_repeat; + n->new_value = t->new_value; + n->old_value = t->old_value; + + memcpy(n->machine_guid, machine_guid, sizeof(n->machine_guid)); + n->next = n->prev = NULL; + + return n; +} + +static void contexts_v2_alert_transition_free(struct sql_alert_transition_fixed_size *t) { + freez(t); +} + +static inline void contexts_v2_alert_transition_keep(struct alert_transitions_callback_data *d, struct sql_alert_transition_data *t, const char *machine_guid) { + d->items_matched++; + + if(unlikely(t->global_id <= d->ctl->request->alerts.global_id_anchor)) { + // this is in our past, we are not interested + d->operations.skips_before++; + return; + } + + if(unlikely(!d->base)) { + d->last_added = contexts_v2_alert_transition_dup(t, machine_guid, NULL); + DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(d->base, d->last_added, prev, next); + d->items_to_return++; + d->operations.first++; + return; + } + + struct sql_alert_transition_fixed_size *last = d->last_added; + while(last->prev != d->base->prev && t->global_id > last->prev->global_id) { + last = last->prev; + d->operations.backwards++; + } + + while(last->next && t->global_id < last->next->global_id) { + last = last->next; + 
d->operations.forwards++; + } + + if(d->items_to_return >= d->max_items_to_return) { + if(last == d->base->prev && t->global_id < last->global_id) { + d->operations.skips_after++; + return; + } + } + + d->items_to_return++; + + if(t->global_id > last->global_id) { + if(d->items_to_return > d->max_items_to_return) { + d->items_to_return--; + d->operations.shifts++; + d->last_added = d->base->prev; + DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(d->base, d->last_added, prev, next); + d->last_added = contexts_v2_alert_transition_dup(t, machine_guid, d->last_added); + } + DOUBLE_LINKED_LIST_PREPEND_ITEM_UNSAFE(d->base, d->last_added, prev, next); + d->operations.prepend++; + } + else { + d->last_added = contexts_v2_alert_transition_dup(t, machine_guid, NULL); + DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(d->base, d->last_added, prev, next); + d->operations.append++; + } + + while(d->items_to_return > d->max_items_to_return) { + // we have to remove something + + struct sql_alert_transition_fixed_size *tmp = d->base->prev; + DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(d->base, tmp, prev, next); + d->items_to_return--; + + if(unlikely(d->last_added == tmp)) + d->last_added = d->base; + + contexts_v2_alert_transition_free(tmp); + + d->operations.shifts++; + } +} + +static void contexts_v2_alert_transition_callback(struct sql_alert_transition_data *t, void *data) { + struct alert_transitions_callback_data *d = data; + d->items_evaluated++; + + char machine_guid[UUID_STR_LEN] = ""; + uuid_unparse_lower(*t->host_id, machine_guid); + + const char *facets[ATF_TOTAL_ENTRIES] = { + [ATF_STATUS] = rrdcalc_status2string(t->new_status), + [ATF_CLASS] = t->classification, + [ATF_TYPE] = t->type, + [ATF_COMPONENT] = t->component, + [ATF_ROLE] = t->recipient && *t->recipient ? 
t->recipient : string2str(localhost->health.health_default_recipient), + [ATF_NODE] = machine_guid, + [ATF_ALERT_NAME] = t->alert_name, + [ATF_CHART_NAME] = t->chart_name, + [ATF_CONTEXT] = t->chart_context, + }; + + for(size_t i = 0; i < ATF_TOTAL_ENTRIES ;i++) { + if (!facets[i] || !*facets[i]) facets[i] = "unknown"; + + struct facet_entry tmp = { + .count = 0, + }; + dictionary_set(d->facets[i].dict, facets[i], &tmp, sizeof(tmp)); + } + + bool selected[ATF_TOTAL_ENTRIES] = { 0 }; + + uint32_t selected_by = 0; + for(size_t i = 0; i < ATF_TOTAL_ENTRIES ;i++) { + selected[i] = !d->facets[i].pattern || simple_pattern_matches(d->facets[i].pattern, facets[i]); + if(selected[i]) + selected_by++; + } + + if(selected_by == ATF_TOTAL_ENTRIES) { + // this item is selected by all facets + // put it in our result (if it fits) + contexts_v2_alert_transition_keep(d, t, machine_guid); + } + + if(selected_by >= ATF_TOTAL_ENTRIES - 1) { + // this item is selected by all, or all except one facet + // in both cases we need to add it to our counters + + for (size_t i = 0; i < ATF_TOTAL_ENTRIES; i++) { + uint32_t counted_by = selected_by; + + if (counted_by != ATF_TOTAL_ENTRIES) { + counted_by = 0; + for (size_t j = 0; j < ATF_TOTAL_ENTRIES; j++) { + if (i == j || selected[j]) + counted_by++; + } + } + + if (counted_by == ATF_TOTAL_ENTRIES) { + // we need to count it on this facet + struct facet_entry *x = dictionary_get(d->facets[i].dict, facets[i]); + internal_fatal(!x, "facet is not found"); + if(x) + x->count++; + } + } + } +} + +void contexts_v2_alert_transitions_to_json(BUFFER *wb, struct rrdcontext_to_json_v2_data *ctl, bool debug) { + struct alert_transitions_callback_data data = { + .wb = wb, + .ctl = ctl, + .debug = debug, + .only_one_config = true, + .max_items_to_return = ctl->request->alerts.last, + .items_to_return = 0, + .base = NULL, + }; + + for(size_t i = 0; i < ATF_TOTAL_ENTRIES ;i++) { + data.facets[i].dict = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED 
| DICT_OPTION_FIXED_SIZE | DICT_OPTION_DONT_OVERWRITE_VALUE, NULL, sizeof(struct facet_entry)); + if(ctl->request->alerts.facets[i]) + data.facets[i].pattern = simple_pattern_create(ctl->request->alerts.facets[i], ",|", SIMPLE_PATTERN_EXACT, false); + } + + sql_alert_transitions( + ctl->nodes.dict, + ctl->window.after, + ctl->window.before, + ctl->request->contexts, + ctl->request->alerts.alert, + ctl->request->alerts.transition, + contexts_v2_alert_transition_callback, + &data, + debug); + + buffer_json_member_add_array(wb, "facets"); + for (size_t i = 0; i < ATF_TOTAL_ENTRIES; i++) { + buffer_json_add_array_item_object(wb); + { + buffer_json_member_add_string(wb, "id", alert_transition_facets[i].id); + buffer_json_member_add_string(wb, "name", alert_transition_facets[i].name); + buffer_json_member_add_uint64(wb, "order", alert_transition_facets[i].order); + buffer_json_member_add_array(wb, "options"); + { + struct facet_entry *x; + dfe_start_read(data.facets[i].dict, x) { + buffer_json_add_array_item_object(wb); + { + buffer_json_member_add_string(wb, "id", x_dfe.name); + if (i == ATF_NODE) { + RRDHOST *host = rrdhost_find_by_guid(x_dfe.name); + if (host) + buffer_json_member_add_string(wb, "name", rrdhost_hostname(host)); + else + buffer_json_member_add_string(wb, "name", x_dfe.name); + } else + buffer_json_member_add_string(wb, "name", x_dfe.name); + buffer_json_member_add_uint64(wb, "count", x->count); + } + buffer_json_object_close(wb); + } + dfe_done(x); + } + buffer_json_array_close(wb); // options + } + buffer_json_object_close(wb); // facet + } + buffer_json_array_close(wb); // facets + + buffer_json_member_add_array(wb, "transitions"); + for(struct sql_alert_transition_fixed_size *t = data.base; t ; t = t->next) { + buffer_json_add_array_item_object(wb); + { + RRDHOST *host = rrdhost_find_by_guid(t->machine_guid); + + buffer_json_member_add_uint64(wb, "gi", t->global_id); + buffer_json_member_add_uuid(wb, "transition_id", t->transition_id); + 
buffer_json_member_add_uuid(wb, "config_hash_id", t->config_hash_id); + buffer_json_member_add_string(wb, "machine_guid", t->machine_guid); + + if(host) { + buffer_json_member_add_string(wb, "hostname", rrdhost_hostname(host)); + + if(!uuid_is_null(host->node_id)) + buffer_json_member_add_uuid(wb, "node_id", host->node_id); + } + + buffer_json_member_add_string(wb, "alert", *t->alert_name ? t->alert_name : NULL); + buffer_json_member_add_string(wb, "instance", *t->chart ? t->chart : NULL); + buffer_json_member_add_string(wb, "instance_n", *t->chart_name ? t->chart_name : NULL); + buffer_json_member_add_string(wb, "context", *t->chart_context ? t->chart_context : NULL); + // buffer_json_member_add_string(wb, "family", *t->family ? t->family : NULL); + buffer_json_member_add_string(wb, "component", *t->component ? t->component : NULL); + buffer_json_member_add_string(wb, "classification", *t->classification ? t->classification : NULL); + buffer_json_member_add_string(wb, "type", *t->type ? t->type : NULL); + + buffer_json_member_add_time_t(wb, "when", t->when_key); + buffer_json_member_add_string(wb, "info", *t->info ? t->info : ""); + buffer_json_member_add_string(wb, "summary", *t->summary ? t->summary : ""); + buffer_json_member_add_string(wb, "units", *t->units ? 
t->units : NULL); + buffer_json_member_add_object(wb, "new"); + { + buffer_json_member_add_string(wb, "status", rrdcalc_status2string(t->new_status)); + buffer_json_member_add_double(wb, "value", t->new_value); + } + buffer_json_object_close(wb); // new + buffer_json_member_add_object(wb, "old"); + { + buffer_json_member_add_string(wb, "status", rrdcalc_status2string(t->old_status)); + buffer_json_member_add_double(wb, "value", t->old_value); + buffer_json_member_add_time_t(wb, "duration", t->duration); + buffer_json_member_add_time_t(wb, "raised_duration", t->non_clear_duration); + } + buffer_json_object_close(wb); // old + + buffer_json_member_add_object(wb, "notification"); + { + buffer_json_member_add_time_t(wb, "when", t->exec_run_timestamp); + buffer_json_member_add_time_t(wb, "delay", t->delay); + buffer_json_member_add_time_t(wb, "delay_up_to_time", t->delay_up_to_timestamp); + health_entry_flags_to_json_array(wb, "flags", t->flags); + buffer_json_member_add_string(wb, "exec", *t->exec ? t->exec : string2str(localhost->health.health_default_exec)); + buffer_json_member_add_uint64(wb, "exec_code", t->exec_code); + buffer_json_member_add_string(wb, "to", *t->recipient ? 
t->recipient : string2str(localhost->health.health_default_recipient)); + } + buffer_json_object_close(wb); // notification + } + buffer_json_object_close(wb); // a transition + } + buffer_json_array_close(wb); // all transitions + + if(ctl->options & CONTEXTS_OPTION_ALERTS_WITH_CONFIGURATIONS) { + DICTIONARY *configs = dictionary_create(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE); + + for(struct sql_alert_transition_fixed_size *t = data.base; t ; t = t->next) { + char guid[UUID_STR_LEN]; + uuid_unparse_lower(t->config_hash_id, guid); + dictionary_set(configs, guid, NULL, 0); + } + + buffer_json_member_add_array(wb, "configurations"); + sql_get_alert_configuration(configs, contexts_v2_alert_config_to_json_from_sql_alert_config_data, &data, debug); + buffer_json_array_close(wb); + + dictionary_destroy(configs); + } + + while(data.base) { + struct sql_alert_transition_fixed_size *t = data.base; + DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(data.base, t, prev, next); + contexts_v2_alert_transition_free(t); + } + + for(size_t i = 0; i < ATF_TOTAL_ENTRIES ;i++) { + dictionary_destroy(data.facets[i].dict); + simple_pattern_free(data.facets[i].pattern); + } + + buffer_json_member_add_object(wb, "items"); + { + // all the items in the window, under the scope_nodes, ignoring the facets (filters) + buffer_json_member_add_uint64(wb, "evaluated", data.items_evaluated); + + // all the items matching the query (if you didn't put anchor_gi and last, these are all the items you would get back) + buffer_json_member_add_uint64(wb, "matched", data.items_matched); + + // the items included in this response + buffer_json_member_add_uint64(wb, "returned", data.items_to_return); + + // same as last=X parameter + buffer_json_member_add_uint64(wb, "max_to_return", data.max_items_to_return); + + // items before the first returned, this should be 0 if anchor_gi is not set + buffer_json_member_add_uint64(wb, "before", data.operations.skips_before); + + // items after the last 
returned, when this is zero there aren't any items after the current list + buffer_json_member_add_uint64(wb, "after", data.operations.skips_after + data.operations.shifts); + } + buffer_json_object_close(wb); // items + + if(debug) { + buffer_json_member_add_object(wb, "stats"); + { + buffer_json_member_add_uint64(wb, "first", data.operations.first); + buffer_json_member_add_uint64(wb, "prepend", data.operations.prepend); + buffer_json_member_add_uint64(wb, "append", data.operations.append); + buffer_json_member_add_uint64(wb, "backwards", data.operations.backwards); + buffer_json_member_add_uint64(wb, "forwards", data.operations.forwards); + buffer_json_member_add_uint64(wb, "shifts", data.operations.shifts); + buffer_json_member_add_uint64(wb, "skips_before", data.operations.skips_before); + buffer_json_member_add_uint64(wb, "skips_after", data.operations.skips_after); + } + buffer_json_object_close(wb); + } +} diff --git a/src/database/contexts/api_v2_contexts_alerts.c b/src/database/contexts/api_v2_contexts_alerts.c new file mode 100644 index 00000000000000..b73dfa1f2f6779 --- /dev/null +++ b/src/database/contexts/api_v2_contexts_alerts.c @@ -0,0 +1,604 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_contexts.h" + +struct alert_counts { + size_t critical; + size_t warning; + size_t clear; + size_t error; +}; + +struct alert_v2_entry { + RRDCALC *tmp; + + STRING *name; + STRING *summary; + RRDLABELS *recipient; + RRDLABELS *classification; + RRDLABELS *context; + RRDLABELS *component; + RRDLABELS *type; + + size_t ati; + + struct alert_counts counts; + + size_t instances; + DICTIONARY *nodes; + DICTIONARY *configs; +}; + +struct alert_by_x_entry { + struct { + struct alert_counts counts; + size_t silent; + size_t total; + } running; + + struct { + size_t available; + } prototypes; +}; + +bool rrdcontext_matches_alert(struct rrdcontext_to_json_v2_data *ctl, RRDCONTEXT *rc) { + size_t matches = 0; + RRDINSTANCE *ri; + 
dfe_start_read(rc->rrdinstances, ri) { + if(ri->rrdset) { + RRDSET *st = ri->rrdset; + rw_spinlock_read_lock(&st->alerts.spinlock); + for (RRDCALC *rcl = st->alerts.base; rcl; rcl = rcl->next) { + if(ctl->alerts.alert_name_pattern && !simple_pattern_matches_string(ctl->alerts.alert_name_pattern, rcl->config.name)) + continue; + + if(ctl->alerts.alarm_id_filter && ctl->alerts.alarm_id_filter != rcl->id) + continue; + + size_t m = ctl->request->alerts.status & CONTEXTS_ALERT_STATUSES ? 0 : 1; + + if (!m) { + if ((ctl->request->alerts.status & CONTEXT_ALERT_UNINITIALIZED) && + rcl->status == RRDCALC_STATUS_UNINITIALIZED) + m++; + + if ((ctl->request->alerts.status & CONTEXT_ALERT_UNDEFINED) && + rcl->status == RRDCALC_STATUS_UNDEFINED) + m++; + + if ((ctl->request->alerts.status & CONTEXT_ALERT_CLEAR) && + rcl->status == RRDCALC_STATUS_CLEAR) + m++; + + if ((ctl->request->alerts.status & CONTEXT_ALERT_RAISED) && + rcl->status >= RRDCALC_STATUS_RAISED) + m++; + + if ((ctl->request->alerts.status & CONTEXT_ALERT_WARNING) && + rcl->status == RRDCALC_STATUS_WARNING) + m++; + + if ((ctl->request->alerts.status & CONTEXT_ALERT_CRITICAL) && + rcl->status == RRDCALC_STATUS_CRITICAL) + m++; + + if(!m) + continue; + } + + struct alert_v2_entry t = { + .tmp = rcl, + }; + struct alert_v2_entry *a2e = + dictionary_set(ctl->alerts.summary, string2str(rcl->config.name), + &t, sizeof(struct alert_v2_entry)); + size_t ati = a2e->ati; + matches++; + + dictionary_set_advanced(ctl->alerts.by_type, + string2str(rcl->config.type), + (ssize_t)string_strlen(rcl->config.type), + NULL, + sizeof(struct alert_by_x_entry), + rcl); + + dictionary_set_advanced(ctl->alerts.by_component, + string2str(rcl->config.component), + (ssize_t)string_strlen(rcl->config.component), + NULL, + sizeof(struct alert_by_x_entry), + rcl); + + dictionary_set_advanced(ctl->alerts.by_classification, + string2str(rcl->config.classification), + (ssize_t)string_strlen(rcl->config.classification), + NULL, + sizeof(struct 
alert_by_x_entry), + rcl); + + dictionary_set_advanced(ctl->alerts.by_recipient, + string2str(rcl->config.recipient), + (ssize_t)string_strlen(rcl->config.recipient), + NULL, + sizeof(struct alert_by_x_entry), + rcl); + + char *module = NULL; + rrdlabels_get_value_strdup_or_null(st->rrdlabels, &module, "_collect_module"); + if(!module || !*module) module = "[unset]"; + + dictionary_set_advanced(ctl->alerts.by_module, + module, + -1, + NULL, + sizeof(struct alert_by_x_entry), + rcl); + + if (ctl->options & (CONTEXTS_OPTION_ALERTS_WITH_INSTANCES | CONTEXTS_OPTION_ALERTS_WITH_VALUES)) { + char key[20 + 1]; + snprintfz(key, sizeof(key) - 1, "%p", rcl); + + struct sql_alert_instance_v2_entry z = { + .ati = ati, + .tmp = rcl, + }; + dictionary_set(ctl->alerts.alert_instances, key, &z, sizeof(z)); + } + } + rw_spinlock_read_unlock(&st->alerts.spinlock); + } + } + dfe_done(ri); + + return matches != 0; +} + +static void alert_counts_add(struct alert_counts *t, RRDCALC *rc) { + switch(rc->status) { + case RRDCALC_STATUS_CRITICAL: + t->critical++; + break; + + case RRDCALC_STATUS_WARNING: + t->warning++; + break; + + case RRDCALC_STATUS_CLEAR: + t->clear++; + break; + + case RRDCALC_STATUS_REMOVED: + case RRDCALC_STATUS_UNINITIALIZED: + break; + + case RRDCALC_STATUS_UNDEFINED: + default: + if(!netdata_double_isnumber(rc->value)) + t->error++; + + break; + } +} + +static void alerts_v2_add(struct alert_v2_entry *t, RRDCALC *rc) { + t->instances++; + + alert_counts_add(&t->counts, rc); + + dictionary_set(t->nodes, rc->rrdset->rrdhost->machine_guid, NULL, 0); + + char key[UUID_STR_LEN + 1]; + uuid_unparse_lower(rc->config.hash_id, key); + dictionary_set(t->configs, key, NULL, 0); +} + +static void alerts_by_x_insert_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data) { + static STRING *silent = NULL; + if(unlikely(!silent)) silent = string_strdupz("silent"); + + struct alert_by_x_entry *b = value; + RRDCALC *rc = data; + if(!rc) { + // prototype + 
b->prototypes.available++; + } + else { + alert_counts_add(&b->running.counts, rc); + + b->running.total++; + + if (rc->config.recipient == silent) + b->running.silent++; + } +} + +static bool alerts_by_x_conflict_callback(const DICTIONARY_ITEM *item __maybe_unused, void *old_value, void *new_value __maybe_unused, void *data __maybe_unused) { + alerts_by_x_insert_callback(item, old_value, data); + return false; +} + +static void alerts_v2_insert_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data) { + struct rrdcontext_to_json_v2_data *ctl = data; + struct alert_v2_entry *t = value; + RRDCALC *rc = t->tmp; + t->name = rc->config.name; + t->summary = rc->config.summary; // the original summary + t->context = rrdlabels_create(); + t->recipient = rrdlabels_create(); + t->classification = rrdlabels_create(); + t->component = rrdlabels_create(); + t->type = rrdlabels_create(); + if (string_strlen(rc->rrdset->context)) + rrdlabels_add(t->context, string2str(rc->rrdset->context), "yes", RRDLABEL_SRC_AUTO); + if (string_strlen(rc->config.recipient)) + rrdlabels_add(t->recipient, string2str(rc->config.recipient), "yes", RRDLABEL_SRC_AUTO); + if (string_strlen(rc->config.classification)) + rrdlabels_add(t->classification, string2str(rc->config.classification), "yes", RRDLABEL_SRC_AUTO); + if (string_strlen(rc->config.component)) + rrdlabels_add(t->component, string2str(rc->config.component), "yes", RRDLABEL_SRC_AUTO); + if (string_strlen(rc->config.type)) + rrdlabels_add(t->type, string2str(rc->config.type), "yes", RRDLABEL_SRC_AUTO); + t->ati = ctl->alerts.ati++; + + t->nodes = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_VALUE_LINK_DONT_CLONE|DICT_OPTION_NAME_LINK_DONT_CLONE); + t->configs = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_VALUE_LINK_DONT_CLONE|DICT_OPTION_NAME_LINK_DONT_CLONE); + + alerts_v2_add(t, rc); +} + +static bool alerts_v2_conflict_callback(const DICTIONARY_ITEM *item __maybe_unused, void *old_value, 
void *new_value, void *data __maybe_unused) { + struct alert_v2_entry *t = old_value, *n = new_value; + RRDCALC *rc = n->tmp; + if (string_strlen(rc->rrdset->context)) + rrdlabels_add(t->context, string2str(rc->rrdset->context), "yes", RRDLABEL_SRC_AUTO); + if (string_strlen(rc->config.recipient)) + rrdlabels_add(t->recipient, string2str(rc->config.recipient), "yes", RRDLABEL_SRC_AUTO); + if (string_strlen(rc->config.classification)) + rrdlabels_add(t->classification, string2str(rc->config.classification), "yes", RRDLABEL_SRC_AUTO); + if (string_strlen(rc->config.component)) + rrdlabels_add(t->component, string2str(rc->config.component), "yes", RRDLABEL_SRC_AUTO); + if (string_strlen(rc->config.type)) + rrdlabels_add(t->type, string2str(rc->config.type), "yes", RRDLABEL_SRC_AUTO); + alerts_v2_add(t, rc); + return true; +} + +static void alerts_v2_delete_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { + struct alert_v2_entry *t = value; + + rrdlabels_destroy(t->context); + rrdlabels_destroy(t->recipient); + rrdlabels_destroy(t->classification); + rrdlabels_destroy(t->component); + rrdlabels_destroy(t->type); + + dictionary_destroy(t->nodes); + dictionary_destroy(t->configs); +} + +struct alert_instances_callback_data { + BUFFER *wb; + struct rrdcontext_to_json_v2_data *ctl; + bool debug; +}; + +static int contexts_v2_alert_instance_to_json_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data) { + struct sql_alert_instance_v2_entry *t = value; + struct alert_instances_callback_data *d = data; + struct rrdcontext_to_json_v2_data *ctl = d->ctl; (void)ctl; + bool debug = d->debug; (void)debug; + BUFFER *wb = d->wb; + + buffer_json_add_array_item_object(wb); + { + buffer_json_member_add_uint64(wb, "ni", t->ni); + + buffer_json_member_add_string(wb, "nm", string2str(t->name)); + buffer_json_member_add_string(wb, "ch", string2str(t->chart_id)); + buffer_json_member_add_string(wb, "ch_n", 
string2str(t->chart_name)); + + if(ctl->request->options & CONTEXTS_OPTION_ALERTS_WITH_SUMMARY) + buffer_json_member_add_uint64(wb, "ati", t->ati); + + if(ctl->request->options & CONTEXTS_OPTION_ALERTS_WITH_INSTANCES) { + buffer_json_member_add_string(wb, "units", string2str(t->units)); + buffer_json_member_add_string(wb, "fami", string2str(t->family)); + buffer_json_member_add_string(wb, "info", string2str(t->info)); + buffer_json_member_add_string(wb, "sum", string2str(t->summary)); + buffer_json_member_add_string(wb, "ctx", string2str(t->context)); + buffer_json_member_add_string(wb, "st", rrdcalc_status2string(t->status)); + buffer_json_member_add_uuid(wb, "tr_i", t->last_transition_id); + buffer_json_member_add_double(wb, "tr_v", t->last_status_change_value); + buffer_json_member_add_time_t(wb, "tr_t", t->last_status_change); + buffer_json_member_add_uuid(wb, "cfg", t->config_hash_id); + buffer_json_member_add_string(wb, "src", string2str(t->source)); + + buffer_json_member_add_string(wb, "to", string2str(t->recipient)); + buffer_json_member_add_string(wb, "tp", string2str(t->type)); + buffer_json_member_add_string(wb, "cm", string2str(t->component)); + buffer_json_member_add_string(wb, "cl", string2str(t->classification)); + + // Agent specific fields + buffer_json_member_add_uint64(wb, "gi", t->global_id); + // rrdcalc_flags_to_json_array (wb, "flags", t->flags); + } + + if(ctl->request->options & CONTEXTS_OPTION_ALERTS_WITH_VALUES) { + // Netdata Cloud fetched these by querying the agents + buffer_json_member_add_double(wb, "v", t->value); + buffer_json_member_add_time_t(wb, "t", t->last_updated); + } + } + buffer_json_object_close(wb); // alert instance + + return 1; +} + +static void contexts_v2_alerts_by_x_update_prototypes(void *data, STRING *type, STRING *component, STRING *classification, STRING *recipient) { + struct rrdcontext_to_json_v2_data *ctl = data; + + dictionary_set_advanced(ctl->alerts.by_type, string2str(type), 
(ssize_t)string_strlen(type), NULL, sizeof(struct alert_by_x_entry), NULL); + dictionary_set_advanced(ctl->alerts.by_component, string2str(component), (ssize_t)string_strlen(component), NULL, sizeof(struct alert_by_x_entry), NULL); + dictionary_set_advanced(ctl->alerts.by_classification, string2str(classification), (ssize_t)string_strlen(classification), NULL, sizeof(struct alert_by_x_entry), NULL); + dictionary_set_advanced(ctl->alerts.by_recipient, string2str(recipient), (ssize_t)string_strlen(recipient), NULL, sizeof(struct alert_by_x_entry), NULL); +} + +static void contexts_v2_alerts_by_x_to_json(BUFFER *wb, DICTIONARY *dict, const char *key) { + buffer_json_member_add_array(wb, key); + { + struct alert_by_x_entry *b; + dfe_start_read(dict, b) { + buffer_json_add_array_item_object(wb); + { + buffer_json_member_add_string(wb, "name", b_dfe.name); + buffer_json_member_add_uint64(wb, "cr", b->running.counts.critical); + buffer_json_member_add_uint64(wb, "wr", b->running.counts.warning); + buffer_json_member_add_uint64(wb, "cl", b->running.counts.clear); + buffer_json_member_add_uint64(wb, "er", b->running.counts.error); + buffer_json_member_add_uint64(wb, "running", b->running.total); + + buffer_json_member_add_uint64(wb, "running_silent", b->running.silent); + + if(b->prototypes.available) + buffer_json_member_add_uint64(wb, "available", b->prototypes.available); + } + buffer_json_object_close(wb); + } + dfe_done(b); + } + buffer_json_array_close(wb); +} + +static void contexts_v2_alert_instances_to_json(BUFFER *wb, const char *key, struct rrdcontext_to_json_v2_data *ctl, bool debug) { + buffer_json_member_add_array(wb, key); + { + struct alert_instances_callback_data data = { + .wb = wb, + .ctl = ctl, + .debug = debug, + }; + dictionary_walkthrough_rw(ctl->alerts.alert_instances, DICTIONARY_LOCK_READ, + contexts_v2_alert_instance_to_json_callback, &data); + } + buffer_json_array_close(wb); // alerts_instances +} + +void contexts_v2_alerts_to_json(BUFFER *wb, 
struct rrdcontext_to_json_v2_data *ctl, bool debug) { + if(ctl->request->options & CONTEXTS_OPTION_ALERTS_WITH_SUMMARY) { + buffer_json_member_add_array(wb, "alerts"); + { + struct alert_v2_entry *t; + dfe_start_read(ctl->alerts.summary, t) + { + buffer_json_add_array_item_object(wb); + { + buffer_json_member_add_uint64(wb, "ati", t->ati); + + buffer_json_member_add_array(wb, "ni"); + void *host_guid; + dfe_start_read(t->nodes, host_guid) { + struct contexts_v2_node *cn = dictionary_get(ctl->nodes.dict,host_guid_dfe.name); + buffer_json_add_array_item_int64(wb, (int64_t) cn->ni); + } + dfe_done(host_guid); + buffer_json_array_close(wb); + + buffer_json_member_add_string(wb, "nm", string2str(t->name)); + buffer_json_member_add_string(wb, "sum", string2str(t->summary)); + + buffer_json_member_add_uint64(wb, "cr", t->counts.critical); + buffer_json_member_add_uint64(wb, "wr", t->counts.warning); + buffer_json_member_add_uint64(wb, "cl", t->counts.clear); + buffer_json_member_add_uint64(wb, "er", t->counts.error); + + buffer_json_member_add_uint64(wb, "in", t->instances); + buffer_json_member_add_uint64(wb, "nd", dictionary_entries(t->nodes)); + buffer_json_member_add_uint64(wb, "cfg", dictionary_entries(t->configs)); + + buffer_json_member_add_array(wb, "ctx"); + rrdlabels_key_to_buffer_array_item(t->context, wb); + buffer_json_array_close(wb); // ctx + + buffer_json_member_add_array(wb, "cls"); + rrdlabels_key_to_buffer_array_item(t->classification, wb); + buffer_json_array_close(wb); // classification + + + buffer_json_member_add_array(wb, "cp"); + rrdlabels_key_to_buffer_array_item(t->component, wb); + buffer_json_array_close(wb); // component + + buffer_json_member_add_array(wb, "ty"); + rrdlabels_key_to_buffer_array_item(t->type, wb); + buffer_json_array_close(wb); // type + + buffer_json_member_add_array(wb, "to"); + rrdlabels_key_to_buffer_array_item(t->recipient, wb); + buffer_json_array_close(wb); // recipient + } + buffer_json_object_close(wb); // alert name 
+ } + dfe_done(t); + } + buffer_json_array_close(wb); // alerts + + health_prototype_metadata_foreach(ctl, contexts_v2_alerts_by_x_update_prototypes); + contexts_v2_alerts_by_x_to_json(wb, ctl->alerts.by_type, "alerts_by_type"); + contexts_v2_alerts_by_x_to_json(wb, ctl->alerts.by_component, "alerts_by_component"); + contexts_v2_alerts_by_x_to_json(wb, ctl->alerts.by_classification, "alerts_by_classification"); + contexts_v2_alerts_by_x_to_json(wb, ctl->alerts.by_recipient, "alerts_by_recipient"); + contexts_v2_alerts_by_x_to_json(wb, ctl->alerts.by_module, "alerts_by_module"); + } + + if(ctl->request->options & (CONTEXTS_OPTION_ALERTS_WITH_INSTANCES | CONTEXTS_OPTION_ALERTS_WITH_VALUES)) { + contexts_v2_alert_instances_to_json(wb, "alert_instances", ctl, debug); + } +} + +static void alert_instances_v2_insert_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data) { + struct rrdcontext_to_json_v2_data *ctl = data; + struct sql_alert_instance_v2_entry *t = value; + RRDCALC *rc = t->tmp; + + t->context = rc->rrdset->context; + t->chart_id = rc->rrdset->id; + t->chart_name = rc->rrdset->name; + t->family = rc->rrdset->family; + t->units = rc->config.units; + t->classification = rc->config.classification; + t->type = rc->config.type; + t->recipient = rc->config.recipient; + t->component = rc->config.component; + t->name = rc->config.name; + t->source = rc->config.source; + t->status = rc->status; + t->flags = rc->run_flags; + t->info = rc->config.info; + t->summary = rc->summary; + t->value = rc->value; + t->last_updated = rc->last_updated; + t->last_status_change = rc->last_status_change; + t->last_status_change_value = rc->last_status_change_value; + t->host = rc->rrdset->rrdhost; + t->alarm_id = rc->id; + t->ni = ctl->nodes.ni; + + uuid_copy(t->config_hash_id, rc->config.hash_id); + health_alarm_log_get_global_id_and_transition_id_for_rrdcalc(rc, &t->global_id, &t->last_transition_id); +} + +static bool 
alert_instances_v2_conflict_callback(const DICTIONARY_ITEM *item __maybe_unused, void *old_value __maybe_unused, void *new_value __maybe_unused, void *data __maybe_unused) { + internal_fatal(true, "This should never happen!"); + return true; +} + +static void alert_instances_delete_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value __maybe_unused, void *data __maybe_unused) { + ; +} + +static void rrdcontext_v2_set_transition_filter(const char *machine_guid, const char *context, time_t alarm_id, void *data) { + struct rrdcontext_to_json_v2_data *ctl = data; + + if(machine_guid && *machine_guid) { + if(ctl->nodes.scope_pattern) + simple_pattern_free(ctl->nodes.scope_pattern); + + if(ctl->nodes.pattern) + simple_pattern_free(ctl->nodes.pattern); + + ctl->nodes.scope_pattern = string_to_simple_pattern(machine_guid); + ctl->nodes.pattern = NULL; + } + + if(context && *context) { + if(ctl->contexts.scope_pattern) + simple_pattern_free(ctl->contexts.scope_pattern); + + if(ctl->contexts.pattern) + simple_pattern_free(ctl->contexts.pattern); + + ctl->contexts.scope_pattern = string_to_simple_pattern(context); + ctl->contexts.pattern = NULL; + } + + ctl->alerts.alarm_id_filter = alarm_id; +} + +bool rrdcontexts_v2_init_alert_dictionaries(struct rrdcontext_to_json_v2_data *ctl, struct api_v2_contexts_request *req) { + if(req->alerts.transition) { + ctl->options |= CONTEXTS_OPTION_ALERTS_WITH_INSTANCES | CONTEXTS_OPTION_ALERTS_WITH_VALUES; + if(!sql_find_alert_transition(req->alerts.transition, rrdcontext_v2_set_transition_filter, ctl)) + return false; + } + + ctl->alerts.summary = dictionary_create_advanced( + DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, + NULL, + sizeof(struct alert_v2_entry)); + + dictionary_register_insert_callback(ctl->alerts.summary, alerts_v2_insert_callback, ctl); + dictionary_register_conflict_callback(ctl->alerts.summary, alerts_v2_conflict_callback, ctl); + 
dictionary_register_delete_callback(ctl->alerts.summary, alerts_v2_delete_callback, ctl); + + ctl->alerts.by_type = dictionary_create_advanced( + DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, + NULL, + sizeof(struct alert_by_x_entry)); + + dictionary_register_insert_callback(ctl->alerts.by_type, alerts_by_x_insert_callback, NULL); + dictionary_register_conflict_callback(ctl->alerts.by_type, alerts_by_x_conflict_callback, NULL); + + ctl->alerts.by_component = dictionary_create_advanced( + DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, + NULL, + sizeof(struct alert_by_x_entry)); + + dictionary_register_insert_callback(ctl->alerts.by_component, alerts_by_x_insert_callback, NULL); + dictionary_register_conflict_callback(ctl->alerts.by_component, alerts_by_x_conflict_callback, NULL); + + ctl->alerts.by_classification = dictionary_create_advanced( + DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, + NULL, + sizeof(struct alert_by_x_entry)); + + dictionary_register_insert_callback(ctl->alerts.by_classification, alerts_by_x_insert_callback, NULL); + dictionary_register_conflict_callback(ctl->alerts.by_classification, alerts_by_x_conflict_callback, NULL); + + ctl->alerts.by_recipient = dictionary_create_advanced( + DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, + NULL, + sizeof(struct alert_by_x_entry)); + + dictionary_register_insert_callback(ctl->alerts.by_recipient, alerts_by_x_insert_callback, NULL); + dictionary_register_conflict_callback(ctl->alerts.by_recipient, alerts_by_x_conflict_callback, NULL); + + ctl->alerts.by_module = dictionary_create_advanced( + DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, + NULL, + sizeof(struct alert_by_x_entry)); + + dictionary_register_insert_callback(ctl->alerts.by_module, alerts_by_x_insert_callback, NULL); + 
dictionary_register_conflict_callback(ctl->alerts.by_module, alerts_by_x_conflict_callback, NULL); + + if(ctl->options & (CONTEXTS_OPTION_ALERTS_WITH_INSTANCES | CONTEXTS_OPTION_ALERTS_WITH_VALUES)) { + ctl->alerts.alert_instances = dictionary_create_advanced( + DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, + NULL, sizeof(struct sql_alert_instance_v2_entry)); + + dictionary_register_insert_callback(ctl->alerts.alert_instances, alert_instances_v2_insert_callback, ctl); + dictionary_register_conflict_callback(ctl->alerts.alert_instances, alert_instances_v2_conflict_callback, ctl); + dictionary_register_delete_callback(ctl->alerts.alert_instances, alert_instances_delete_callback, ctl); + } + + return true; +} + +void rrdcontexts_v2_alerts_cleanup(struct rrdcontext_to_json_v2_data *ctl) { + dictionary_destroy(ctl->alerts.summary); + dictionary_destroy(ctl->alerts.alert_instances); + dictionary_destroy(ctl->alerts.by_type); + dictionary_destroy(ctl->alerts.by_component); + dictionary_destroy(ctl->alerts.by_classification); + dictionary_destroy(ctl->alerts.by_recipient); + dictionary_destroy(ctl->alerts.by_module); +} diff --git a/src/database/contexts/api_v2_contexts_alerts.h b/src/database/contexts/api_v2_contexts_alerts.h new file mode 100644 index 00000000000000..b7be3f4d93bda8 --- /dev/null +++ b/src/database/contexts/api_v2_contexts_alerts.h @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_API_V2_CONTEXTS_ALERTS_H +#define NETDATA_API_V2_CONTEXTS_ALERTS_H + +#include "internal.h" +#include "api_v2_contexts.h" + +struct alert_transitions_callback_data { + struct rrdcontext_to_json_v2_data *ctl; + BUFFER *wb; + bool debug; + bool only_one_config; + + struct { + SIMPLE_PATTERN *pattern; + DICTIONARY *dict; + } facets[ATF_TOTAL_ENTRIES]; + + uint32_t max_items_to_return; + uint32_t items_to_return; + + uint32_t items_evaluated; + uint32_t items_matched; + + + struct 
sql_alert_transition_fixed_size *base; // double linked list - last item is base->prev + struct sql_alert_transition_fixed_size *last_added; // the last item added, not the last of the list + + struct { + size_t first; + size_t skips_before; + size_t skips_after; + size_t backwards; + size_t forwards; + size_t prepend; + size_t append; + size_t shifts; + } operations; + + uint32_t configs_added; +}; + +void contexts_v2_alerts_to_json(BUFFER *wb, struct rrdcontext_to_json_v2_data *ctl, bool debug); +bool rrdcontext_matches_alert(struct rrdcontext_to_json_v2_data *ctl, RRDCONTEXT *rc); +void contexts_v2_alert_config_to_json_from_sql_alert_config_data(struct sql_alert_config_data *t, void *data); +void contexts_v2_alert_transitions_to_json(BUFFER *wb, struct rrdcontext_to_json_v2_data *ctl, bool debug); + +bool rrdcontexts_v2_init_alert_dictionaries(struct rrdcontext_to_json_v2_data *ctl, struct api_v2_contexts_request *req); +void rrdcontexts_v2_alerts_cleanup(struct rrdcontext_to_json_v2_data *ctl); + +#endif //NETDATA_API_V2_CONTEXTS_ALERTS_H diff --git a/src/database/contexts/query_scope.c b/src/database/contexts/query_scope.c index f3bcd0b3faeb67..f243c4a9f4e6ef 100644 --- a/src/database/contexts/query_scope.c +++ b/src/database/contexts/query_scope.c @@ -18,8 +18,8 @@ ssize_t query_scope_foreach_host(SIMPLE_PATTERN *scope_hosts_sp, SIMPLE_PATTERN uint64_t t_hash = 0; dfe_start_read(rrdhost_root_index, host) { - if(host->node_id) - uuid_unparse_lower(*host->node_id, host_node_id_str); + if(!uuid_is_null(host->node_id)) + uuid_unparse_lower(host->node_id, host_node_id_str); else host_node_id_str[0] = '\0'; diff --git a/src/database/contexts/query_target.c b/src/database/contexts/query_target.c index 29a9c3e5910d17..fbdc9d793c9614 100644 --- a/src/database/contexts/query_target.c +++ b/src/database/contexts/query_target.c @@ -897,9 +897,9 @@ static ssize_t query_node_add(void *data, RRDHOST *host, bool queryable_host) { QUERY_TARGET *qt = qtl->qt; QUERY_NODE *qn = 
query_node_allocate(qt, host); - if(host->node_id) { + if(!uuid_is_null(host->node_id)) { if(!qtl->host_node_id_str[0]) - uuid_unparse_lower(*host->node_id, qn->node_id); + uuid_unparse_lower(host->node_id, qn->node_id); else memcpy(qn->node_id, qtl->host_node_id_str, sizeof(qn->node_id)); } @@ -958,7 +958,7 @@ static ssize_t query_node_add(void *data, RRDHOST *host, bool queryable_host) { void query_target_generate_name(QUERY_TARGET *qt) { char options_buffer[100 + 1]; - web_client_api_request_v1_data_options_to_string(options_buffer, 100, qt->request.options); + web_client_api_request_data_vX_options_to_string(options_buffer, 100, qt->request.options); char resampling_buffer[20 + 1] = ""; if(qt->request.resampling_time > 1) @@ -1120,8 +1120,8 @@ QUERY_TARGET *query_target_create(QUERY_TARGET_REQUEST *qtr) { } if(host) { - if(host->node_id) - uuid_unparse_lower(*host->node_id, qtl.host_node_id_str); + if(!uuid_is_null(host->node_id)) + uuid_unparse_lower(host->node_id, qtl.host_node_id_str); else qtl.host_node_id_str[0] = '\0'; diff --git a/src/database/contexts/rrdcontext.c b/src/database/contexts/rrdcontext.c index f755e1f7ed1f05..7e299a16e65dcf 100644 --- a/src/database/contexts/rrdcontext.c +++ b/src/database/contexts/rrdcontext.c @@ -198,21 +198,16 @@ int rrdcontext_foreach_instance_with_rrdset_in_context(RRDHOST *host, const char // ---------------------------------------------------------------------------- // ACLK interface -static bool rrdhost_check_our_claim_id(const char *claim_id) { - if(!localhost->aclk_state.claimed_id) return false; - return (strcasecmp(claim_id, localhost->aclk_state.claimed_id) == 0) ? 
true : false; -} - void rrdcontext_hub_checkpoint_command(void *ptr) { struct ctxs_checkpoint *cmd = ptr; - if(!rrdhost_check_our_claim_id(cmd->claim_id)) { + if(!claim_id_matches(cmd->claim_id)) { + CLAIM_ID claim_id = claim_id_get(); nd_log(NDLS_DAEMON, NDLP_WARNING, "RRDCONTEXT: received checkpoint command for claim_id '%s', node id '%s', " "but this is not our claim id. Ours '%s', received '%s'. Ignoring command.", cmd->claim_id, cmd->node_id, - localhost->aclk_state.claimed_id?localhost->aclk_state.claimed_id:"NOT SET", - cmd->claim_id); + claim_id.str, cmd->claim_id); return; } @@ -245,10 +240,9 @@ void rrdcontext_hub_checkpoint_command(void *ptr) { "Sending snapshot of all contexts.", cmd->version_hash, rrdhost_hostname(host), our_version_hash); -#ifdef ENABLE_ACLK // prepare the snapshot char uuid[UUID_STR_LEN]; - uuid_unparse_lower(*host->node_id, uuid); + uuid_unparse_lower(host->node_id, uuid); contexts_snapshot_t bundle = contexts_snapshot_new(cmd->claim_id, uuid, our_version_hash); // do a deep scan on every metric of the host to make sure all our data are updated @@ -262,7 +256,6 @@ void rrdcontext_hub_checkpoint_command(void *ptr) { // send it aclk_send_contexts_snapshot(bundle); -#endif } nd_log(NDLS_DAEMON, NDLP_DEBUG, @@ -271,7 +264,7 @@ void rrdcontext_hub_checkpoint_command(void *ptr) { rrdhost_flag_set(host, RRDHOST_FLAG_ACLK_STREAM_CONTEXTS); char node_str[UUID_STR_LEN]; - uuid_unparse_lower(*host->node_id, node_str); + uuid_unparse_lower(host->node_id, node_str); nd_log(NDLS_ACCESS, NDLP_DEBUG, "ACLK REQ [%s (%s)]: STREAM CONTEXTS ENABLED", node_str, rrdhost_hostname(host)); @@ -280,13 +273,13 @@ void rrdcontext_hub_checkpoint_command(void *ptr) { void rrdcontext_hub_stop_streaming_command(void *ptr) { struct stop_streaming_ctxs *cmd = ptr; - if(!rrdhost_check_our_claim_id(cmd->claim_id)) { + if(!claim_id_matches(cmd->claim_id)) { + CLAIM_ID claim_id = claim_id_get(); nd_log(NDLS_DAEMON, NDLP_WARNING, "RRDCONTEXT: received stop streaming 
command for claim_id '%s', node id '%s', " "but this is not our claim id. Ours '%s', received '%s'. Ignoring command.", cmd->claim_id, cmd->node_id, - localhost->aclk_state.claimed_id?localhost->aclk_state.claimed_id:"NOT SET", - cmd->claim_id); + claim_id.str, cmd->claim_id); return; } diff --git a/src/database/contexts/rrdcontext.h b/src/database/contexts/rrdcontext.h index 9fea55d38cdca5..0906329bc2b7a9 100644 --- a/src/database/contexts/rrdcontext.h +++ b/src/database/contexts/rrdcontext.h @@ -623,10 +623,10 @@ struct api_v2_contexts_request { char *contexts; char *q; - CONTEXTS_V2_OPTIONS options; + CONTEXTS_OPTIONS options; struct { - CONTEXTS_V2_ALERT_STATUS status; + CONTEXTS_ALERT_STATUS status; char *alert; char *transition; uint32_t last; diff --git a/src/database/contexts/worker.c b/src/database/contexts/worker.c index 6012c14f5f6a3e..953e61e6637deb 100644 --- a/src/database/contexts/worker.c +++ b/src/database/contexts/worker.c @@ -818,7 +818,6 @@ void rrdcontext_message_send_unsafe(RRDCONTEXT *rc, bool snapshot __maybe_unused rc->hub.last_time_s = rrd_flag_is_collected(rc) ? 0 : rc->last_time_s; rc->hub.deleted = rrd_flag_is_deleted(rc) ? 
true : false; -#ifdef ENABLE_ACLK struct context_updated message = { .id = rc->hub.id, .version = rc->hub.version, @@ -840,7 +839,6 @@ void rrdcontext_message_send_unsafe(RRDCONTEXT *rc, bool snapshot __maybe_unused else contexts_updated_add_ctx_update(bundle, &message); } -#endif // store it to SQL @@ -956,7 +954,7 @@ static void rrdcontext_dequeue_from_hub_queue(RRDCONTEXT *rc) { static void rrdcontext_dispatch_queued_contexts_to_hub(RRDHOST *host, usec_t now_ut) { // check if we have received a streaming command for this host - if(!host->node_id || !rrdhost_flag_check(host, RRDHOST_FLAG_ACLK_STREAM_CONTEXTS) || !aclk_connected || !host->rrdctx.hub_queue) + if(uuid_is_null(host->node_id) || !rrdhost_flag_check(host, RRDHOST_FLAG_ACLK_STREAM_CONTEXTS) || !aclk_online_for_contexts() || !host->rrdctx.hub_queue) return; // check if there are queued items to send @@ -975,9 +973,9 @@ static void rrdcontext_dispatch_queued_contexts_to_hub(RRDHOST *host, usec_t now worker_is_busy(WORKER_JOB_QUEUED); usec_t dispatch_ut = rrdcontext_calculate_queued_dispatch_time_ut(rc, now_ut); - char *claim_id = get_agent_claimid(); + CLAIM_ID claim_id = claim_id_get(); - if(unlikely(now_ut >= dispatch_ut) && claim_id) { + if(unlikely(now_ut >= dispatch_ut) && claim_id_is_set(claim_id)) { worker_is_busy(WORKER_JOB_CHECK); rrdcontext_lock(rc); @@ -985,15 +983,13 @@ static void rrdcontext_dispatch_queued_contexts_to_hub(RRDHOST *host, usec_t now if(check_if_cloud_version_changed_unsafe(rc, true)) { worker_is_busy(WORKER_JOB_SEND); -#ifdef ENABLE_ACLK if(!bundle) { // prepare the bundle to send the messages char uuid[UUID_STR_LEN]; - uuid_unparse_lower(*host->node_id, uuid); + uuid_unparse_lower(host->node_id, uuid); - bundle = contexts_updated_new(claim_id, uuid, 0, now_ut); + bundle = contexts_updated_new(claim_id.str, uuid, 0, now_ut); } -#endif // update the hub data of the context, give a new version, pack the message // and save an update to SQL rrdcontext_message_send_unsafe(rc, 
false, bundle); @@ -1030,11 +1026,9 @@ static void rrdcontext_dispatch_queued_contexts_to_hub(RRDHOST *host, usec_t now else rrdcontext_unlock(rc); } - freez(claim_id); } dfe_done(rc); -#ifdef ENABLE_ACLK if(service_running(SERVICE_CONTEXT) && bundle) { // we have a bundle to send messages @@ -1046,7 +1040,6 @@ static void rrdcontext_dispatch_queued_contexts_to_hub(RRDHOST *host, usec_t now } else if(bundle) contexts_updated_delete(bundle); -#endif } diff --git a/src/database/rrd.h b/src/database/rrd.h index bd31e21e130e1a..93298ec2fbaeb9 100644 --- a/src/database/rrd.h +++ b/src/database/rrd.h @@ -1188,7 +1188,7 @@ struct rrdhost { struct rrdhost_system_info *system_info; // information collected from the host environment // ------------------------------------------------------------------------ - // streaming of data to remote hosts - rrdpush sender + // streaming of data to remote hosts - rrdpush struct { struct { @@ -1204,6 +1204,10 @@ struct rrdhost { uint32_t last_used; // the last slot we used for a chart (increments only) } pluginsd_chart_slots; + + char *destination; // where to send metrics to + char *api_key; // the api key at the receiving netdata + SIMPLE_PATTERN *charts_matching; // pattern to match the charts to be sent } send; struct { @@ -1215,11 +1219,8 @@ struct rrdhost { } receive; } rrdpush; - char *rrdpush_send_destination; // where to send metrics to - char *rrdpush_send_api_key; // the api key at the receiving netdata struct rrdpush_destinations *destinations; // a linked list of possible destinations struct rrdpush_destinations *destination; // the current destination from the above list - SIMPLE_PATTERN *rrdpush_send_charts_matching; // pattern to match the charts to be sent int32_t rrdpush_last_receiver_exit_reason; time_t rrdpush_seconds_to_replicate; // max time we want to replicate from the child @@ -1247,7 +1248,7 @@ struct rrdhost { int connected_children_count; // number of senders currently streaming struct receiver_state 
*receiver; - netdata_mutex_t receiver_lock; + SPINLOCK receiver_lock; int trigger_chart_obsoletion_check; // set when child connects, will instruct parent to // trigger a check for obsoleted charts since previous connect @@ -1307,10 +1308,12 @@ struct rrdhost { } retention; nd_uuid_t host_uuid; // Global GUID for this host - nd_uuid_t *node_id; // Cloud node_id + nd_uuid_t node_id; // Cloud node_id - netdata_mutex_t aclk_state_lock; - aclk_rrdhost_state aclk_state; + struct { + ND_UUID claim_id_of_origin; + ND_UUID claim_id_of_parent; + } aclk; struct rrdhost *next; struct rrdhost *prev; @@ -1325,9 +1328,6 @@ extern RRDHOST *localhost; #define rrdhost_program_name(host) string2str((host)->program_name) #define rrdhost_program_version(host) string2str((host)->program_version) -#define rrdhost_aclk_state_lock(host) netdata_mutex_lock(&((host)->aclk_state_lock)) -#define rrdhost_aclk_state_unlock(host) netdata_mutex_unlock(&((host)->aclk_state_lock)) - #define rrdhost_receiver_replicating_charts(host) (__atomic_load_n(&((host)->rrdpush_receiver_replicating_charts), __ATOMIC_RELAXED)) #define rrdhost_receiver_replicating_charts_plus_one(host) (__atomic_add_fetch(&((host)->rrdpush_receiver_replicating_charts), 1, __ATOMIC_RELAXED)) #define rrdhost_receiver_replicating_charts_minus_one(host) (__atomic_sub_fetch(&((host)->rrdpush_receiver_replicating_charts), 1, __ATOMIC_RELAXED)) diff --git a/src/database/rrdfunctions-exporters.c b/src/database/rrdfunctions-exporters.c index afcdc8a981c10c..205d48bf3dbd4f 100644 --- a/src/database/rrdfunctions-exporters.c +++ b/src/database/rrdfunctions-exporters.c @@ -60,7 +60,7 @@ static void functions2json(DICTIONARY *functions, BUFFER *wb) { struct rrd_host_function *t; dfe_start_read(functions, t) { if (!rrd_collector_running(t->collector)) continue; - if(t->options & RRD_FUNCTION_DYNCFG) continue; + if(t->options & (RRD_FUNCTION_DYNCFG|RRD_FUNCTION_HIDDEN)) continue; buffer_json_member_add_object(wb, t_dfe.name); { @@ -99,7 +99,7 
@@ void host_functions2json(RRDHOST *host, BUFFER *wb) { struct rrd_host_function *t; dfe_start_read(host->functions, t) { if(!rrd_collector_running(t->collector)) continue; - if(t->options & RRD_FUNCTION_DYNCFG) continue; + if(t->options & (RRD_FUNCTION_DYNCFG|RRD_FUNCTION_HIDDEN)) continue; buffer_json_member_add_object(wb, t_dfe.name); { @@ -130,7 +130,7 @@ void chart_functions_to_dict(DICTIONARY *rrdset_functions_view, DICTIONARY *dst, struct rrd_host_function *t; dfe_start_read(rrdset_functions_view, t) { if(!rrd_collector_running(t->collector)) continue; - if(t->options & RRD_FUNCTION_DYNCFG) continue; + if(t->options & (RRD_FUNCTION_DYNCFG|RRD_FUNCTION_HIDDEN)) continue; dictionary_set(dst, t_dfe.name, value, value_size); } @@ -144,7 +144,7 @@ void host_functions_to_dict(RRDHOST *host, DICTIONARY *dst, void *value, size_t struct rrd_host_function *t; dfe_start_read(host->functions, t) { if(!rrd_collector_running(t->collector)) continue; - if(t->options & RRD_FUNCTION_DYNCFG) continue; + if(t->options & (RRD_FUNCTION_DYNCFG|RRD_FUNCTION_HIDDEN)) continue; if(help) *help = t->help; diff --git a/src/database/rrdfunctions-inflight.c b/src/database/rrdfunctions-inflight.c index adb27b3e7d7d92..ac292443274855 100644 --- a/src/database/rrdfunctions-inflight.c +++ b/src/database/rrdfunctions-inflight.c @@ -398,7 +398,7 @@ int rrd_function_run(RRDHOST *host, BUFFER *result_wb, int timeout_s, rrd_function_result_callback_t result_cb, void *result_cb_data, rrd_function_progress_cb_t progress_cb, void *progress_cb_data, rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data, - BUFFER *payload, const char *source) { + BUFFER *payload, const char *source, bool hidden) { int code; char sanitized_cmd[PLUGINSD_LINE_MAX + 1]; @@ -436,15 +436,21 @@ int rrd_function_run(RRDHOST *host, BUFFER *result_wb, int timeout_s, struct rrd_host_function *rdcf = dictionary_acquired_item_value(host_function_acquired); - 
if(!http_access_user_has_enough_access_level_for_endpoint(user_access, rdcf->access)) { + if((rdcf->options & RRD_FUNCTION_HIDDEN) && !hidden) { + code = rrd_call_function_error(result_wb, + "You cannot access a hidden function like this. ", + HTTP_ACCESS_PERMISSION_DENIED_HTTP_CODE(user_access)); + dictionary_acquired_item_release(host->functions, host_function_acquired); - if(!aclk_connected) - code = rrd_call_function_error(result_wb, - "This Netdata must be connected to Netdata Cloud for Single-Sign-On (SSO) " - "access this feature. Claim this Netdata to Netdata Cloud to enable access.", - HTTP_ACCESS_PERMISSION_DENIED_HTTP_CODE(user_access)); + if(result_cb) + result_cb(result_wb, code, result_cb_data); + + return code; + } + + if(!http_access_user_has_enough_access_level_for_endpoint(user_access, rdcf->access)) { - else if((rdcf->access & HTTP_ACCESS_SIGNED_ID) && !(user_access & HTTP_ACCESS_SIGNED_ID)) + if((rdcf->access & HTTP_ACCESS_SIGNED_ID) && !(user_access & HTTP_ACCESS_SIGNED_ID)) code = rrd_call_function_error(result_wb, "You need to be authenticated via Netdata Cloud Single-Sign-On (SSO) " "to access this feature. 
Sign-in on this dashboard, " diff --git a/src/database/rrdfunctions-inline.c b/src/database/rrdfunctions-inline.c index 3eb30e7b58302c..256f72a1f8dd3b 100644 --- a/src/database/rrdfunctions-inline.c +++ b/src/database/rrdfunctions-inline.c @@ -17,7 +17,7 @@ static int rrd_function_run_inline(struct rrd_function_execute *rfe, void *data) if(rfe->is_cancelled.cb && rfe->is_cancelled.cb(rfe->is_cancelled.data)) code = HTTP_RESP_CLIENT_CLOSED_REQUEST; else - code = fi->cb(rfe->result.wb, rfe->function); + code = fi->cb(rfe->result.wb, rfe->function, rfe->payload, rfe->source); if(code == HTTP_RESP_CLIENT_CLOSED_REQUEST || (rfe->is_cancelled.cb && rfe->is_cancelled.cb(rfe->is_cancelled.data))) { buffer_flush(rfe->result.wb); diff --git a/src/database/rrdfunctions-inline.h b/src/database/rrdfunctions-inline.h index 9948edbef0109f..71fb10fe739b75 100644 --- a/src/database/rrdfunctions-inline.h +++ b/src/database/rrdfunctions-inline.h @@ -5,7 +5,7 @@ #include "rrd.h" -typedef int (*rrd_function_execute_inline_cb_t)(BUFFER *wb, const char *function); +typedef int (*rrd_function_execute_inline_cb_t)(BUFFER *wb, const char *function, BUFFER *payload, const char *source); void rrd_function_add_inline(RRDHOST *host, RRDSET *st, const char *name, int timeout, int priority, const char *help, const char *tags, diff --git a/src/database/rrdfunctions-internals.h b/src/database/rrdfunctions-internals.h index a846e4de084ab6..1f5c091d889936 100644 --- a/src/database/rrdfunctions-internals.h +++ b/src/database/rrdfunctions-internals.h @@ -11,6 +11,7 @@ typedef enum __attribute__((packed)) { RRD_FUNCTION_LOCAL = (1 << 0), RRD_FUNCTION_GLOBAL = (1 << 1), RRD_FUNCTION_DYNCFG = (1 << 2), + RRD_FUNCTION_HIDDEN = (1 << 3), // this is 8-bit } RRD_FUNCTION_OPTIONS; diff --git a/src/database/rrdfunctions-progress.c b/src/database/rrdfunctions-progress.c deleted file mode 100644 index 81d663909afe22..00000000000000 --- a/src/database/rrdfunctions-progress.c +++ /dev/null @@ -1,8 +0,0 @@ -// 
SPDX-License-Identifier: GPL-3.0-or-later - -#include "rrdfunctions-progress.h" - -int rrdhost_function_progress(BUFFER *wb, const char *function __maybe_unused) { - return progress_function_result(wb, rrdhost_hostname(localhost)); -} - diff --git a/src/database/rrdfunctions-progress.h b/src/database/rrdfunctions-progress.h deleted file mode 100644 index 8f97bf7e9b7728..00000000000000 --- a/src/database/rrdfunctions-progress.h +++ /dev/null @@ -1,10 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_RRDFUNCTIONS_PROGRESS_H -#define NETDATA_RRDFUNCTIONS_PROGRESS_H - -#include "rrd.h" - -int rrdhost_function_progress(BUFFER *wb, const char *function __maybe_unused); - -#endif //NETDATA_RRDFUNCTIONS_PROGRESS_H diff --git a/src/database/rrdfunctions-streaming.h b/src/database/rrdfunctions-streaming.h deleted file mode 100644 index cfa15bdb5cb231..00000000000000 --- a/src/database/rrdfunctions-streaming.h +++ /dev/null @@ -1,12 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_RRDFUNCTIONS_STREAMING_H -#define NETDATA_RRDFUNCTIONS_STREAMING_H - -#include "rrd.h" - -#define RRDFUNCTIONS_STREAMING_HELP "Streaming status for parents and children." 
- -int rrdhost_function_streaming(BUFFER *wb, const char *function); - -#endif //NETDATA_RRDFUNCTIONS_STREAMING_H diff --git a/src/database/rrdfunctions.c b/src/database/rrdfunctions.c index 9411c4c3f74bd4..657b1f446b0a11 100644 --- a/src/database/rrdfunctions.c +++ b/src/database/rrdfunctions.c @@ -225,6 +225,10 @@ void rrd_functions_host_destroy(RRDHOST *host) { // ---------------------------------------------------------------------------- +static inline bool is_function_hidden(const char *name, const char *tags) { + return (name && name[0] == '_' && name[1] == '_') || (tags && strstr(tags, RRDFUNCTIONS_TAG_HIDDEN) != NULL); +} + static inline bool is_function_dyncfg(const char *name) { if(!name || !*name) return false; @@ -239,6 +243,15 @@ static inline bool is_function_dyncfg(const char *name) { return false; } +static inline RRD_FUNCTION_OPTIONS get_function_options(RRDSET *st, const char *name, const char *tags) { + if(is_function_dyncfg(name)) + return RRD_FUNCTION_DYNCFG; + + RRD_FUNCTION_OPTIONS options = st ? RRD_FUNCTION_LOCAL : RRD_FUNCTION_GLOBAL; + + return options | (is_function_hidden(name, tags) ? RRD_FUNCTION_HIDDEN : 0); +} + void rrd_function_add(RRDHOST *host, RRDSET *st, const char *name, int timeout, int priority, const char *help, const char *tags, HTTP_ACCESS access, bool sync, @@ -263,7 +276,7 @@ void rrd_function_add(RRDHOST *host, RRDSET *st, const char *name, int timeout, struct rrd_host_function tmp = { .sync = sync, .timeout = timeout, - .options = st ? RRD_FUNCTION_LOCAL: (is_function_dyncfg(name) ? 
RRD_FUNCTION_DYNCFG : RRD_FUNCTION_GLOBAL), + .options = get_function_options(st, name, tags), .access = access, .execute_cb = execute_cb, .execute_cb_data = execute_cb_data, @@ -294,17 +307,6 @@ void rrd_function_del(RRDHOST *host, RRDSET *st, const char *name) { dictionary_garbage_collect(host->functions); } -int rrd_call_function_error(BUFFER *wb, const char *msg, int code) { - char buffer[PLUGINSD_LINE_MAX]; - json_escape_string(buffer, msg, PLUGINSD_LINE_MAX); - - buffer_flush(wb); - buffer_sprintf(wb, "{\"status\":%d,\"error_message\":\"%s\"}", code, buffer); - wb->content_type = CT_APPLICATION_JSON; - buffer_no_cacheable(wb); - return code; -} - int rrd_functions_find_by_name(RRDHOST *host, BUFFER *wb, const char *name, size_t key_length, const DICTIONARY_ITEM **item) { char buffer[MAX_FUNCTION_LENGTH + 1]; strncpyz(buffer, name, sizeof(buffer) - 1); diff --git a/src/database/rrdfunctions.h b/src/database/rrdfunctions.h index d3c7f0e13df33f..1c28d73cc24673 100644 --- a/src/database/rrdfunctions.h +++ b/src/database/rrdfunctions.h @@ -7,6 +7,7 @@ #include "libnetdata/libnetdata.h" #define RRDFUNCTIONS_PRIORITY_DEFAULT 100 +#define RRDFUNCTIONS_TAG_HIDDEN "hidden" #define RRDFUNCTIONS_TIMEOUT_EXTENSION_UT (1 * USEC_PER_SEC) @@ -79,9 +80,7 @@ int rrd_function_run(RRDHOST *host, BUFFER *result_wb, int timeout_s, rrd_function_result_callback_t result_cb, void *result_cb_data, rrd_function_progress_cb_t progress_cb, void *progress_cb_data, rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data, - BUFFER *payload, const char *source); - -int rrd_call_function_error(BUFFER *wb, const char *msg, int code); + BUFFER *payload, const char *source, bool hidden); bool rrd_function_available(RRDHOST *host, const char *function); @@ -90,7 +89,5 @@ bool rrd_function_has_this_original_result_callback(nd_uuid_t *transaction, rrd_ #include "rrdfunctions-inline.h" #include "rrdfunctions-inflight.h" #include "rrdfunctions-exporters.h" -#include 
"rrdfunctions-streaming.h" -#include "rrdfunctions-progress.h" #endif // NETDATA_RRDFUNCTIONS_H diff --git a/src/database/rrdhost.c b/src/database/rrdhost.c index b3d786cff38be0..ec9bc86e0f45ef 100644 --- a/src/database/rrdhost.c +++ b/src/database/rrdhost.c @@ -42,7 +42,7 @@ RRDHOST *find_host_by_node_id(char *node_id) { RRDHOST *host, *ret = NULL; dfe_start_read(rrdhost_root_index, host) { - if (host->node_id && uuid_eq(*host->node_id, node_uuid)) { + if (uuid_eq(host->node_id, node_uuid)) { ret = host; break; } @@ -244,15 +244,13 @@ static void rrdhost_initialize_rrdpush_sender(RRDHOST *host, rrdhost_streaming_sender_structures_init(host); -#ifdef ENABLE_HTTPS host->sender->ssl = NETDATA_SSL_UNSET_CONNECTION; -#endif - host->rrdpush_send_destination = strdupz(rrdpush_destination); + host->rrdpush.send.destination = strdupz(rrdpush_destination); rrdpush_destinations_init(host); - host->rrdpush_send_api_key = strdupz(rrdpush_api_key); - host->rrdpush_send_charts_matching = simple_pattern_create(rrdpush_send_charts_matching, NULL, + host->rrdpush.send.api_key = strdupz(rrdpush_api_key); + host->rrdpush.send.charts_matching = simple_pattern_create(rrdpush_send_charts_matching, NULL, SIMPLE_PATTERN_EXACT, true); rrdhost_option_set(host, RRDHOST_OPTION_SENDER_ENABLED); @@ -383,8 +381,7 @@ static RRDHOST *rrdhost_create( host->rrd_history_entries = align_entries_to_pagesize(memory_mode, entries); host->health.health_enabled = ((memory_mode == RRD_MEMORY_MODE_NONE)) ? 
0 : health_enabled; - netdata_mutex_init(&host->aclk_state_lock); - netdata_mutex_init(&host->receiver_lock); + spinlock_init(&host->receiver_lock); if (likely(!archived)) { rrd_functions_host_init(host); @@ -535,8 +532,8 @@ static RRDHOST *rrdhost_create( , rrd_memory_mode_name(host->rrd_memory_mode) , host->rrd_history_entries , rrdhost_has_rrdpush_sender_enabled(host)?"enabled":"disabled" - , host->rrdpush_send_destination?host->rrdpush_send_destination:"" - , host->rrdpush_send_api_key?host->rrdpush_send_api_key:"" + , host->rrdpush.send.destination?host->rrdpush.send.destination:"" + , host->rrdpush.send.api_key?host->rrdpush.send.api_key:"" , host->health.health_enabled?"enabled":"disabled" , host->cache_dir , string2str(host->health.health_default_exec) @@ -1027,12 +1024,14 @@ void dbengine_init(char *hostname) { #endif } +void api_v1_management_init(void); + int rrd_init(char *hostname, struct rrdhost_system_info *system_info, bool unittest) { rrdhost_init(); if (unlikely(sql_init_meta_database(DB_CHECK_NONE, system_info ? 
0 : 1))) { if (default_rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) { - set_late_global_environment(system_info); + set_late_analytics_variables(system_info); fatal("Failed to initialize SQLite"); } @@ -1112,26 +1111,15 @@ int rrd_init(char *hostname, struct rrdhost_system_info *system_info, bool unitt dyncfg_host_init(localhost); - if(!unittest) { + if(!unittest) health_plugin_init(); - } - - // we register this only on localhost - // for the other nodes, the origin server should register it - rrd_function_add_inline(localhost, NULL, "streaming", 10, - RRDFUNCTIONS_PRIORITY_DEFAULT + 1, RRDFUNCTIONS_STREAMING_HELP, "top", - HTTP_ACCESS_SIGNED_ID | HTTP_ACCESS_SAME_SPACE | HTTP_ACCESS_SENSITIVE_DATA, - rrdhost_function_streaming); - rrd_function_add_inline(localhost, NULL, "netdata-api-calls", 10, - RRDFUNCTIONS_PRIORITY_DEFAULT + 2, RRDFUNCTIONS_PROGRESS_HELP, "top", - HTTP_ACCESS_SIGNED_ID | HTTP_ACCESS_SAME_SPACE | HTTP_ACCESS_SENSITIVE_DATA, - rrdhost_function_progress); + global_functions_add(); if (likely(system_info)) { detect_machine_guid_change(&localhost->host_uuid); sql_aclk_sync_init(); - web_client_api_v1_management_init(); + api_v1_management_init(); } return 0; @@ -1284,9 +1272,6 @@ void rrdhost_free___while_having_rrd_wrlock(RRDHOST *host, bool force) { // ------------------------------------------------------------------------ // free it - pthread_mutex_destroy(&host->aclk_state_lock); - freez(host->aclk_state.claimed_id); - freez(host->aclk_state.prev_claimed_id); rrdlabels_destroy(host->rrdlabels); string_freez(host->os); string_freez(host->timezone); @@ -1295,14 +1280,13 @@ void rrdhost_free___while_having_rrd_wrlock(RRDHOST *host, bool force) { string_freez(host->program_version); rrdhost_system_info_free(host->system_info); freez(host->cache_dir); - freez(host->rrdpush_send_api_key); - freez(host->rrdpush_send_destination); + freez(host->rrdpush.send.api_key); + freez(host->rrdpush.send.destination); rrdpush_destinations_free(host); 
string_freez(host->health.health_default_exec); string_freez(host->health.health_default_recipient); string_freez(host->registry_hostname); - simple_pattern_free(host->rrdpush_send_charts_matching); - freez(host->node_id); + simple_pattern_free(host->rrdpush.send.charts_matching); rrd_functions_host_destroy(host); rrdvariables_destroy(host->rrdvars); @@ -1441,8 +1425,8 @@ static void rrdhost_load_auto_labels(void) { rrdlabels_add(labels, "_hostname", string2str(localhost->hostname), RRDLABEL_SRC_AUTO); rrdlabels_add(labels, "_os", string2str(localhost->os), RRDLABEL_SRC_AUTO); - if (localhost->rrdpush_send_destination) - rrdlabels_add(labels, "_streams_to", localhost->rrdpush_send_destination, RRDLABEL_SRC_AUTO); + if (localhost->rrdpush.send.destination) + rrdlabels_add(labels, "_streams_to", localhost->rrdpush.send.destination, RRDLABEL_SRC_AUTO); } void rrdhost_set_is_parent_label(void) { @@ -1453,11 +1437,7 @@ void rrdhost_set_is_parent_label(void) { rrdlabels_add(labels, "_is_parent", (count) ? "true" : "false", RRDLABEL_SRC_AUTO); //queue a node info -#ifdef ENABLE_ACLK - if (netdata_cloud_enabled) { - aclk_queue_node_info(localhost, false); - } -#endif + aclk_queue_node_info(localhost, false); } } @@ -1498,7 +1478,7 @@ static void rrdhost_load_kubernetes_labels(void) { if(!instance) return; char buffer[1000 + 1]; - while (fgets(buffer, 1000, instance->child_stdout_fp) != NULL) + while (fgets(buffer, 1000, spawn_popen_stdout(instance)) != NULL) rrdlabels_add_pair(localhost->rrdlabels, buffer, RRDLABEL_SRC_AUTO|RRDLABEL_SRC_K8S); // Non-zero exit code means that all the script output is error messages. We've shown already any message that didn't include a ':' @@ -1740,7 +1720,7 @@ void rrdhost_status(RRDHOST *host, time_t now, RRDHOST_STATUS *s) { s->ingest.since = MAX(host->child_connect_time, host->child_disconnected_time); s->ingest.reason = (online) ? 
STREAM_HANDSHAKE_NEVER : host->rrdpush_last_receiver_exit_reason; - netdata_mutex_lock(&host->receiver_lock); + spinlock_lock(&host->receiver_lock); s->ingest.hops = (host->system_info ? host->system_info->hops : (host == localhost) ? 0 : 1); bool has_receiver = false; if (host->receiver) { @@ -1751,11 +1731,9 @@ void rrdhost_status(RRDHOST *host, time_t now, RRDHOST_STATUS *s) { s->ingest.capabilities = host->receiver->capabilities; s->ingest.peers = socket_peers(host->receiver->fd); -#ifdef ENABLE_HTTPS s->ingest.ssl = SSL_connection(&host->receiver->ssl); -#endif } - netdata_mutex_unlock(&host->receiver_lock); + spinlock_unlock(&host->receiver_lock); if (online) { if(s->db.status == RRDHOST_DB_STATUS_INITIALIZING) @@ -1812,9 +1790,7 @@ void rrdhost_status(RRDHOST *host, time_t now, RRDHOST_STATUS *s) { s->stream.since = host->sender->last_state_since_t; s->stream.peers = socket_peers(host->sender->rrdpush_sender_socket); -#ifdef ENABLE_HTTPS s->stream.ssl = SSL_connection(&host->sender->ssl); -#endif memcpy(s->stream.sent_bytes_on_this_connection_per_type, host->sender->sent_bytes_on_this_connection_per_type, diff --git a/src/database/sqlite/sqlite_aclk.c b/src/database/sqlite/sqlite_aclk.c index 027ee8f930eddb..e60088570bc57c 100644 --- a/src/database/sqlite/sqlite_aclk.c +++ b/src/database/sqlite/sqlite_aclk.c @@ -20,7 +20,6 @@ void sanity_check(void) { BUILD_BUG_ON(WORKER_UTILIZATION_MAX_JOB_TYPES < ACLK_MAX_ENUMERATIONS_DEFINED); } -#ifdef ENABLE_ACLK static struct aclk_database_cmd aclk_database_deq_cmd(void) { struct aclk_database_cmd ret = { 0 }; @@ -39,7 +38,6 @@ static struct aclk_database_cmd aclk_database_deq_cmd(void) return ret; } -#endif static void aclk_database_enq_cmd(struct aclk_database_cmd *cmd) { @@ -165,14 +163,14 @@ static int create_host_callback(void *data, int argc, char **argv, char **column #ifdef NETDATA_INTERNAL_CHECKS char node_str[UUID_STR_LEN] = ""; - if (likely(host->node_id)) - uuid_unparse_lower(*host->node_id, node_str); - 
internal_error(true, "Adding archived host \"%s\" with GUID \"%s\" node id = \"%s\" ephemeral=%d", rrdhost_hostname(host), host->machine_guid, node_str, is_ephemeral); + if (likely(!uuid_is_null(host->node_id))) + uuid_unparse_lower(host->node_id, node_str); + internal_error(true, "Adding archived host \"%s\" with GUID \"%s\" node id = \"%s\" ephemeral=%d", + rrdhost_hostname(host), host->machine_guid, node_str, is_ephemeral); #endif return 0; } -#ifdef ENABLE_ACLK #define SQL_SELECT_ACLK_ALERT_TABLES \ "SELECT 'DROP '||type||' IF EXISTS '||name||';' FROM sqlite_schema WHERE name LIKE 'aclk_alert_%' AND type IN ('table', 'trigger', 'index')" @@ -262,7 +260,7 @@ static void timer_cb(uv_timer_t *handle) uv_update_time(handle->loop); struct aclk_database_cmd cmd = { 0 }; - if (aclk_connected) { + if (aclk_online_for_alerts()) { cmd.opcode = ACLK_DATABASE_PUSH_ALERT; aclk_database_enq_cmd(&cmd); aclk_check_node_info_and_collectors(); @@ -324,7 +322,7 @@ static void aclk_synchronization(void *arg) int live = (host == localhost || host->receiver || !(rrdhost_flag_check(host, RRDHOST_FLAG_ORPHAN))) ? 
1 : 0; struct aclk_sync_cfg_t *ahc = host->aclk_config; if (unlikely(!ahc)) - create_aclk_config(host, &host->host_uuid, host->node_id); + create_aclk_config(host, &host->host_uuid, &host->node_id); aclk_host_state_update(host, live, 1); break; case ACLK_DATABASE_NODE_UNREGISTER: @@ -358,13 +356,11 @@ static void aclk_synchronization_init(void) memset(&aclk_sync_config, 0, sizeof(aclk_sync_config)); fatal_assert(0 == uv_thread_create(&aclk_sync_config.thread, aclk_synchronization, &aclk_sync_config)); } -#endif // ------------------------------------------------------------- void create_aclk_config(RRDHOST *host __maybe_unused, nd_uuid_t *host_uuid __maybe_unused, nd_uuid_t *node_id __maybe_unused) { -#ifdef ENABLE_ACLK if (!host || host->aclk_config) return; @@ -374,16 +370,14 @@ void create_aclk_config(RRDHOST *host __maybe_unused, nd_uuid_t *host_uuid __may uuid_unparse_lower(*node_id, wc->node_id); host->aclk_config = wc; - if (node_id && !host->node_id) { - host->node_id = mallocz(sizeof(*host->node_id)); - uuid_copy(*host->node_id, *node_id); + if (node_id && uuid_is_null(host->node_id)) { + uuid_copy(host->node_id, *node_id); } wc->host = host; wc->stream_alerts = false; time_t now = now_realtime_sec(); wc->node_info_send_time = (host == localhost || NULL == localhost) ? 
now - 25 : now; -#endif } #define SQL_FETCH_ALL_HOSTS \ @@ -419,7 +413,6 @@ void sql_aclk_sync_init(void) // Trigger host context load for hosts that have been created metadata_queue_load_host_context(NULL); -#ifdef ENABLE_ACLK if (!number_of_children) aclk_queue_node_info(localhost, true); @@ -432,7 +425,6 @@ void sql_aclk_sync_init(void) aclk_synchronization_init(); netdata_log_info("ACLK sync initialization completed"); -#endif } static inline void queue_aclk_sync_cmd(enum aclk_database_opcode opcode, const void *param0, const void *param1) @@ -455,18 +447,14 @@ void aclk_push_alert_config(const char *node_id, const char *config_hash) void schedule_node_info_update(RRDHOST *host __maybe_unused) { -#ifdef ENABLE_ACLK if (unlikely(!host)) return; queue_aclk_sync_cmd(ACLK_DATABASE_NODE_STATE, host, NULL); -#endif } -#ifdef ENABLE_ACLK void unregister_node(const char *machine_guid) { if (unlikely(!machine_guid)) return; queue_aclk_sync_cmd(ACLK_DATABASE_NODE_UNREGISTER, strdupz(machine_guid), NULL); } -#endif diff --git a/src/database/sqlite/sqlite_aclk.h b/src/database/sqlite/sqlite_aclk.h index ec8cfa9dd2cf38..c999d1abcdc36d 100644 --- a/src/database/sqlite/sqlite_aclk.h +++ b/src/database/sqlite/sqlite_aclk.h @@ -15,11 +15,6 @@ static inline int uuid_parse_fix(char *in, nd_uuid_t uuid) return uuid_parse(in, uuid); } -static inline int claimed() -{ - return localhost->aclk_state.claimed_id != NULL; -} - enum aclk_database_opcode { ACLK_DATABASE_NOOP = 0, ACLK_DATABASE_NODE_STATE, @@ -56,8 +51,6 @@ void create_aclk_config(RRDHOST *host, nd_uuid_t *host_uuid, nd_uuid_t *node_id) void sql_aclk_sync_init(void); void aclk_push_alert_config(const char *node_id, const char *config_hash); void schedule_node_info_update(RRDHOST *host); -#ifdef ENABLE_ACLK void unregister_node(const char *machine_guid); -#endif #endif //NETDATA_SQLITE_ACLK_H diff --git a/src/database/sqlite/sqlite_aclk_alert.c b/src/database/sqlite/sqlite_aclk_alert.c index 3e707616909d7f..8d6754432ea2f1 
100644 --- a/src/database/sqlite/sqlite_aclk_alert.c +++ b/src/database/sqlite/sqlite_aclk_alert.c @@ -3,7 +3,6 @@ #include "sqlite_functions.h" #include "sqlite_aclk_alert.h" -#ifdef ENABLE_ACLK #include "../../aclk/aclk_alarm_api.h" #define SQLITE3_COLUMN_STRDUPZ_OR_NULL(res, param) \ @@ -424,27 +423,25 @@ void health_alarm_log_populate( static void aclk_push_alert_event(RRDHOST *host __maybe_unused) { + CLAIM_ID claim_id = claim_id_get(); - char *claim_id = get_agent_claimid(); - if (!claim_id || !host->node_id) + if (!claim_id_is_set(claim_id) || uuid_is_null(host->node_id)) return; sqlite3_stmt *res = NULL; - if (!PREPARE_STATEMENT(db_meta, SQL_SELECT_ALERT_TO_PUSH, &res)) { - freez(claim_id); + if (!PREPARE_STATEMENT(db_meta, SQL_SELECT_ALERT_TO_PUSH, &res)) return; - } int param = 0; SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC)); char node_id_str[UUID_STR_LEN]; - uuid_unparse_lower(*host->node_id, node_id_str); + uuid_unparse_lower(host->node_id, node_id_str); struct alarm_log_entry alarm_log; alarm_log.node_id = node_id_str; - alarm_log.claim_id = claim_id; + alarm_log.claim_id = claim_id.str; int64_t first_id = 0; int64_t last_id = 0; @@ -484,8 +481,6 @@ static void aclk_push_alert_event(RRDHOST *host __maybe_unused) done: REPORT_BIND_FAIL(res, param); SQLITE_FINALIZE(res); - - freez(claim_id); } #define SQL_DELETE_PROCESSED_ROWS \ @@ -908,16 +903,14 @@ void send_alert_snapshot_to_cloud(RRDHOST *host __maybe_unused) return; } - char *claim_id = get_agent_claimid(); - if (unlikely(!claim_id)) + CLAIM_ID claim_id = claim_id_get(); + if (unlikely(!claim_id_is_set(claim_id))) return; - // Check database for this node to see how many alerts we will need to put in the snapshot + // Check the database for this node to see how many alerts we will need to put in the snapshot int cnt = calculate_alert_snapshot_entries(&host->host_uuid); - if (!cnt) { - freez(claim_id); + if (!cnt) return; - } 
sqlite3_stmt *res = NULL; if (!PREPARE_STATEMENT(db_meta, SQL_GET_SNAPSHOT_ENTRIES, &res)) @@ -944,13 +937,13 @@ void send_alert_snapshot_to_cloud(RRDHOST *host __maybe_unused) struct alarm_log_entry alarm_log; alarm_snap.node_id = wc->node_id; - alarm_snap.claim_id = claim_id; + alarm_snap.claim_id = claim_id.str; alarm_snap.snapshot_uuid = snapshot_uuid; alarm_snap.chunks = chunks; alarm_snap.chunk = 1; alarm_log.node_id = wc->node_id; - alarm_log.claim_id = claim_id; + alarm_log.claim_id = claim_id.str; cnt = 0; param = 0; @@ -969,7 +962,7 @@ void send_alert_snapshot_to_cloud(RRDHOST *host __maybe_unused) version += alarm_log.version; if (cnt == ALARM_EVENTS_PER_CHUNK) { - if (aclk_connected) + if (aclk_online_for_alerts()) aclk_send_alarm_snapshot(snapshot_proto); cnt = 0; if (alarm_snap.chunk < chunks) { @@ -995,8 +988,6 @@ void send_alert_snapshot_to_cloud(RRDHOST *host __maybe_unused) done: REPORT_BIND_FAIL(res, param); SQLITE_FINALIZE(res); - - freez(claim_id); } // Start streaming alerts @@ -1030,25 +1021,24 @@ void aclk_alert_version_check(char *node_id, char *claim_id, uint64_t cloud_vers { nd_uuid_t node_uuid; - if (unlikely(!node_id || !claim_id || !claimed() || uuid_parse(node_id, node_uuid))) + if (unlikely(!node_id || !claim_id || !is_agent_claimed() || uuid_parse(node_id, node_uuid))) return; - char *agent_claim_id = get_agent_claimid(); - if (claim_id && agent_claim_id && strcmp(agent_claim_id, claim_id) != 0) { - nd_log(NDLS_ACCESS, NDLP_NOTICE, "ACLK REQ [%s (N/A)]: ALERTS CHECKPOINT VALIDATION REQUEST RECEIVED WITH INVALID CLAIM ID", node_id); - goto done; + CLAIM_ID agent_claim_id = claim_id_get(); + if (claim_id && claim_id_is_set(agent_claim_id) && strcmp(agent_claim_id.str, claim_id) != 0) { + nd_log(NDLS_ACCESS, NDLP_NOTICE, + "ACLK REQ [%s (N/A)]: ALERTS CHECKPOINT VALIDATION REQUEST RECEIVED WITH INVALID CLAIM ID", + node_id); + return; } struct aclk_sync_cfg_t *wc; RRDHOST *host = find_host_by_node_id(node_id); if ((!host || !(wc = 
host->aclk_config))) - nd_log(NDLS_ACCESS, NDLP_NOTICE, "ACLK REQ [%s (N/A)]: ALERTS CHECKPOINT VALIDATION REQUEST RECEIVED FOR INVALID NODE", node_id); + nd_log(NDLS_ACCESS, NDLP_NOTICE, + "ACLK REQ [%s (N/A)]: ALERTS CHECKPOINT VALIDATION REQUEST RECEIVED FOR INVALID NODE", + node_id); else schedule_alert_snapshot_if_needed(wc, cloud_version); - -done: - freez(agent_claim_id); } - -#endif diff --git a/src/database/sqlite/sqlite_aclk_node.c b/src/database/sqlite/sqlite_aclk_node.c index 70d1ebda1a73d4..9ded36ebf19913 100644 --- a/src/database/sqlite/sqlite_aclk_node.c +++ b/src/database/sqlite/sqlite_aclk_node.c @@ -6,8 +6,6 @@ #include "../../aclk/aclk_contexts_api.h" #include "../../aclk/aclk_capas.h" -#ifdef ENABLE_ACLK - DICTIONARY *collectors_from_charts(RRDHOST *host, DICTIONARY *dict) { RRDSET *st; char name[500]; @@ -32,16 +30,18 @@ static void build_node_collectors(RRDHOST *host) struct update_node_collectors upd_node_collectors; DICTIONARY *dict = dictionary_create(DICT_OPTION_SINGLE_THREADED); + CLAIM_ID claim_id = claim_id_get(); upd_node_collectors.node_id = wc->node_id; - upd_node_collectors.claim_id = get_agent_claimid(); + upd_node_collectors.claim_id = claim_id_is_set(claim_id) ? 
claim_id.str : NULL; upd_node_collectors.node_collectors = collectors_from_charts(host, dict); aclk_update_node_collectors(&upd_node_collectors); dictionary_destroy(dict); - freez(upd_node_collectors.claim_id); - nd_log(NDLS_ACCESS, NDLP_DEBUG, "ACLK RES [%s (%s)]: NODE COLLECTORS SENT", wc->node_id, rrdhost_hostname(host)); + nd_log(NDLS_ACCESS, NDLP_DEBUG, + "ACLK RES [%s (%s)]: NODE COLLECTORS SENT", + wc->node_id, rrdhost_hostname(host)); } static void build_node_info(RRDHOST *host) @@ -50,9 +50,11 @@ static void build_node_info(RRDHOST *host) struct aclk_sync_cfg_t *wc = host->aclk_config; + CLAIM_ID claim_id = claim_id_get(); + rrd_rdlock(); node_info.node_id = wc->node_id; - node_info.claim_id = get_agent_claimid(); + node_info.claim_id = claim_id_is_set(claim_id) ? claim_id.str : NULL; node_info.machine_guid = host->machine_guid; node_info.child = (host != localhost); node_info.ml_info.ml_capable = ml_capable(); @@ -64,11 +66,11 @@ static void build_node_info(RRDHOST *host) char *host_version = NULL; if (host != localhost) { - netdata_mutex_lock(&host->receiver_lock); + spinlock_lock(&host->receiver_lock); host_version = strdupz( host->receiver && host->receiver->program_version ? host->receiver->program_version : rrdhost_program_version(host)); - netdata_mutex_unlock(&host->receiver_lock); + spinlock_unlock(&host->receiver_lock); } node_info.data.name = rrdhost_hostname(host); @@ -108,7 +110,6 @@ static void build_node_info(RRDHOST *host) host == localhost ? 
"parent" : "child"); rrd_rdunlock(); - freez(node_info.claim_id); freez(node_info.node_instance_capabilities); freez(host_version); @@ -133,7 +134,7 @@ void aclk_check_node_info_and_collectors(void) { RRDHOST *host; - if (unlikely(!aclk_connected)) + if (unlikely(!aclk_online_for_nodes())) return; size_t context_loading = 0; @@ -190,5 +191,3 @@ void aclk_check_node_info_and_collectors(void) context_pp); } } - -#endif diff --git a/src/database/sqlite/sqlite_health.c b/src/database/sqlite/sqlite_health.c index a632fd494d2aed..15cc8f7e6f51b6 100644 --- a/src/database/sqlite/sqlite_health.c +++ b/src/database/sqlite/sqlite_health.c @@ -134,7 +134,6 @@ int calculate_delay(RRDCALC_STATUS old_status, RRDCALC_STATUS new_status) return delay; } -#ifdef ENABLE_ACLK #define SQL_INSERT_ALERT_PENDING_QUEUE \ "INSERT INTO alert_queue (host_id, health_log_id, unique_id, alarm_id, status, date_scheduled)" \ " VALUES (@host_id, @health_log_id, @unique_id, @alarm_id, @new_status, UNIXEPOCH() + @delay)" \ @@ -179,7 +178,6 @@ static void insert_alert_queue( REPORT_BIND_FAIL(res, param); SQLITE_RESET(res); } -#endif #define SQL_INSERT_HEALTH_LOG_DETAIL \ "INSERT INTO health_log_detail (health_log_id, unique_id, alarm_id, alarm_event_id, " \ @@ -272,11 +270,8 @@ static void sql_health_alarm_log_insert(RRDHOST *host, ALARM_ENTRY *ae) if (rc == SQLITE_ROW) { health_log_id = (size_t)sqlite3_column_int64(res, 0); sql_health_alarm_log_insert_detail(host, health_log_id, ae); -#ifdef ENABLE_ACLK - if (netdata_cloud_enabled) - insert_alert_queue( - host, health_log_id, (int64_t)ae->unique_id, (int64_t)ae->alarm_id, ae->old_status, ae->new_status); -#endif + insert_alert_queue( + host, health_log_id, (int64_t)ae->unique_id, (int64_t)ae->alarm_id, ae->old_status, ae->new_status); } else error_report("HEALTH [%s]: Failed to execute SQL_INSERT_HEALTH_LOG, rc = %d", rrdhost_hostname(host), rc); @@ -432,14 +427,10 @@ static void sql_inject_removed_status( //update the old entry in health_log 
sql_update_transition_in_health_log(host, alarm_id, &transition_id, last_transition); -#ifdef ENABLE_ACLK - if (netdata_cloud_enabled) { - int64_t health_log_id = sqlite3_column_int64(res, 0); - RRDCALC_STATUS old_status = (RRDCALC_STATUS)sqlite3_column_double(res, 1); - insert_alert_queue( - host, health_log_id, (int64_t)unique_id, (int64_t)alarm_id, old_status, RRDCALC_STATUS_REMOVED); - } -#endif + int64_t health_log_id = sqlite3_column_int64(res, 0); + RRDCALC_STATUS old_status = (RRDCALC_STATUS)sqlite3_column_double(res, 1); + insert_alert_queue( + host, health_log_id, (int64_t)unique_id, (int64_t)alarm_id, old_status, RRDCALC_STATUS_REMOVED); } //else // error_report("HEALTH [N/A]: Failed to execute SQL_INJECT_REMOVED, rc = %d", rc); diff --git a/src/database/sqlite/sqlite_metadata.c b/src/database/sqlite/sqlite_metadata.c index 1b801b7315bcc4..62483f60e70ce5 100644 --- a/src/database/sqlite/sqlite_metadata.c +++ b/src/database/sqlite/sqlite_metadata.c @@ -78,7 +78,6 @@ const char *database_config[] = { "CREATE INDEX IF NOT EXISTS health_log_d_ind_7 on health_log_detail (alarm_id)", "CREATE INDEX IF NOT EXISTS health_log_d_ind_8 on health_log_detail (new_status, updated_by_id)", -#ifdef ENABLE_ACLK "CREATE TABLE IF NOT EXISTS alert_queue " " (host_id BLOB, health_log_id INT, unique_id INT, alarm_id INT, status INT, date_scheduled INT, " " UNIQUE(host_id, health_log_id, alarm_id))", @@ -88,7 +87,6 @@ const char *database_config[] = { "CREATE TABLE IF NOT EXISTS aclk_queue (sequence_id INTEGER PRIMARY KEY, host_id blob, health_log_id INT, " "unique_id INT, date_created INT, UNIQUE(host_id, health_log_id))", -#endif NULL }; @@ -257,26 +255,20 @@ static inline void set_host_node_id(RRDHOST *host, nd_uuid_t *node_id) return; if (unlikely(!node_id)) { - freez(host->node_id); - __atomic_store_n(&host->node_id, NULL, __ATOMIC_RELAXED); + uuid_clear(host->node_id); return; } struct aclk_sync_cfg_t *wc = host->aclk_config; - if (unlikely(!host->node_id)) { - nd_uuid_t 
*t = mallocz(sizeof(*host->node_id)); - uuid_copy(*t, *node_id); - __atomic_store_n(&host->node_id, t, __ATOMIC_RELAXED); - } - else { - uuid_copy(*(host->node_id), *node_id); - } + uuid_copy(host->node_id, *node_id); if (unlikely(!wc)) create_aclk_config(host, &host->host_uuid, node_id); else uuid_unparse_lower(*node_id, wc->node_id); + + rrdpush_receiver_send_node_and_claim_id_to_child(host); } #define SQL_SET_HOST_LABEL \ @@ -315,7 +307,7 @@ bool sql_set_host_label(nd_uuid_t *host_id, const char *label_key, const char *l #define SQL_UPDATE_NODE_ID "UPDATE node_instance SET node_id = @node_id WHERE host_id = @host_id" -int update_node_id(nd_uuid_t *host_id, nd_uuid_t *node_id) +int sql_update_node_id(nd_uuid_t *host_id, nd_uuid_t *node_id) { sqlite3_stmt *res = NULL; RRDHOST *host = NULL; @@ -1479,9 +1471,7 @@ static void cleanup_health_log(struct metadata_wc *wc) (void) db_execute(db_meta,"DELETE FROM health_log WHERE host_id NOT IN (SELECT host_id FROM host)"); (void) db_execute(db_meta,"DELETE FROM health_log_detail WHERE health_log_id NOT IN (SELECT health_log_id FROM health_log)"); -#ifdef ENABLE_ACLK (void) db_execute(db_meta,"DELETE FROM alert_version WHERE health_log_id NOT IN (SELECT health_log_id FROM health_log)"); -#endif } // @@ -1631,9 +1621,7 @@ static void restore_host_context(void *arg) rrdhost_flag_clear(host, RRDHOST_FLAG_PENDING_CONTEXT_LOAD); -#ifdef ENABLE_ACLK aclk_queue_node_info(host, false); -#endif nd_log( NDLS_DAEMON, @@ -1952,10 +1940,10 @@ static void start_metadata_hosts(uv_work_t *req __maybe_unused) if (unlikely(rrdhost_flag_check(host, RRDHOST_FLAG_METADATA_CLAIMID))) { rrdhost_flag_clear(host, RRDHOST_FLAG_METADATA_CLAIMID); - nd_uuid_t uuid; int rc; - if (likely(host->aclk_state.claimed_id && !uuid_parse(host->aclk_state.claimed_id, uuid))) - rc = store_claim_id(&host->host_uuid, &uuid); + ND_UUID uuid = claim_id_get_uuid(); + if(!UUIDiszero(uuid)) + rc = store_claim_id(&host->host_uuid, &uuid.uuid); else rc = 
store_claim_id(&host->host_uuid, NULL); diff --git a/src/database/sqlite/sqlite_metadata.h b/src/database/sqlite/sqlite_metadata.h index 9e76e2a5026f1b..b6f9176d19fcb1 100644 --- a/src/database/sqlite/sqlite_metadata.h +++ b/src/database/sqlite/sqlite_metadata.h @@ -41,7 +41,7 @@ void vacuum_database(sqlite3 *database, const char *db_alias, int threshold, int int sql_metadata_cache_stats(int op); int get_node_id(nd_uuid_t *host_id, nd_uuid_t *node_id); -int update_node_id(nd_uuid_t *host_id, nd_uuid_t *node_id); +int sql_update_node_id(nd_uuid_t *host_id, nd_uuid_t *node_id); struct node_instance_list *get_node_list(void); void sql_load_node_id(RRDHOST *host); diff --git a/src/exporting/clean_connectors.c b/src/exporting/clean_connectors.c index c850c5ffa2a7c1..81413e6619fa3a 100644 --- a/src/exporting/clean_connectors.c +++ b/src/exporting/clean_connectors.c @@ -67,9 +67,7 @@ void simple_connector_cleanup(struct instance *instance) freez(current_buffer); } -#ifdef ENABLE_HTTPS netdata_ssl_close(&simple_connector_data->ssl); -#endif freez(simple_connector_data); diff --git a/src/exporting/exporting_engine.c b/src/exporting/exporting_engine.c index eb5f8a0a8bdf49..4b66875e72164a 100644 --- a/src/exporting/exporting_engine.c +++ b/src/exporting/exporting_engine.c @@ -6,7 +6,6 @@ static struct engine *engine = NULL; void analytics_exporting_connectors_ssl(BUFFER *b) { -#ifdef ENABLE_HTTPS if (netdata_ssl_exporting_ctx) { for (struct instance *instance = engine->instance_root; instance; instance = instance->next) { struct simple_connector_data *connector_specific_data = instance->connector_specific_data; @@ -16,7 +15,6 @@ void analytics_exporting_connectors_ssl(BUFFER *b) } } } -#endif buffer_strcat(b, "|"); } diff --git a/src/exporting/exporting_engine.h b/src/exporting/exporting_engine.h index beaa0ba87647d9..44a2da32231617 100644 --- a/src/exporting/exporting_engine.h +++ b/src/exporting/exporting_engine.h @@ -124,9 +124,7 @@ struct simple_connector_data { struct 
simple_connector_buffer *first_buffer; struct simple_connector_buffer *last_buffer; -#ifdef ENABLE_HTTPS NETDATA_SSL ssl; -#endif }; struct prometheus_remote_write_specific_config { diff --git a/src/exporting/graphite/graphite.c b/src/exporting/graphite/graphite.c index 1fc1f2b0410d68..a543398929d2b7 100644 --- a/src/exporting/graphite/graphite.c +++ b/src/exporting/graphite/graphite.c @@ -19,12 +19,10 @@ int init_graphite_instance(struct instance *instance) struct simple_connector_data *connector_specific_data = callocz(1, sizeof(struct simple_connector_data)); instance->connector_specific_data = connector_specific_data; -#ifdef ENABLE_HTTPS connector_specific_data->ssl = NETDATA_SSL_UNSET_CONNECTION; if (instance->config.options & EXPORTING_OPTION_USE_TLS) { netdata_ssl_initialize_ctx(NETDATA_SSL_EXPORTING_CTX); } -#endif instance->start_batch_formatting = NULL; instance->start_host_formatting = format_host_labels_graphite_plaintext; diff --git a/src/exporting/json/json.c b/src/exporting/json/json.c index e9c4db635381ff..d696c735829e7a 100644 --- a/src/exporting/json/json.c +++ b/src/exporting/json/json.c @@ -70,12 +70,10 @@ int init_json_http_instance(struct instance *instance) struct simple_connector_data *connector_specific_data = callocz(1, sizeof(struct simple_connector_data)); instance->connector_specific_data = connector_specific_data; -#ifdef ENABLE_HTTPS connector_specific_data->ssl = NETDATA_SSL_UNSET_CONNECTION; if (instance->config.options & EXPORTING_OPTION_USE_TLS) { netdata_ssl_initialize_ctx(NETDATA_SSL_EXPORTING_CTX); } -#endif instance->start_batch_formatting = open_batch_json_http; instance->start_host_formatting = format_host_labels_json_plaintext; diff --git a/src/exporting/opentsdb/opentsdb.c b/src/exporting/opentsdb/opentsdb.c index ab4495cb2428db..bee0f443aa00aa 100644 --- a/src/exporting/opentsdb/opentsdb.c +++ b/src/exporting/opentsdb/opentsdb.c @@ -20,12 +20,10 @@ int init_opentsdb_telnet_instance(struct instance *instance) struct 
simple_connector_data *connector_specific_data = callocz(1, sizeof(struct simple_connector_data)); instance->connector_specific_data = connector_specific_data; -#ifdef ENABLE_HTTPS connector_specific_data->ssl = NETDATA_SSL_UNSET_CONNECTION; if (instance->config.options & EXPORTING_OPTION_USE_TLS) { netdata_ssl_initialize_ctx(NETDATA_SSL_EXPORTING_CTX); } -#endif instance->start_batch_formatting = NULL; instance->start_host_formatting = format_host_labels_opentsdb_telnet; @@ -75,12 +73,10 @@ int init_opentsdb_http_instance(struct instance *instance) connector_specific_config->default_port = 4242; struct simple_connector_data *connector_specific_data = callocz(1, sizeof(struct simple_connector_data)); -#ifdef ENABLE_HTTPS connector_specific_data->ssl = NETDATA_SSL_UNSET_CONNECTION; if (instance->config.options & EXPORTING_OPTION_USE_TLS) { netdata_ssl_initialize_ctx(NETDATA_SSL_EXPORTING_CTX); } -#endif instance->connector_specific_data = connector_specific_data; instance->start_batch_formatting = open_batch_json_http; diff --git a/src/exporting/prometheus/remote_write/remote_write.c b/src/exporting/prometheus/remote_write/remote_write.c index b4b6f996bf8528..46ed14441abb4c 100644 --- a/src/exporting/prometheus/remote_write/remote_write.c +++ b/src/exporting/prometheus/remote_write/remote_write.c @@ -114,12 +114,10 @@ int init_prometheus_remote_write_instance(struct instance *instance) struct simple_connector_data *simple_connector_data = callocz(1, sizeof(struct simple_connector_data)); instance->connector_specific_data = simple_connector_data; -#ifdef ENABLE_HTTPS simple_connector_data->ssl = NETDATA_SSL_UNSET_CONNECTION; if (instance->config.options & EXPORTING_OPTION_USE_TLS) { netdata_ssl_initialize_ctx(NETDATA_SSL_EXPORTING_CTX); } -#endif struct prometheus_remote_write_specific_data *connector_specific_data = callocz(1, sizeof(struct prometheus_remote_write_specific_data)); diff --git a/src/exporting/read_config.c b/src/exporting/read_config.c index 
cd8af6bf61b18a..875e62cf4840c9 100644 --- a/src/exporting/read_config.c +++ b/src/exporting/read_config.c @@ -207,14 +207,14 @@ struct engine *read_exporting_config() if (unlikely(engine)) return engine; - char *filename = strdupz_path_subpath(netdata_configured_user_config_dir, EXPORTING_CONF); + char *filename = filename_from_path_entry_strdupz(netdata_configured_user_config_dir, EXPORTING_CONF); exporting_config_exists = appconfig_load(&exporting_config, filename, 0, NULL); if (!exporting_config_exists) { netdata_log_info("CONFIG: cannot load user exporting config '%s'. Will try the stock version.", filename); freez(filename); - filename = strdupz_path_subpath(netdata_configured_stock_config_dir, EXPORTING_CONF); + filename = filename_from_path_entry_strdupz(netdata_configured_stock_config_dir, EXPORTING_CONF); exporting_config_exists = appconfig_load(&exporting_config, filename, 0, NULL); if (!exporting_config_exists) netdata_log_info("CONFIG: cannot load stock exporting config '%s'. 
Running with internal defaults.", filename); @@ -468,8 +468,6 @@ struct engine *read_exporting_config() tmp_instance->config.hostname = strdupz(exporter_get(instance_name, "hostname", engine->config.hostname)); -#ifdef ENABLE_HTTPS - #define STR_GRAPHITE_HTTPS "graphite:https" #define STR_JSON_HTTPS "json:https" #define STR_OPENTSDB_HTTPS "opentsdb:https" @@ -487,7 +485,6 @@ struct engine *read_exporting_config() strlen(STR_PROMETHEUS_REMOTE_WRITE_HTTPS)))) { tmp_instance->config.options |= EXPORTING_OPTION_USE_TLS; } -#endif #ifdef NETDATA_INTERNAL_CHECKS netdata_log_info( diff --git a/src/exporting/send_data.c b/src/exporting/send_data.c index 097b7fd4b3ffec..02e9c7b2972535 100644 --- a/src/exporting/send_data.c +++ b/src/exporting/send_data.c @@ -2,7 +2,6 @@ #include "exporting_engine.h" -#ifdef ENABLE_HTTPS /** * Check if TLS is enabled in the configuration * @@ -19,7 +18,6 @@ static int exporting_tls_is_enabled(EXPORTING_CONNECTOR_TYPE type __maybe_unused type == EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE) && options & EXPORTING_OPTION_USE_TLS; } -#endif /** * Discard response @@ -69,28 +67,23 @@ void simple_connector_receive_response(int *sock, struct instance *instance) response = buffer_create(4096, &netdata_buffers_statistics.buffers_exporters); struct stats *stats = &instance->stats; -#ifdef ENABLE_HTTPS uint32_t options = (uint32_t)instance->config.options; struct simple_connector_data *connector_specific_data = instance->connector_specific_data; if (options & EXPORTING_OPTION_USE_TLS) ERR_clear_error(); -#endif errno_clear(); // loop through to collect all data while (*sock != -1 && errno != EWOULDBLOCK) { ssize_t r; -#ifdef ENABLE_HTTPS if (SSL_connection(&connector_specific_data->ssl)) r = netdata_ssl_read(&connector_specific_data->ssl, &response->buffer[response->len], (int) (response->size - response->len)); else r = recv(*sock, &response->buffer[response->len], response->size - response->len, MSG_DONTWAIT); -#else - r = recv(*sock, 
&response->buffer[response->len], response->size - response->len, MSG_DONTWAIT); -#endif + if (likely(r > 0)) { // we received some data response->len += r; @@ -136,13 +129,11 @@ void simple_connector_send_buffer( flags += MSG_NOSIGNAL; #endif -#ifdef ENABLE_HTTPS uint32_t options = (uint32_t)instance->config.options; struct simple_connector_data *connector_specific_data = instance->connector_specific_data; if (options & EXPORTING_OPTION_USE_TLS) ERR_clear_error(); -#endif struct stats *stats = &instance->stats; ssize_t header_sent_bytes = 0; @@ -150,7 +141,6 @@ void simple_connector_send_buffer( size_t header_len = buffer_strlen(header); size_t buffer_len = buffer_strlen(buffer); -#ifdef ENABLE_HTTPS if (SSL_connection(&connector_specific_data->ssl)) { if (header_len) @@ -166,12 +156,6 @@ void simple_connector_send_buffer( if ((size_t)header_sent_bytes == header_len) buffer_sent_bytes = send(*sock, buffer_tostring(buffer), buffer_len, flags); } -#else - if (header_len) - header_sent_bytes = send(*sock, buffer_tostring(header), header_len, flags); - if ((size_t)header_sent_bytes == header_len) - buffer_sent_bytes = send(*sock, buffer_tostring(buffer), buffer_len, flags); -#endif if ((size_t)buffer_sent_bytes == buffer_len) { // we sent the data successfully @@ -221,12 +205,11 @@ void simple_connector_worker(void *instance_p) snprintfz(threadname, ND_THREAD_TAG_MAX, "EXPSMPL[%zu]", instance->index); uv_thread_set_name_np(threadname); -#ifdef ENABLE_HTTPS uint32_t options = (uint32_t)instance->config.options; if (options & EXPORTING_OPTION_USE_TLS) ERR_clear_error(); -#endif + struct simple_connector_config *connector_specific_config = instance->config.connector_specific_config; int sock = -1; @@ -303,7 +286,7 @@ void simple_connector_worker(void *instance_p) &reconnects, connector_specific_data->connected_to, CONNECTED_TO_MAX); -#ifdef ENABLE_HTTPS + if (exporting_tls_is_enabled(instance->config.type, options) && sock != -1) { if (netdata_ssl_exporting_ctx) { if 
(sock_delnonblock(sock) < 0) @@ -326,7 +309,6 @@ void simple_connector_worker(void *instance_p) } } } -#endif stats->reconnects += reconnects; } diff --git a/src/health/health.h b/src/health/health.h index b1ac5a9e15b039..4f962eaa584f34 100644 --- a/src/health/health.h +++ b/src/health/health.h @@ -51,7 +51,7 @@ void health_plugin_reload(void); void health_aggregate_alarms(RRDHOST *host, BUFFER *wb, BUFFER* context, RRDCALC_STATUS status); void health_alarms2json(RRDHOST *host, BUFFER *wb, int all); -void health_alert2json_conf(RRDHOST *host, BUFFER *wb, CONTEXTS_V2_OPTIONS all); +void health_alert2json_conf(RRDHOST *host, BUFFER *wb, CONTEXTS_OPTIONS all); void health_alarms_values2json(RRDHOST *host, BUFFER *wb, int all); void health_api_v1_chart_variables2json(RRDSET *st, BUFFER *wb); diff --git a/src/health/health_dyncfg.c b/src/health/health_dyncfg.c index f2b9bc607a3fd0..48346f662a3485 100644 --- a/src/health/health_dyncfg.c +++ b/src/health/health_dyncfg.c @@ -68,8 +68,8 @@ static bool parse_match(json_object *jobj, const char *path, struct rrd_alert_ma } static bool parse_config_value_database_lookup(json_object *jobj, const char *path, struct rrd_alert_config *config, BUFFER *error, bool strict) { - JSONC_PARSE_INT_OR_ERROR_AND_RETURN(jobj, path, "after", config->after, error, strict); - JSONC_PARSE_INT_OR_ERROR_AND_RETURN(jobj, path, "before", config->before, error, strict); + JSONC_PARSE_INT64_OR_ERROR_AND_RETURN(jobj, path, "after", config->after, error, strict); + JSONC_PARSE_INT64_OR_ERROR_AND_RETURN(jobj, path, "before", config->before, error, strict); JSONC_PARSE_TXT2ENUM_OR_ERROR_AND_RETURN(jobj, path, "time_group", time_grouping_txt2id, config->time_group, error, strict); JSONC_PARSE_TXT2ENUM_OR_ERROR_AND_RETURN(jobj, path, "dims_group", alerts_dims_grouping2id, config->dims_group, error, strict); JSONC_PARSE_TXT2ENUM_OR_ERROR_AND_RETURN(jobj, path, "data_source", alerts_data_sources2id, config->data_source, error, strict); @@ -98,7 +98,7 @@ 
static bool parse_config_value(json_object *jobj, const char *path, struct rrd_a JSONC_PARSE_SUBOBJECT(jobj, path, "database_lookup", config, parse_config_value_database_lookup, error, strict); JSONC_PARSE_TXT2EXPRESSION_OR_ERROR_AND_RETURN(jobj, path, "calculation", config->calculation, error, false); JSONC_PARSE_TXT2STRING_OR_ERROR_AND_RETURN(jobj, path, "units", config->units, error, false); - JSONC_PARSE_INT_OR_ERROR_AND_RETURN(jobj, path, "update_every", config->update_every, error, strict); + JSONC_PARSE_INT64_OR_ERROR_AND_RETURN(jobj, path, "update_every", config->update_every, error, strict); return true; } @@ -109,17 +109,17 @@ static bool parse_config_conditions(json_object *jobj, const char *path, struct } static bool parse_config_action_delay(json_object *jobj, const char *path, struct rrd_alert_config *config, BUFFER *error, bool strict) { - JSONC_PARSE_INT_OR_ERROR_AND_RETURN(jobj, path, "up", config->delay_up_duration, error, strict); - JSONC_PARSE_INT_OR_ERROR_AND_RETURN(jobj, path, "down", config->delay_down_duration, error, strict); - JSONC_PARSE_INT_OR_ERROR_AND_RETURN(jobj, path, "max", config->delay_max_duration, error, strict); + JSONC_PARSE_INT64_OR_ERROR_AND_RETURN(jobj, path, "up", config->delay_up_duration, error, strict); + JSONC_PARSE_INT64_OR_ERROR_AND_RETURN(jobj, path, "down", config->delay_down_duration, error, strict); + JSONC_PARSE_INT64_OR_ERROR_AND_RETURN(jobj, path, "max", config->delay_max_duration, error, strict); JSONC_PARSE_DOUBLE_OR_ERROR_AND_RETURN(jobj, path, "multiplier", config->delay_multiplier, error, strict); return true; } static bool parse_config_action_repeat(json_object *jobj, const char *path, struct rrd_alert_config *config, BUFFER *error, bool strict) { JSONC_PARSE_BOOL_OR_ERROR_AND_RETURN(jobj, path, "enabled", config->has_custom_repeat_config, error, strict); - JSONC_PARSE_INT_OR_ERROR_AND_RETURN(jobj, path, "warning", config->warn_repeat_every, error, strict); - JSONC_PARSE_INT_OR_ERROR_AND_RETURN(jobj, 
path, "critical", config->crit_repeat_every, error, strict); + JSONC_PARSE_INT64_OR_ERROR_AND_RETURN(jobj, path, "warning", config->warn_repeat_every, error, strict); + JSONC_PARSE_INT64_OR_ERROR_AND_RETURN(jobj, path, "critical", config->crit_repeat_every, error, strict); return true; } @@ -153,7 +153,7 @@ static bool parse_config(json_object *jobj, const char *path, RRD_ALERT_PROTOTYP static bool parse_prototype(json_object *jobj, const char *path, RRD_ALERT_PROTOTYPE *base, BUFFER *error, const char *name, bool strict) { int64_t version = 0; - JSONC_PARSE_INT_OR_ERROR_AND_RETURN(jobj, path, "format_version", version, error, strict); + JSONC_PARSE_UINT64_OR_ERROR_AND_RETURN(jobj, path, "format_version", version, error, strict); if(version != 1) { buffer_sprintf(error, "unsupported document version"); @@ -164,6 +164,11 @@ static bool parse_prototype(json_object *jobj, const char *path, RRD_ALERT_PROTO json_object *rules; if (json_object_object_get_ex(jobj, "rules", &rules)) { + if (json_object_get_type(rules) != json_type_array) { + buffer_sprintf(error, "member 'rules' is not an array"); + return false; + } + size_t rules_len = json_object_array_length(rules); RRD_ALERT_PROTOTYPE *ap = base; // fill the first entry @@ -270,7 +275,7 @@ static inline void health_prototype_rule_to_json_array_member(BUFFER *wb, RRD_AL buffer_json_member_add_object(wb, "config"); { if(!for_hashing) { - buffer_json_member_add_uuid(wb, "hash", &ap->config.hash_id); + buffer_json_member_add_uuid(wb, "hash", ap->config.hash_id); buffer_json_member_add_string(wb, "source_type", dyncfg_id2source_type(ap->config.source_type)); buffer_json_member_add_string(wb, "source", string2str(ap->config.source)); } diff --git a/src/health/health_event_loop.c b/src/health/health_event_loop.c index b50812f2a4c8a1..d9f5c2a05e2c53 100644 --- a/src/health/health_event_loop.c +++ b/src/health/health_event_loop.c @@ -295,13 +295,11 @@ static void health_event_loop(void) { } 
worker_is_busy(WORKER_HEALTH_JOB_HOST_LOCK); -#ifdef ENABLE_ACLK - if (netdata_cloud_enabled) { + { struct aclk_sync_cfg_t *wc = host->aclk_config; if (wc && wc->send_snapshot == 2) continue; } -#endif // the first loop is to lookup values from the db foreach_rrdcalc_in_rrdhost_read(host, rc) { @@ -648,7 +646,6 @@ static void health_event_loop(void) { break; } } -#ifdef ENABLE_ACLK struct aclk_sync_cfg_t *wc = host->aclk_config; if (wc && wc->send_snapshot == 1) { wc->send_snapshot = 2; @@ -657,7 +654,6 @@ static void health_event_loop(void) { else if (process_alert_pending_queue(host)) rrdhost_flag_set(host, RRDHOST_FLAG_ACLK_STREAM_ALERTS); -#endif dfe_done(host); diff --git a/src/libnetdata/buffer/buffer.h b/src/libnetdata/buffer/buffer.h index 92e14afb1885d0..28bbf36bfb6869 100644 --- a/src/libnetdata/buffer/buffer.h +++ b/src/libnetdata/buffer/buffer.h @@ -775,7 +775,7 @@ static inline void buffer_json_member_add_quoted_string(BUFFER *wb, const char * wb->json.stack[wb->json.depth].count++; } -static inline void buffer_json_member_add_uuid(BUFFER *wb, const char *key, nd_uuid_t *value) { +static inline void buffer_json_member_add_uuid_ptr(BUFFER *wb, const char *key, nd_uuid_t *value) { buffer_print_json_comma_newline_spacing(wb); buffer_print_json_key(wb, key); buffer_fast_strcat(wb, ":", 1); @@ -791,6 +791,22 @@ static inline void buffer_json_member_add_uuid(BUFFER *wb, const char *key, nd_u wb->json.stack[wb->json.depth].count++; } +static inline void buffer_json_member_add_uuid(BUFFER *wb, const char *key, nd_uuid_t value) { + buffer_print_json_comma_newline_spacing(wb); + buffer_print_json_key(wb, key); + buffer_fast_strcat(wb, ":", 1); + + if(!uuid_is_null(value)) { + char uuid[GUID_LEN + 1]; + uuid_unparse_lower(value, uuid); + buffer_json_add_string_value(wb, uuid); + } + else + buffer_json_add_string_value(wb, NULL); + + wb->json.stack[wb->json.depth].count++; +} + static inline void buffer_json_member_add_boolean(BUFFER *wb, const char *key, bool 
value) { buffer_print_json_comma_newline_spacing(wb); buffer_print_json_key(wb, key); diff --git a/src/libnetdata/buffered_reader/buffered_reader.h b/src/libnetdata/buffered_reader/buffered_reader.h index 1ec1d762b88c86..505070b1c1e2d2 100644 --- a/src/libnetdata/buffered_reader/buffered_reader.h +++ b/src/libnetdata/buffered_reader/buffered_reader.h @@ -55,9 +55,7 @@ static inline buffered_reader_ret_t buffered_reader_read(struct buffered_reader static inline buffered_reader_ret_t buffered_reader_read_timeout(struct buffered_reader *reader, int fd, int timeout_ms, bool log_error) { short int revents = 0; switch(wait_on_socket_or_cancel_with_timeout( -#ifdef ENABLE_HTTPS NULL, -#endif fd, timeout_ms, POLLIN, &revents)) { case 0: // data are waiting diff --git a/src/libnetdata/config/appconfig.c b/src/libnetdata/config/appconfig.c index 81946b594a276f..1c151b7029bd4c 100644 --- a/src/libnetdata/config/appconfig.c +++ b/src/libnetdata/config/appconfig.c @@ -653,7 +653,9 @@ int appconfig_load(struct config *root, char *filename, int overwrite_used, cons FILE *fp = fopen(filename, "r"); if(!fp) { - // netdata_log_info("CONFIG: cannot open file '%s'. Using internal defaults.", filename); + if(errno != ENOENT) + netdata_log_info("CONFIG: cannot open file '%s'. 
Using internal defaults.", filename); + return 0; } @@ -799,7 +801,7 @@ int appconfig_load(struct config *root, char *filename, int overwrite_used, cons return 1; } -void appconfig_generate(struct config *root, BUFFER *wb, int only_changed) +void appconfig_generate(struct config *root, BUFFER *wb, int only_changed, bool netdata_conf) { int i, pri; struct section *co; @@ -811,12 +813,13 @@ void appconfig_generate(struct config *root, BUFFER *wb, int only_changed) if(!strcmp(co->name, CONFIG_SECTION_HOST_LABEL)) found_host_labels = 1; - if(!found_host_labels) { + if(netdata_conf && !found_host_labels) { appconfig_section_create(root, CONFIG_SECTION_HOST_LABEL); appconfig_get(root, CONFIG_SECTION_HOST_LABEL, "name", "value"); } } + if(netdata_conf) { buffer_strcat(wb, "# netdata configuration\n" "#\n" @@ -830,6 +833,7 @@ void appconfig_generate(struct config *root, BUFFER *wb, int only_changed) "# The value shown in the commented settings, is the default value.\n" "#\n" "\n# global netdata configuration\n"); + } for(i = 0; i <= 17 ;i++) { appconfig_wrlock(root); @@ -884,7 +888,12 @@ void appconfig_generate(struct config *root, BUFFER *wb, int only_changed) if(used && !(cv->flags & CONFIG_VALUE_USED)) { buffer_sprintf(wb, "\n\t# option '%s' is not used.\n", cv->name); } - buffer_sprintf(wb, "\t%s%s = %s\n", ((!(cv->flags & CONFIG_VALUE_LOADED)) && (!(cv->flags & CONFIG_VALUE_CHANGED)) && (cv->flags & CONFIG_VALUE_USED))?"# ":"", cv->name, cv->value); + buffer_sprintf(wb, "\t%s%s = %s\n", + ( + !(cv->flags & CONFIG_VALUE_LOADED) && + !(cv->flags & CONFIG_VALUE_CHANGED) && + (cv->flags & CONFIG_VALUE_USED) + )?"# ":"", cv->name, cv->value); } config_section_unlock(co); } diff --git a/src/libnetdata/config/appconfig.h b/src/libnetdata/config/appconfig.h index 214a15eddc1b9f..bdb6c4bd5afae6 100644 --- a/src/libnetdata/config/appconfig.h +++ b/src/libnetdata/config/appconfig.h @@ -186,7 +186,7 @@ int appconfig_set_boolean(struct config *root, const char *section, const char 
* int appconfig_exists(struct config *root, const char *section, const char *name); int appconfig_move(struct config *root, const char *section_old, const char *name_old, const char *section_new, const char *name_new); -void appconfig_generate(struct config *root, BUFFER *wb, int only_changed); +void appconfig_generate(struct config *root, BUFFER *wb, int only_changed, bool netdata_conf); int appconfig_section_compare(void *a, void *b); diff --git a/src/libnetdata/dictionary/dictionary.h b/src/libnetdata/dictionary/dictionary.h index 231fbfebddebe1..573ce7290be452 100644 --- a/src/libnetdata/dictionary/dictionary.h +++ b/src/libnetdata/dictionary/dictionary.h @@ -299,7 +299,8 @@ typedef DICTFE_CONST struct dictionary_foreach { #define dfe_start_rw(dict, value, mode) \ do { \ - DICTFE value ## _dfe = {}; \ + /* automatically cleanup DFE, to allow using return from within the loop */ \ + DICTFE _cleanup_(dictionary_foreach_done) value ## _dfe = {}; \ (void)(value); /* needed to avoid warning when looping without using this */ \ for((value) = dictionary_foreach_start_rw(&value ## _dfe, (dict), (mode)); \ (value ## _dfe.item) || (value) ; \ @@ -308,7 +309,6 @@ typedef DICTFE_CONST struct dictionary_foreach { #define dfe_done(value) \ } \ - dictionary_foreach_done(&value ## _dfe); \ } while(0) #define dfe_unlock(value) dictionary_foreach_unlock(&value ## _dfe) diff --git a/src/libnetdata/facets/facets.c b/src/libnetdata/facets/facets.c index 3c746cbc314e6a..5fbef57f0fba6a 100644 --- a/src/libnetdata/facets/facets.c +++ b/src/libnetdata/facets/facets.c @@ -222,6 +222,7 @@ struct facets { SIMPLE_PATTERN *visible_keys; SIMPLE_PATTERN *excluded_keys; SIMPLE_PATTERN *included_keys; + bool all_keys_included_by_default; FACETS_OPTIONS options; @@ -570,11 +571,11 @@ static inline void FACET_VALUE_ADD_CURRENT_VALUE_TO_INDEX(FACET_KEY *k) { k->facets->operations.values.indexed++; } -static inline void FACET_VALUE_ADD_OR_UPDATE_SELECTED(FACET_KEY *k, FACETS_HASH hash) { +static 
inline void FACET_VALUE_ADD_OR_UPDATE_SELECTED(FACET_KEY *k, const char *name, FACETS_HASH hash) { FACET_VALUE tv = { .hash = hash, .selected = true, - .name = NULL, + .name = name, .name_len = 0, }; FACET_VALUE_ADD_TO_INDEX(k, &tv); @@ -1547,7 +1548,7 @@ static inline void facet_value_is_used(FACET_KEY *k, FACET_VALUE *v) { } static inline bool facets_key_is_facet(FACETS *facets, FACET_KEY *k) { - bool included = true, excluded = false, never = false; + bool included = facets->all_keys_included_by_default, excluded = false, never = false; if(k->options & (FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_NO_FACET | FACET_KEY_OPTION_NEVER_FACET)) { if(k->options & FACET_KEY_OPTION_FACET) { @@ -1594,6 +1595,7 @@ static inline bool facets_key_is_facet(FACETS *facets, FACET_KEY *k) { FACETS *facets_create(uint32_t items_to_return, FACETS_OPTIONS options, const char *visible_keys, const char *facet_keys, const char *non_facet_keys) { FACETS *facets = callocz(1, sizeof(FACETS)); + facets->all_keys_included_by_default = true; facets->options = options; FACETS_KEYS_INDEX_CREATE(facets); @@ -1691,6 +1693,40 @@ void facets_enable_slice_mode(FACETS *facets) { facets->options |= FACETS_OPTION_DONT_SEND_EMPTY_VALUE_FACETS | FACETS_OPTION_SORT_FACETS_ALPHABETICALLY; } +void facets_reset_and_disable_all_facets(FACETS *facets) { + facets->all_keys_included_by_default = false; + + simple_pattern_free(facets->included_keys); + facets->included_keys = NULL; + +// We need this, because the exclusions are good for controlling which key can become a facet. +// The excluded ones are not offered for facets at all. 
+// simple_pattern_free(facets->excluded_keys); +// facets->excluded_keys = NULL; + + simple_pattern_free(facets->visible_keys); + facets->visible_keys = NULL; + + FACET_KEY *k; + foreach_key_in_facets(facets, k) { + k->options |= FACET_KEY_OPTION_NO_FACET; + k->options &= ~FACET_KEY_OPTION_FACET; + } + foreach_key_in_facets_done(k); +} + +inline FACET_KEY *facets_register_facet(FACETS *facets, const char *name, FACET_KEY_OPTIONS options) { + size_t name_length = strlen(name); + FACETS_HASH hash = FACETS_HASH_FUNCTION(name, name_length); + + FACET_KEY *k = FACETS_KEY_ADD_TO_INDEX(facets, hash, name, name_length, options); + k->options |= FACET_KEY_OPTION_FACET; + k->options &= ~FACET_KEY_OPTION_NO_FACET; + facet_key_late_init(facets, k); + + return k; +} + inline FACET_KEY *facets_register_facet_id(FACETS *facets, const char *key_id, FACET_KEY_OPTIONS options) { if(!is_valid_string_hash(key_id)) return NULL; @@ -1708,16 +1744,25 @@ inline FACET_KEY *facets_register_facet_id(FACETS *facets, const char *key_id, F return k; } -void facets_register_facet_id_filter(FACETS *facets, const char *key_id, char *value_id, FACET_KEY_OPTIONS options) { +void facets_register_facet_filter_id(FACETS *facets, const char *key_id, const char *value_id, FACET_KEY_OPTIONS options) { FACET_KEY *k = facets_register_facet_id(facets, key_id, options); if(k) { if(is_valid_string_hash(value_id)) { k->default_selected_for_values = false; - FACET_VALUE_ADD_OR_UPDATE_SELECTED(k, str_to_facets_hash(value_id)); + FACET_VALUE_ADD_OR_UPDATE_SELECTED(k, NULL, str_to_facets_hash(value_id)); } } } +void facets_register_facet_filter(FACETS *facets, const char *key, const char *value, FACET_KEY_OPTIONS options) { + FACET_KEY *k = facets_register_facet(facets, key, options); + if(k) { + FACETS_HASH hash = FACETS_HASH_FUNCTION(value, strlen(value)); + k->default_selected_for_values = false; + FACET_VALUE_ADD_OR_UPDATE_SELECTED(k, value, hash); + } +} + void facets_set_current_row_severity(FACETS *facets, 
FACET_ROW_SEVERITY severity) { facets->current_row.severity = severity; } @@ -2437,16 +2482,18 @@ void facets_report(FACETS *facets, BUFFER *wb, DICTIONARY *used_hashes_registry) if(!k->values.enabled) continue; - if(!facets_sort_and_reorder_values(k)) - // no values for this key - continue; + facets_sort_and_reorder_values(k); buffer_json_add_array_item_object(wb); // key { - buffer_json_member_add_string(wb, "id", hash_to_static_string(k->hash)); - buffer_json_member_add_string(wb, "name", facets_key_name_cached(k - , facets->report.used_hashes_registry - )); + buffer_json_member_add_string( + wb, "id", hash_to_static_string(k->hash)); + + buffer_json_member_add_string( + wb, "name", + facets_key_name_cached(k, facets->report.used_hashes_registry)); + + buffer_json_member_add_string(wb, "raw", k->name); if(!k->order) k->order = facets->order++; buffer_json_member_add_uint64(wb, "order", k->order); @@ -2467,6 +2514,7 @@ void facets_report(FACETS *facets, BUFFER *wb, DICTIONARY *used_hashes_registry) facets_key_value_transformed(facets, k, v, tb, FACETS_TRANSFORM_FACET); buffer_json_member_add_string(wb, "name", buffer_tostring(tb)); + buffer_json_member_add_string(wb, "raw", v->name); buffer_json_member_add_uint64(wb, "count", v->final_facet_value_counter); buffer_json_member_add_uint64(wb, "order", v->order); } diff --git a/src/libnetdata/facets/facets.h b/src/libnetdata/facets/facets.h index 8364d8612531d9..263a935c85772e 100644 --- a/src/libnetdata/facets/facets.h +++ b/src/libnetdata/facets/facets.h @@ -23,6 +23,7 @@ typedef enum __attribute__((packed)) { } FACETS_TRANSFORMATION_SCOPE; typedef enum __attribute__((packed)) { + FACET_KEY_OPTION_NONE = 0, FACET_KEY_OPTION_FACET = (1 << 0), // filterable values FACET_KEY_OPTION_NO_FACET = (1 << 1), // non-filterable value FACET_KEY_OPTION_NEVER_FACET = (1 << 2), // never enable this field as facet @@ -98,8 +99,14 @@ void facets_set_anchor(FACETS *facets, usec_t start_ut, usec_t stop_ut, FACETS_A void 
facets_enable_slice_mode(FACETS *facets); bool facets_row_candidate_to_keep(FACETS *facets, usec_t usec); +void facets_reset_and_disable_all_facets(FACETS *facets); + +FACET_KEY *facets_register_facet(FACETS *facets, const char *name, FACET_KEY_OPTIONS options); FACET_KEY *facets_register_facet_id(FACETS *facets, const char *key_id, FACET_KEY_OPTIONS options); -void facets_register_facet_id_filter(FACETS *facets, const char *key_id, char *value_id, FACET_KEY_OPTIONS options); + +void facets_register_facet_filter(FACETS *facets, const char *key, const char *value, FACET_KEY_OPTIONS options); +void facets_register_facet_filter_id(FACETS *facets, const char *key_id, const char *value_id, FACET_KEY_OPTIONS options); + void facets_set_timeframe_and_histogram_by_id(FACETS *facets, const char *key_id, usec_t after_ut, usec_t before_ut); void facets_set_timeframe_and_histogram_by_name(FACETS *facets, const char *key_name, usec_t after_ut, usec_t before_ut); diff --git a/src/libnetdata/functions_evloop/functions_evloop.h b/src/libnetdata/functions_evloop/functions_evloop.h index 5c575bd1786b5d..1519f2042d240e 100644 --- a/src/libnetdata/functions_evloop/functions_evloop.h +++ b/src/libnetdata/functions_evloop/functions_evloop.h @@ -71,6 +71,10 @@ #define PLUGINSD_KEYWORD_CONFIG_ACTION_STATUS "status" #define PLUGINSD_FUNCTION_CONFIG "config" +// claiming +#define PLUGINSD_KEYWORD_NODE_ID "NODE_ID" +#define PLUGINSD_KEYWORD_CLAIMED_ID "CLAIMED_ID" + typedef void (*functions_evloop_worker_execute_t)(const char *transaction, char *function, usec_t *stop_monotonic_ut, bool *cancelled, BUFFER *payload, HTTP_ACCESS access, const char *source, void *data); diff --git a/src/libnetdata/http/http_access.c b/src/libnetdata/http/http_access.c index 5be63bb1995079..398015cd37b7ee 100644 --- a/src/libnetdata/http/http_access.c +++ b/src/libnetdata/http/http_access.c @@ -3,24 +3,24 @@ #include "../libnetdata.h" static struct { - HTTP_USER_ROLE access; + HTTP_USER_ROLE role; const char 
*name; } user_roles[] = { - { .access = HTTP_USER_ROLE_NONE, .name = "none" }, - { .access = HTTP_USER_ROLE_ADMIN, .name = "admin" }, - { .access = HTTP_USER_ROLE_MANAGER, .name = "manager" }, - { .access = HTTP_USER_ROLE_TROUBLESHOOTER, .name = "troubleshooter" }, - { .access = HTTP_USER_ROLE_OBSERVER, .name = "observer" }, - { .access = HTTP_USER_ROLE_MEMBER, .name = "member" }, - { .access = HTTP_USER_ROLE_BILLING, .name = "billing" }, - { .access = HTTP_USER_ROLE_ANY, .name = "any" }, - - { .access = HTTP_USER_ROLE_MEMBER, .name = "members" }, - { .access = HTTP_USER_ROLE_ADMIN, .name = "admins" }, - { .access = HTTP_USER_ROLE_ANY, .name = "all" }, + { .role = HTTP_USER_ROLE_NONE, .name = "none" }, + { .role = HTTP_USER_ROLE_ADMIN, .name = "admin" }, + { .role = HTTP_USER_ROLE_MANAGER, .name = "manager" }, + { .role = HTTP_USER_ROLE_TROUBLESHOOTER, .name = "troubleshooter" }, + { .role = HTTP_USER_ROLE_OBSERVER, .name = "observer" }, + { .role = HTTP_USER_ROLE_MEMBER, .name = "member" }, + { .role = HTTP_USER_ROLE_BILLING, .name = "billing" }, + { .role = HTTP_USER_ROLE_ANY, .name = "any" }, + + { .role = HTTP_USER_ROLE_MEMBER, .name = "members" }, + { .role = HTTP_USER_ROLE_ADMIN, .name = "admins" }, + { .role = HTTP_USER_ROLE_ANY, .name = "all" }, // terminator - { .access = 0, .name = NULL }, + { .role = 0, .name = NULL }, }; HTTP_USER_ROLE http_user_role2id(const char *role) { @@ -29,7 +29,7 @@ HTTP_USER_ROLE http_user_role2id(const char *role) { for(size_t i = 0; user_roles[i].name ;i++) { if(strcmp(user_roles[i].name, role) == 0) - return user_roles[i].access; + return user_roles[i].role; } nd_log(NDLS_DAEMON, NDLP_WARNING, "HTTP user role '%s' is not valid", role); @@ -38,7 +38,7 @@ HTTP_USER_ROLE http_user_role2id(const char *role) { const char *http_id2user_role(HTTP_USER_ROLE role) { for(size_t i = 0; user_roles[i].name ;i++) { - if(role == user_roles[i].access) + if(role == user_roles[i].role) return user_roles[i].name; } diff --git 
a/src/libnetdata/http/http_access.h b/src/libnetdata/http/http_access.h index afc2e1dc71b20b..00929f9b42ed71 100644 --- a/src/libnetdata/http/http_access.h +++ b/src/libnetdata/http/http_access.h @@ -93,12 +93,16 @@ typedef enum __attribute__((packed)) { HTTP_ACL_WEBRTC = (1 << 6), // from WebRTC // HTTP_ACL_API takes the following additional ACLs, based on pattern matching of the client IP - HTTP_ACL_DASHBOARD = (1 << 10), - HTTP_ACL_REGISTRY = (1 << 11), - HTTP_ACL_BADGES = (1 << 12), - HTTP_ACL_MANAGEMENT = (1 << 13), - HTTP_ACL_STREAMING = (1 << 14), - HTTP_ACL_NETDATACONF = (1 << 15), + HTTP_ACL_METRICS = (1 << 10), + HTTP_ACL_FUNCTIONS = (1 << 11), + HTTP_ACL_NODES = (1 << 12), + HTTP_ACL_ALERTS = (1 << 13), + HTTP_ACL_DYNCFG = (1 << 14), + HTTP_ACL_REGISTRY = (1 << 15), + HTTP_ACL_BADGES = (1 << 16), + HTTP_ACL_MANAGEMENT = (1 << 17), + HTTP_ACL_STREAMING = (1 << 18), + HTTP_ACL_NETDATACONF = (1 << 19), // SSL related HTTP_ACL_SSL_OPTIONAL = (1 << 28), @@ -106,6 +110,14 @@ typedef enum __attribute__((packed)) { HTTP_ACL_SSL_DEFAULT = (1 << 30), } HTTP_ACL; +#define HTTP_ACL_DASHBOARD (HTTP_ACL)( \ + HTTP_ACL_METRICS \ + | HTTP_ACL_FUNCTIONS \ + | HTTP_ACL_ALERTS \ + | HTTP_ACL_NODES \ + | HTTP_ACL_DYNCFG \ + ) + #define HTTP_ACL_TRANSPORTS (HTTP_ACL)( \ HTTP_ACL_API \ | HTTP_ACL_API_UDP \ @@ -121,7 +133,11 @@ typedef enum __attribute__((packed)) { ) #define HTTP_ACL_ALL_FEATURES (HTTP_ACL)( \ - HTTP_ACL_DASHBOARD \ + HTTP_ACL_METRICS \ + | HTTP_ACL_FUNCTIONS \ + | HTTP_ACL_NODES \ + | HTTP_ACL_ALERTS \ + | HTTP_ACL_DYNCFG \ | HTTP_ACL_REGISTRY \ | HTTP_ACL_BADGES \ | HTTP_ACL_MANAGEMENT \ @@ -129,20 +145,24 @@ typedef enum __attribute__((packed)) { | HTTP_ACL_NETDATACONF \ ) +#define HTTP_ACL_ACLK_LICENSE_MANAGER (HTTP_ACL)( \ + HTTP_ACL_NODES \ +) + #ifdef NETDATA_DEV_MODE #define ACL_DEV_OPEN_ACCESS HTTP_ACL_NOCHECK #else #define ACL_DEV_OPEN_ACCESS 0 #endif -#define http_can_access_dashboard(w) ((w)->acl & HTTP_ACL_DASHBOARD) -#define 
http_can_access_registry(w) ((w)->acl & HTTP_ACL_REGISTRY) -#define http_can_access_badges(w) ((w)->acl & HTTP_ACL_BADGES) -#define http_can_access_mgmt(w) ((w)->acl & HTTP_ACL_MANAGEMENT) -#define http_can_access_stream(w) ((w)->acl & HTTP_ACL_STREAMING) -#define http_can_access_netdataconf(w) ((w)->acl & HTTP_ACL_NETDATACONF) -#define http_is_using_ssl_optional(w) ((w)->port_acl & HTTP_ACL_SSL_OPTIONAL) -#define http_is_using_ssl_force(w) ((w)->port_acl & HTTP_ACL_SSL_FORCE) -#define http_is_using_ssl_default(w) ((w)->port_acl & HTTP_ACL_SSL_DEFAULT) +#define http_can_access_dashboard(w) (((w)->acl & HTTP_ACL_DASHBOARD) == HTTP_ACL_DASHBOARD) +#define http_can_access_registry(w) (((w)->acl & HTTP_ACL_REGISTRY) == HTTP_ACL_REGISTRY) +#define http_can_access_badges(w) (((w)->acl & HTTP_ACL_BADGES) == HTTP_ACL_BADGES) +#define http_can_access_mgmt(w) (((w)->acl & HTTP_ACL_MANAGEMENT) == HTTP_ACL_MANAGEMENT) +#define http_can_access_stream(w) (((w)->acl & HTTP_ACL_STREAMING) == HTTP_ACL_STREAMING) +#define http_can_access_netdataconf(w) (((w)->acl & HTTP_ACL_NETDATACONF) == HTTP_ACL_NETDATACONF) +#define http_is_using_ssl_optional(w) (((w)->port_acl & HTTP_ACL_SSL_OPTIONAL) == HTTP_ACL_SSL_OPTIONAL) +#define http_is_using_ssl_force(w) (((w)->port_acl & HTTP_ACL_SSL_FORCE) == HTTP_ACL_SSL_FORCE) +#define http_is_using_ssl_default(w) (((w)->port_acl & HTTP_ACL_SSL_DEFAULT) == HTTP_ACL_SSL_DEFAULT) #endif //NETDATA_HTTP_ACCESS_H diff --git a/src/libnetdata/inlined.h b/src/libnetdata/inlined.h index 6b71590c99af78..624be84919039d 100644 --- a/src/libnetdata/inlined.h +++ b/src/libnetdata/inlined.h @@ -506,6 +506,43 @@ static inline int read_txt_file(const char *filename, char *buffer, size_t size) return 0; } +static inline bool read_txt_file_to_buffer(const char *filename, BUFFER *wb, size_t max_size) { + // Open the file + int fd = open(filename, O_RDONLY | O_CLOEXEC); + if (fd == -1) + return false; + + // Get the file size + struct stat st; + if (fstat(fd, &st) == 
-1) { + close(fd); + return false; + } + + size_t file_size = st.st_size; + + // Check if the file size exceeds the maximum allowed size + if (file_size > max_size) { + close(fd); + return false; // File size too large + } + + buffer_need_bytes(wb, file_size + 1); + + // Read the file contents into the buffer + ssize_t r = read(fd, &wb->buffer[wb->len], file_size); + if (r != (ssize_t)file_size) { + close(fd); + return false; // Read error + } + wb->len = r; + + // Close the file descriptor + close(fd); + + return true; // Success +} + static inline int read_proc_cmdline(const char *filename, char *buffer, size_t size) { if (unlikely(!size)) return 3; diff --git a/src/libnetdata/json/json-c-parser-inline.c b/src/libnetdata/json/json-c-parser-inline.c new file mode 100644 index 00000000000000..56cbabe12a5eb1 --- /dev/null +++ b/src/libnetdata/json/json-c-parser-inline.c @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "../libnetdata.h" + +int rrd_call_function_error(BUFFER *wb, const char *msg, int code) { + buffer_reset(wb); + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY); + buffer_json_member_add_int64(wb, "status", code); + buffer_json_member_add_string(wb, "error_message", msg); + buffer_json_finalize(wb); + return code; +} + +struct json_object *json_parse_function_payload_or_error(BUFFER *output, BUFFER *payload, int *code, json_parse_function_payload_t cb, void *cb_data) { + if(!payload || !buffer_strlen(payload)) { + *code = rrd_call_function_error(output, "No payload given", HTTP_RESP_BAD_REQUEST); + return NULL; + } + + struct json_tokener *tokener = json_tokener_new(); + if (!tokener) { + *code = rrd_call_function_error(output, "Cannot initialize json parser", HTTP_RESP_INTERNAL_SERVER_ERROR); + return NULL; + } + + struct json_object *jobj = json_tokener_parse_ex(tokener, buffer_tostring(payload), (int)buffer_strlen(payload)); + if (json_tokener_get_error(tokener) != json_tokener_success) { + const 
char *error_msg = json_tokener_error_desc(json_tokener_get_error(tokener)); + char tmp[strlen(error_msg) + 100]; + snprintf(tmp, sizeof(tmp), "JSON parser failed: %s", error_msg); + json_tokener_free(tokener); + *code = rrd_call_function_error(output, tmp, HTTP_RESP_INTERNAL_SERVER_ERROR); + return NULL; + } + json_tokener_free(tokener); + + CLEAN_BUFFER *error = buffer_create(0, NULL); + if(!cb(jobj, "", cb_data, error)) { + char tmp[buffer_strlen(error) + 100]; + snprintfz(tmp, sizeof(tmp), "JSON parser failed: %s", buffer_tostring(error)); + *code = rrd_call_function_error(output, tmp, HTTP_RESP_BAD_REQUEST); + json_object_put(jobj); + return NULL; + } + + *code = HTTP_RESP_OK; + + return jobj; +} diff --git a/src/libnetdata/json/json-c-parser-inline.h b/src/libnetdata/json/json-c-parser-inline.h index c1d60ca455e3a2..bb60a9538302d0 100644 --- a/src/libnetdata/json/json-c-parser-inline.h +++ b/src/libnetdata/json/json-c-parser-inline.h @@ -25,6 +25,32 @@ } \ } while(0) +#define JSONC_PARSE_TXT2STRDUPZ_OR_ERROR_AND_RETURN(jobj, path, member, dst, error, required) do { \ + json_object *_j; \ + if (json_object_object_get_ex(jobj, member, &_j) && json_object_is_type(_j, json_type_string)) { \ + freez((void *)dst); \ + dst = strdupz(json_object_get_string(_j)); \ + } \ + else if(required) { \ + buffer_sprintf(error, "missing or invalid type for '%s.%s' string", path, member); \ + return false; \ + } \ +} while(0) + +#define JSONC_PARSE_TXT2UUID_OR_ERROR_AND_RETURN(jobj, path, member, dst, error, required) do { \ + json_object *_j; \ + if (json_object_object_get_ex(jobj, member, &_j) && json_object_is_type(_j, json_type_string)) { \ + if(uuid_parse(json_object_get_string(_j), dst) != 0 && required) { \ + buffer_sprintf(error, "invalid UUID '%s.%s'", path, member); \ + return false; \ + } \ + } \ + else if(required) { \ + buffer_sprintf(error, "missing UUID '%s.%s'", path, member); \ + return false; \ + } \ +} while(0) + #define 
JSONC_PARSE_TXT2BUFFER_OR_ERROR_AND_RETURN(jobj, path, member, dst, error, required) do { \ json_object *_j; \ if (json_object_object_get_ex(jobj, member, &_j) && json_object_is_type(_j, json_type_string)) { \ @@ -122,11 +148,30 @@ } \ } while(0) -#define JSONC_PARSE_INT_OR_ERROR_AND_RETURN(jobj, path, member, dst, error, required) do { \ +#define JSONC_PARSE_INT64_OR_ERROR_AND_RETURN(jobj, path, member, dst, error, required) do { \ + json_object *_j; \ + if (json_object_object_get_ex(jobj, member, &_j)) { \ + if (_j != NULL && json_object_is_type(_j, json_type_int)) \ + dst = json_object_get_int64(_j); \ + else if (_j != NULL && json_object_is_type(_j, json_type_double)) \ + dst = (typeof(dst))json_object_get_double(_j); \ + else if (_j == NULL) \ + dst = 0; \ + else { \ + buffer_sprintf(error, "not supported type (expected int) for '%s.%s'", path, member); \ + return false; \ + } \ + } else if(required) { \ + buffer_sprintf(error, "missing or invalid type (expected int value or null) for '%s.%s'", path, member);\ + return false; \ + } \ +} while(0) + +#define JSONC_PARSE_UINT64_OR_ERROR_AND_RETURN(jobj, path, member, dst, error, required) do { \ json_object *_j; \ if (json_object_object_get_ex(jobj, member, &_j)) { \ if (_j != NULL && json_object_is_type(_j, json_type_int)) \ - dst = json_object_get_int(_j); \ + dst = json_object_get_uint64(_j); \ else if (_j != NULL && json_object_is_type(_j, json_type_double)) \ dst = (typeof(dst))json_object_get_double(_j); \ else if (_j == NULL) \ @@ -136,7 +181,7 @@ return false; \ } \ } else if(required) { \ - buffer_sprintf(error, "missing or invalid type (expected int value or null) for '%s.%s'", path, member); \ + buffer_sprintf(error, "missing or invalid type (expected int value or null) for '%s.%s'", path, member);\ return false; \ } \ } while(0) @@ -174,4 +219,8 @@ } \ } while(0) +typedef bool (*json_parse_function_payload_t)(json_object *jobj, const char *path, void *data, BUFFER *error); +int 
rrd_call_function_error(BUFFER *wb, const char *msg, int code); +struct json_object *json_parse_function_payload_or_error(BUFFER *output, BUFFER *payload, int *code, json_parse_function_payload_t cb, void *cb_data); + #endif //NETDATA_JSON_C_PARSER_INLINE_H diff --git a/src/libnetdata/libnetdata.c b/src/libnetdata/libnetdata.c index b36a139d23798a..52c5545fc6a9bf 100644 --- a/src/libnetdata/libnetdata.c +++ b/src/libnetdata/libnetdata.c @@ -1302,315 +1302,6 @@ int snprintfz(char *dst, size_t n, const char *fmt, ...) { return ret; } -static int is_procfs(const char *path, char **reason) { -#if defined(__APPLE__) || defined(__FreeBSD__) - (void)path; - (void)reason; -#else - struct statfs stat; - - if (statfs(path, &stat) == -1) { - if (reason) - *reason = "failed to statfs()"; - return -1; - } - -#if defined PROC_SUPER_MAGIC - if (stat.f_type != PROC_SUPER_MAGIC) { - if (reason) - *reason = "type is not procfs"; - return -1; - } -#endif - -#endif - - return 0; -} - -static int is_sysfs(const char *path, char **reason) { -#if defined(__APPLE__) || defined(__FreeBSD__) - (void)path; - (void)reason; -#else - struct statfs stat; - - if (statfs(path, &stat) == -1) { - if (reason) - *reason = "failed to statfs()"; - return -1; - } - -#if defined SYSFS_MAGIC - if (stat.f_type != SYSFS_MAGIC) { - if (reason) - *reason = "type is not sysfs"; - return -1; - } -#endif - -#endif - - return 0; -} - -int verify_netdata_host_prefix(bool log_msg) { - if(!netdata_configured_host_prefix) - netdata_configured_host_prefix = ""; - - if(!*netdata_configured_host_prefix) - return 0; - - char buffer[FILENAME_MAX + 1]; - char *path = netdata_configured_host_prefix; - char *reason = "unknown reason"; - errno_clear(); - - struct stat sb; - if (stat(path, &sb) == -1) { - reason = "failed to stat()"; - goto failed; - } - - if((sb.st_mode & S_IFMT) != S_IFDIR) { - errno = EINVAL; - reason = "is not a directory"; - goto failed; - } - - path = buffer; - snprintfz(path, FILENAME_MAX, "%s/proc", 
netdata_configured_host_prefix); - if(is_procfs(path, &reason) == -1) - goto failed; - - snprintfz(path, FILENAME_MAX, "%s/sys", netdata_configured_host_prefix); - if(is_sysfs(path, &reason) == -1) - goto failed; - - if (netdata_configured_host_prefix && *netdata_configured_host_prefix) { - if (log_msg) - netdata_log_info("Using host prefix directory '%s'", netdata_configured_host_prefix); - } - - return 0; - -failed: - if (log_msg) - netdata_log_error("Ignoring host prefix '%s': path '%s' %s", netdata_configured_host_prefix, path, reason); - netdata_configured_host_prefix = ""; - return -1; -} - -char *strdupz_path_subpath(const char *path, const char *subpath) { - if(unlikely(!path || !*path)) path = "."; - if(unlikely(!subpath)) subpath = ""; - - // skip trailing slashes in path - size_t len = strlen(path); - while(len > 0 && path[len - 1] == '/') len--; - - // skip leading slashes in subpath - while(subpath[0] == '/') subpath++; - - // if the last character in path is / and (there is a subpath or path is now empty) - // keep the trailing slash in path and remove the additional slash - char *slash = "/"; - if(path[len] == '/' && (*subpath || len == 0)) { - slash = ""; - len++; - } - else if(!*subpath) { - // there is no subpath - // no need for trailing slash - slash = ""; - } - - char buffer[FILENAME_MAX + 1]; - snprintfz(buffer, FILENAME_MAX, "%.*s%s%s", (int)len, path, slash, subpath); - return strdupz(buffer); -} - -int path_is_dir(const char *path, const char *subpath) { - char *s = strdupz_path_subpath(path, subpath); - - size_t max_links = 100; - - int is_dir = 0; - struct stat statbuf; - while(max_links-- && stat(s, &statbuf) == 0) { - if((statbuf.st_mode & S_IFMT) == S_IFDIR) { - is_dir = 1; - break; - } - else if((statbuf.st_mode & S_IFMT) == S_IFLNK) { - char buffer[FILENAME_MAX + 1]; - ssize_t l = readlink(s, buffer, FILENAME_MAX); - if(l > 0) { - buffer[l] = '\0'; - freez(s); - s = strdupz(buffer); - continue; - } - else { - is_dir = 0; - break; - } 
- } - else { - is_dir = 0; - break; - } - } - - freez(s); - return is_dir; -} - -int path_is_file(const char *path, const char *subpath) { - char *s = strdupz_path_subpath(path, subpath); - - size_t max_links = 100; - - int is_file = 0; - struct stat statbuf; - while(max_links-- && stat(s, &statbuf) == 0) { - if((statbuf.st_mode & S_IFMT) == S_IFREG) { - is_file = 1; - break; - } - else if((statbuf.st_mode & S_IFMT) == S_IFLNK) { - char buffer[FILENAME_MAX + 1]; - ssize_t l = readlink(s, buffer, FILENAME_MAX); - if(l > 0) { - buffer[l] = '\0'; - freez(s); - s = strdupz(buffer); - continue; - } - else { - is_file = 0; - break; - } - } - else { - is_file = 0; - break; - } - } - - freez(s); - return is_file; -} - -void recursive_config_double_dir_load(const char *user_path, const char *stock_path, const char *subpath, int (*callback)(const char *filename, void *data, bool stock_config), void *data, size_t depth) { - if(depth > 3) { - netdata_log_error("CONFIG: Max directory depth reached while reading user path '%s', stock path '%s', subpath '%s'", user_path, stock_path, subpath); - return; - } - - if(!stock_path) - stock_path = user_path; - - char *udir = strdupz_path_subpath(user_path, subpath); - char *sdir = strdupz_path_subpath(stock_path, subpath); - - netdata_log_debug(D_HEALTH, "CONFIG traversing user-config directory '%s', stock config directory '%s'", udir, sdir); - - DIR *dir = opendir(udir); - if (!dir) { - netdata_log_error("CONFIG cannot open user-config directory '%s'.", udir); - } - else { - struct dirent *de = NULL; - while((de = readdir(dir))) { - if(de->d_type == DT_DIR || de->d_type == DT_LNK) { - if( !de->d_name[0] || - (de->d_name[0] == '.' && de->d_name[1] == '\0') || - (de->d_name[0] == '.' && de->d_name[1] == '.' 
&& de->d_name[2] == '\0') - ) { - netdata_log_debug(D_HEALTH, "CONFIG ignoring user-config directory '%s/%s'", udir, de->d_name); - continue; - } - - if(path_is_dir(udir, de->d_name)) { - recursive_config_double_dir_load(udir, sdir, de->d_name, callback, data, depth + 1); - continue; - } - } - - if(de->d_type == DT_UNKNOWN || de->d_type == DT_REG || de->d_type == DT_LNK) { - size_t len = strlen(de->d_name); - if(path_is_file(udir, de->d_name) && - len > 5 && !strcmp(&de->d_name[len - 5], ".conf")) { - char *filename = strdupz_path_subpath(udir, de->d_name); - netdata_log_debug(D_HEALTH, "CONFIG calling callback for user file '%s'", filename); - callback(filename, data, false); - freez(filename); - continue; - } - } - - netdata_log_debug(D_HEALTH, "CONFIG ignoring user-config file '%s/%s' of type %d", udir, de->d_name, (int)de->d_type); - } - - closedir(dir); - } - - netdata_log_debug(D_HEALTH, "CONFIG traversing stock config directory '%s', user config directory '%s'", sdir, udir); - - dir = opendir(sdir); - if (!dir) { - netdata_log_error("CONFIG cannot open stock config directory '%s'.", sdir); - } - else { - if (strcmp(udir, sdir)) { - struct dirent *de = NULL; - while((de = readdir(dir))) { - if(de->d_type == DT_DIR || de->d_type == DT_LNK) { - if( !de->d_name[0] || - (de->d_name[0] == '.' && de->d_name[1] == '\0') || - (de->d_name[0] == '.' && de->d_name[1] == '.' 
&& de->d_name[2] == '\0') - ) { - netdata_log_debug(D_HEALTH, "CONFIG ignoring stock config directory '%s/%s'", sdir, de->d_name); - continue; - } - - if(path_is_dir(sdir, de->d_name)) { - // we recurse in stock subdirectory, only when there is no corresponding - // user subdirectory - to avoid reading the files twice - - if(!path_is_dir(udir, de->d_name)) - recursive_config_double_dir_load(udir, sdir, de->d_name, callback, data, depth + 1); - - continue; - } - } - - if(de->d_type == DT_UNKNOWN || de->d_type == DT_REG || de->d_type == DT_LNK) { - size_t len = strlen(de->d_name); - if(path_is_file(sdir, de->d_name) && !path_is_file(udir, de->d_name) && - len > 5 && !strcmp(&de->d_name[len - 5], ".conf")) { - char *filename = strdupz_path_subpath(sdir, de->d_name); - netdata_log_debug(D_HEALTH, "CONFIG calling callback for stock file '%s'", filename); - callback(filename, data, true); - freez(filename); - continue; - } - - } - - netdata_log_debug(D_HEALTH, "CONFIG ignoring stock-config file '%s/%s' of type %d", udir, de->d_name, (int)de->d_type); - } - } - closedir(dir); - } - - netdata_log_debug(D_HEALTH, "CONFIG done traversing user-config directory '%s', stock config directory '%s'", udir, sdir); - - freez(udir); - freez(sdir); -} - // Returns the number of bytes read from the file if file_size is not NULL. // The actual buffer has an extra byte set to zero (not included in the count). 
char *read_by_filename(const char *filename, long *file_size) @@ -1618,34 +1309,37 @@ char *read_by_filename(const char *filename, long *file_size) FILE *f = fopen(filename, "r"); if (!f) return NULL; + if (fseek(f, 0, SEEK_END) < 0) { fclose(f); return NULL; } + long size = ftell(f); if (size <= 0 || fseek(f, 0, SEEK_END) < 0) { fclose(f); return NULL; } + char *contents = callocz(size + 1, 1); - if (!contents) { - fclose(f); - return NULL; - } if (fseek(f, 0, SEEK_SET) < 0) { fclose(f); freez(contents); return NULL; } + size_t res = fread(contents, 1, size, f); if ( res != (size_t)size) { freez(contents); fclose(f); return NULL; } + fclose(f); + if (file_size) *file_size = size; + return contents; } @@ -1685,7 +1379,7 @@ BUFFER *run_command_and_get_output_to_buffer(const char *command, int max_line_l POPEN_INSTANCE *pi = spawn_popen_run(command); if(pi) { char buffer[max_line_length + 1]; - while (fgets(buffer, max_line_length, pi->child_stdout_fp)) { + while (fgets(buffer, max_line_length, spawn_popen_stdout(pi))) { buffer[max_line_length] = '\0'; buffer_strcat(wb, buffer); } @@ -1705,7 +1399,7 @@ bool run_command_and_copy_output_to_stdout(const char *command, int max_line_len if(pi) { char buffer[max_line_length + 1]; - while (fgets(buffer, max_line_length, pi->child_stdout_fp)) + while (fgets(buffer, max_line_length, spawn_popen_stdout(pi))) fprintf(stdout, "%s", buffer); spawn_popen_kill(pi); @@ -1831,7 +1525,6 @@ void timing_action(TIMING_ACTION action, TIMING_STEP step) { } } -#ifdef ENABLE_HTTPS int hash256_string(const unsigned char *string, size_t size, char *hash) { EVP_MD_CTX *ctx; ctx = EVP_MD_CTX_create(); @@ -1856,7 +1549,6 @@ int hash256_string(const unsigned char *string, size_t size, char *hash) { EVP_MD_CTX_destroy(ctx); return 1; } -#endif bool rrdr_relative_window_to_absolute(time_t *after, time_t *before, time_t now) { diff --git a/src/libnetdata/libnetdata.h b/src/libnetdata/libnetdata.h index b4bddb70a19a18..6ff72bae1c8c30 100644 --- 
a/src/libnetdata/libnetdata.h +++ b/src/libnetdata/libnetdata.h @@ -9,10 +9,6 @@ extern "C" { #include "config.h" -#ifdef ENABLE_OPENSSL -#define ENABLE_HTTPS 1 -#endif - #ifdef HAVE_LIBDATACHANNEL #define ENABLE_WEBRTC 1 #endif @@ -331,6 +327,7 @@ size_t judy_aral_structures(void); #include "linked-lists.h" #include "storage-point.h" +#include "paths/paths.h" void netdata_fix_chart_id(char *s); void netdata_fix_chart_name(char *s); @@ -394,17 +391,6 @@ int verify_netdata_host_prefix(bool log_msg); extern volatile sig_atomic_t netdata_exit; -char *strdupz_path_subpath(const char *path, const char *subpath); -int path_is_dir(const char *path, const char *subpath); -int path_is_file(const char *path, const char *subpath); -void recursive_config_double_dir_load( - const char *user_path - , const char *stock_path - , const char *subpath - , int (*callback)(const char *filename, void *data, bool stock_config) - , void *data - , size_t depth -); char *read_by_filename(const char *filename, long *file_size); char *find_and_replace(const char *src, const char *find, const char *replace, const char *where); @@ -479,9 +465,7 @@ extern char *netdata_configured_host_prefix; #include "spawn_server/spawn_server.h" #include "spawn_server/spawn_popen.h" #include "simple_pattern/simple_pattern.h" -#ifdef ENABLE_HTTPS -# include "socket/security.h" -#endif +#include "socket/security.h" #include "socket/socket.h" #include "config/appconfig.h" #include "log/journal.h" diff --git a/src/libnetdata/locks/README.md b/src/libnetdata/locks/README.md index 35d602f2a032d0..9425f53901dcb1 100644 --- a/src/libnetdata/locks/README.md +++ b/src/libnetdata/locks/README.md @@ -58,13 +58,13 @@ If any call is expected to pause the caller (ie the caller is attempting a read ``` RW_LOCK ON LOCK 0x0x5651c9fcce20: 4190039 'HEALTH' (function health_execute_pending_updates() 661@health/health.c) WANTS a 'W' lock (while holding 1 rwlocks and 1 mutexes). 
There are 7 readers and 0 writers are holding the lock: - => 1: RW_LOCK: process 4190091 'WEB_SERVER[static14]' (function web_client_api_request_v1_data() 526@web/api/web_api_v1.c) is having 1 'R' lock for 709847 usec. - => 2: RW_LOCK: process 4190079 'WEB_SERVER[static6]' (function web_client_api_request_v1_data() 526@web/api/web_api_v1.c) is having 1 'R' lock for 709869 usec. - => 3: RW_LOCK: process 4190084 'WEB_SERVER[static10]' (function web_client_api_request_v1_data() 526@web/api/web_api_v1.c) is having 1 'R' lock for 709948 usec. - => 4: RW_LOCK: process 4190076 'WEB_SERVER[static3]' (function web_client_api_request_v1_data() 526@web/api/web_api_v1.c) is having 1 'R' lock for 710190 usec. - => 5: RW_LOCK: process 4190092 'WEB_SERVER[static15]' (function web_client_api_request_v1_data() 526@web/api/web_api_v1.c) is having 1 'R' lock for 710195 usec. - => 6: RW_LOCK: process 4190077 'WEB_SERVER[static4]' (function web_client_api_request_v1_data() 526@web/api/web_api_v1.c) is having 1 'R' lock for 710208 usec. - => 7: RW_LOCK: process 4190044 'WEB_SERVER[static1]' (function web_client_api_request_v1_data() 526@web/api/web_api_v1.c) is having 1 'R' lock for 710221 usec. + => 1: RW_LOCK: process 4190091 'WEB_SERVER[static14]' (function api_v1_data() 526@web/api/web_api_v1.c) is having 1 'R' lock for 709847 usec. + => 2: RW_LOCK: process 4190079 'WEB_SERVER[static6]' (function api_v1_data() 526@web/api/web_api_v1.c) is having 1 'R' lock for 709869 usec. + => 3: RW_LOCK: process 4190084 'WEB_SERVER[static10]' (function api_v1_data() 526@web/api/web_api_v1.c) is having 1 'R' lock for 709948 usec. + => 4: RW_LOCK: process 4190076 'WEB_SERVER[static3]' (function api_v1_data() 526@web/api/web_api_v1.c) is having 1 'R' lock for 710190 usec. + => 5: RW_LOCK: process 4190092 'WEB_SERVER[static15]' (function api_v1_data() 526@web/api/web_api_v1.c) is having 1 'R' lock for 710195 usec. 
+ => 6: RW_LOCK: process 4190077 'WEB_SERVER[static4]' (function api_v1_data() 526@web/api/web_api_v1.c) is having 1 'R' lock for 710208 usec. + => 7: RW_LOCK: process 4190044 'WEB_SERVER[static1]' (function api_v1_data() 526@web/api/web_api_v1.c) is having 1 'R' lock for 710221 usec. ``` And each of the above is paired with a `GOT` log, like this: diff --git a/src/libnetdata/log/log.c b/src/libnetdata/log/log.c index a31127c42d286e..bbb0eb23e62393 100644 --- a/src/libnetdata/log/log.c +++ b/src/libnetdata/log/log.c @@ -27,12 +27,8 @@ #endif const char *program_name = ""; - uint64_t debug_flags = 0; - -#ifdef ENABLE_ACLK int aclklog_enabled = 0; -#endif // ---------------------------------------------------------------------------- @@ -467,7 +463,7 @@ static struct { .spinlock = NETDATA_SPINLOCK_INITIALIZER, .method = NDLM_DEFAULT, .format = NDLF_LOGFMT, - .filename = LOG_DIR "/collectors.log", + .filename = LOG_DIR "/collector.log", .fd = STDERR_FILENO, .fp = NULL, .min_priority = NDLP_INFO, @@ -518,7 +514,7 @@ __attribute__((constructor)) void initialize_invocation_id(void) { char uuid[UUID_COMPACT_STR_LEN]; uuid_unparse_lower_compact(nd_log.invocation_id, uuid); - setenv("NETDATA_INVOCATION_ID", uuid, 1); + nd_setenv("NETDATA_INVOCATION_ID", uuid, 1); } int nd_log_health_fd(void) { @@ -658,9 +654,9 @@ void nd_log_set_user_settings(ND_LOG_SOURCES source, const char *setting) { else method = NDLM_STDERR; - setenv("NETDATA_LOG_METHOD", nd_log_id2method(method), 1); - setenv("NETDATA_LOG_FORMAT", nd_log_id2format(format), 1); - setenv("NETDATA_LOG_LEVEL", nd_log_id2priority(priority), 1); + nd_setenv("NETDATA_LOG_METHOD", nd_log_id2method(method), 1); + nd_setenv("NETDATA_LOG_FORMAT", nd_log_id2format(format), 1); + nd_setenv("NETDATA_LOG_LEVEL", nd_log_id2priority(priority), 1); } } @@ -680,7 +676,7 @@ void nd_log_set_priority_level(const char *setting) { } // the right one - setenv("NETDATA_LOG_LEVEL", nd_log_id2priority(priority), 1); + 
nd_setenv("NETDATA_LOG_LEVEL", nd_log_id2priority(priority), 1); } void nd_log_set_facility(const char *facility) { @@ -688,7 +684,7 @@ void nd_log_set_facility(const char *facility) { facility = "daemon"; nd_log.syslog.facility = nd_log_facility2id(facility); - setenv("NETDATA_SYSLOG_FACILITY", nd_log_id2facility(nd_log.syslog.facility), 1); + nd_setenv("NETDATA_SYSLOG_FACILITY", nd_log_id2facility(nd_log.syslog.facility), 1); } void nd_log_set_flood_protection(size_t logs, time_t period) { @@ -702,9 +698,9 @@ void nd_log_set_flood_protection(size_t logs, time_t period) { char buf[100]; snprintfz(buf, sizeof(buf), "%" PRIu64, (uint64_t )period); - setenv("NETDATA_ERRORS_THROTTLE_PERIOD", buf, 1); + nd_setenv("NETDATA_ERRORS_THROTTLE_PERIOD", buf, 1); snprintfz(buf, sizeof(buf), "%" PRIu64, (uint64_t )logs); - setenv("NETDATA_ERRORS_PER_PERIOD", buf, 1); + nd_setenv("NETDATA_ERRORS_PER_PERIOD", buf, 1); } static bool nd_log_journal_systemd_init(void) { @@ -719,7 +715,7 @@ static bool nd_log_journal_systemd_init(void) { static void nd_log_journal_direct_set_env(void) { if(nd_log.sources[NDLS_COLLECTORS].method == NDLM_JOURNAL) - setenv("NETDATA_SYSTEMD_JOURNAL_PATH", nd_log.journal_direct.filename, 1); + nd_setenv("NETDATA_SYSTEMD_JOURNAL_PATH", nd_log.journal_direct.filename, 1); } static bool nd_log_journal_direct_init(const char *path) { @@ -776,8 +772,8 @@ static void nd_log_syslog_init() { void nd_log_initialize_for_external_plugins(const char *name) { // if we don't run under Netdata, log to stderr, // otherwise, use the logging method Netdata wants us to use. 
- setenv("NETDATA_LOG_METHOD", "stderr", 0); - setenv("NETDATA_LOG_FORMAT", "logfmt", 0); + nd_setenv("NETDATA_LOG_METHOD", "stderr", 0); + nd_setenv("NETDATA_LOG_FORMAT", "logfmt", 0); nd_log.overwrite_process_source = NDLS_COLLECTORS; program_name = name; diff --git a/src/libnetdata/log/log.h b/src/libnetdata/log/log.h index 015c02eb64aaf4..79df798a5f09b8 100644 --- a/src/libnetdata/log/log.h +++ b/src/libnetdata/log/log.h @@ -16,7 +16,7 @@ typedef enum __attribute__((__packed__)) { NDLS_UNSET = 0, // internal use only NDLS_ACCESS, // access.log NDLS_ACLK, // aclk.log - NDLS_COLLECTORS, // collectors.log + NDLS_COLLECTORS, // collector.log NDLS_DAEMON, // error.log NDLS_HEALTH, // health.log NDLS_DEBUG, // debug.log @@ -238,12 +238,8 @@ void log_stack_push(struct log_stack_entry *lgs); #define D_SYSTEM 0x8000000000000000 extern uint64_t debug_flags; - extern const char *program_name; - -#ifdef ENABLE_ACLK extern int aclklog_enabled; -#endif #define LOG_DATE_LENGTH 26 void log_date(char *buffer, size_t len, time_t now); diff --git a/src/libnetdata/maps/local-sockets.h b/src/libnetdata/maps/local-sockets.h index 6f2ffd81a7abfe..6c08ed2a98c373 100644 --- a/src/libnetdata/maps/local-sockets.h +++ b/src/libnetdata/maps/local-sockets.h @@ -223,7 +223,7 @@ typedef struct local_socket { #endif } LOCAL_SOCKET; -static inline void local_sockets_spawn_server_callback(SPAWN_REQUEST *request); +static inline int local_sockets_spawn_server_callback(SPAWN_REQUEST *request); // -------------------------------------------------------------------------------------------------------------------- @@ -1145,7 +1145,7 @@ static inline void local_sockets_send_to_parent(struct local_socket_state *ls __ local_sockets_log(ls, "failed to write cmdline to pipe"); } -static inline void local_sockets_spawn_server_callback(SPAWN_REQUEST *request) { +static inline int local_sockets_spawn_server_callback(SPAWN_REQUEST *request) { LS_STATE ls = { 0 }; ls.config = *((struct local_sockets_config 
*)request->data); @@ -1172,7 +1172,7 @@ static inline void local_sockets_spawn_server_callback(SPAWN_REQUEST *request) { // switch namespace using the custom fd passed via the spawn server if (setns(request->fds[3], CLONE_NEWNET) == -1) { local_sockets_log(&ls, "failed to switch network namespace at child process using fd %d", request->fds[3]); - exit(EXIT_FAILURE); + return EXIT_FAILURE; } // read all sockets from /proc @@ -1187,7 +1187,9 @@ static inline void local_sockets_spawn_server_callback(SPAWN_REQUEST *request) { }; local_sockets_send_to_parent(&ls, &zero, &cw); - exit(EXIT_SUCCESS); + local_sockets_cleanup(&ls); + + return EXIT_SUCCESS; } static inline bool local_sockets_get_namespace_sockets_with_pid(LS_STATE *ls, struct pid_socket *ps) { diff --git a/src/libnetdata/os/close_range.c b/src/libnetdata/os/close_range.c index 56d5c2527ad361..2ee5837ee7ca52 100644 --- a/src/libnetdata/os/close_range.c +++ b/src/libnetdata/os/close_range.c @@ -7,6 +7,12 @@ static int fd_is_valid(int fd) { return fcntl(fd, F_GETFD) != -1 || errno != EBADF; } +static void setcloexec(int fd) { + int flags = fcntl(fd, F_GETFD); + if (flags != -1) + (void) fcntl(fd, F_SETFD, flags | FD_CLOEXEC); +} + int os_get_fd_open_max(void) { static int fd_open_max = CLOSE_RANGE_FD_MAX; @@ -33,9 +39,9 @@ int os_get_fd_open_max(void) { return fd_open_max; } -void os_close_range(int first, int last) { +void os_close_range(int first, int last, int flags) { #if defined(HAVE_CLOSE_RANGE) - if(close_range(first, last, 0) == 0) return; + if(close_range(first, last, flags) == 0) return; #endif #if defined(OS_LINUX) @@ -44,8 +50,12 @@ void os_close_range(int first, int last) { struct dirent *entry; while ((entry = readdir(dir)) != NULL) { int fd = str2i(entry->d_name); - if (fd >= first && (last == CLOSE_RANGE_FD_MAX || fd <= last) && fd_is_valid(fd)) - (void)close(fd); + if (fd >= first && (last == CLOSE_RANGE_FD_MAX || fd <= last) && fd_is_valid(fd)) { + if(flags & CLOSE_RANGE_CLOEXEC) + 
setcloexec(fd); + else + (void)close(fd); + } } closedir(dir); return; @@ -57,7 +67,12 @@ void os_close_range(int first, int last) { last = os_get_fd_open_max(); for (int fd = first; fd <= last; fd++) { - if (fd_is_valid(fd)) (void)close(fd); + if (fd_is_valid(fd)) { + if(flags & CLOSE_RANGE_CLOEXEC) + setcloexec(fd); + else + (void)close(fd); + } } } @@ -67,9 +82,9 @@ static int compare_ints(const void *a, const void *b) { return (int_a > int_b) - (int_a < int_b); } -void os_close_all_non_std_open_fds_except(const int fds[], size_t fds_num) { +void os_close_all_non_std_open_fds_except(const int fds[], size_t fds_num, int flags) { if (fds_num == 0 || fds == NULL) { - os_close_range(STDERR_FILENO + 1, CLOSE_RANGE_FD_MAX); + os_close_range(STDERR_FILENO + 1, CLOSE_RANGE_FD_MAX, flags); return; } @@ -89,10 +104,10 @@ void os_close_all_non_std_open_fds_except(const int fds[], size_t fds_num) { // call os_close_range() as many times as needed for (; i < fds_num; i++) { if (fds_copy[i] > start) - os_close_range(start, fds_copy[i] - 1); + os_close_range(start, fds_copy[i] - 1, flags); start = fds_copy[i] + 1; } - os_close_range(start, CLOSE_RANGE_FD_MAX); + os_close_range(start, CLOSE_RANGE_FD_MAX, flags); } diff --git a/src/libnetdata/os/close_range.h b/src/libnetdata/os/close_range.h index e3cb93798ac199..7914ac3f63f137 100644 --- a/src/libnetdata/os/close_range.h +++ b/src/libnetdata/os/close_range.h @@ -5,8 +5,16 @@ #define CLOSE_RANGE_FD_MAX (int)(~0U) +#ifndef CLOSE_RANGE_UNSHARE +#define CLOSE_RANGE_UNSHARE (1U << 1) +#endif + +#ifndef CLOSE_RANGE_CLOEXEC +#define CLOSE_RANGE_CLOEXEC (1U << 2) +#endif + int os_get_fd_open_max(void); -void os_close_range(int first, int last); -void os_close_all_non_std_open_fds_except(const int fds[], size_t fds_num); +void os_close_range(int first, int last, int flags); +void os_close_all_non_std_open_fds_except(const int fds[], size_t fds_num, int flags); #endif //CLOSE_RANGE_H diff --git a/src/libnetdata/os/setenv.c 
b/src/libnetdata/os/setenv.c index 5aa4302b8b6d81..156c8375fda6fa 100644 --- a/src/libnetdata/os/setenv.c +++ b/src/libnetdata/os/setenv.c @@ -2,12 +2,15 @@ #include "config.h" -#ifndef HAVE_SETENV - #include <errno.h> #include <stdlib.h> #include <stdio.h> +#if defined(OS_WINDOWS) +#include <windows.h> +#endif + +#ifndef HAVE_SETENV int os_setenv(const char *name, const char *value, int overwrite) { char *env_var; int result; @@ -28,3 +31,21 @@ int os_setenv(const char *name, const char *value, int overwrite) { } #endif + +void nd_setenv(const char *name, const char *value, int overwrite) { +#if defined(OS_WINDOWS) + if(overwrite) + SetEnvironmentVariable(name, value); + else { + char buf[1024]; + if(GetEnvironmentVariable(name, buf, sizeof(buf)) == 0) + SetEnvironmentVariable(name, value); + } +#endif + +#ifdef HAVE_SETENV + setenv(name, value, overwrite); +#else + os_setenv(name, value, overwrite); +#endif +} diff --git a/src/libnetdata/os/setenv.h b/src/libnetdata/os/setenv.h index 3ed63714c424ae..78e7224ded04f8 100644 --- a/src/libnetdata/os/setenv.h +++ b/src/libnetdata/os/setenv.h @@ -10,4 +10,6 @@ int os_setenv(const char *name, const char *value, int overwrite); #define setenv(name, value, overwrite) os_setenv(name, value, overwrite) #endif +void nd_setenv(const char *name, const char *value, int overwrite); + #endif //NETDATA_SETENV_H diff --git a/src/libnetdata/paths/paths.c b/src/libnetdata/paths/paths.c new file mode 100644 index 00000000000000..c68ee805f262cc --- /dev/null +++ b/src/libnetdata/paths/paths.c @@ -0,0 +1,327 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "paths.h" + +static int is_procfs(const char *path, char **reason) { +#if defined(__APPLE__) || defined(__FreeBSD__) + (void)path; + (void)reason; +#else + struct statfs stat; + + if (statfs(path, &stat) == -1) { + if (reason) + *reason = "failed to statfs()"; + return -1; + } + +#if defined PROC_SUPER_MAGIC + if (stat.f_type != PROC_SUPER_MAGIC) { + if (reason) + *reason = "type is not procfs"; + return -1; + }
+#endif + +#endif + + return 0; +} + +static int is_sysfs(const char *path, char **reason) { +#if defined(__APPLE__) || defined(__FreeBSD__) + (void)path; + (void)reason; +#else + struct statfs stat; + + if (statfs(path, &stat) == -1) { + if (reason) + *reason = "failed to statfs()"; + return -1; + } + +#if defined SYSFS_MAGIC + if (stat.f_type != SYSFS_MAGIC) { + if (reason) + *reason = "type is not sysfs"; + return -1; + } +#endif + +#endif + + return 0; +} + +int verify_netdata_host_prefix(bool log_msg) { + if(!netdata_configured_host_prefix) + netdata_configured_host_prefix = ""; + + if(!*netdata_configured_host_prefix) + return 0; + + char path[FILENAME_MAX]; + char *reason = "unknown reason"; + errno_clear(); + + strncpyz(path, netdata_configured_host_prefix, sizeof(path) - 1); + + struct stat sb; + if (stat(path, &sb) == -1) { + reason = "failed to stat()"; + goto failed; + } + + if((sb.st_mode & S_IFMT) != S_IFDIR) { + errno = EINVAL; + reason = "is not a directory"; + goto failed; + } + + snprintfz(path, sizeof(path), "%s/proc", netdata_configured_host_prefix); + if(is_procfs(path, &reason) == -1) + goto failed; + + snprintfz(path, sizeof(path), "%s/sys", netdata_configured_host_prefix); + if(is_sysfs(path, &reason) == -1) + goto failed; + + if (netdata_configured_host_prefix && *netdata_configured_host_prefix) { + if (log_msg) + netdata_log_info("Using host prefix directory '%s'", netdata_configured_host_prefix); + } + + return 0; + +failed: + if (log_msg) + netdata_log_error("Ignoring host prefix '%s': path '%s' %s", netdata_configured_host_prefix, path, reason); + + netdata_configured_host_prefix = ""; + return -1; +} + +size_t filename_from_path_entry(char out[FILENAME_MAX], const char *path, const char *entry, const char *extension) { + if(unlikely(!path || !*path)) path = "."; + if(unlikely(!entry)) entry = ""; + + // skip trailing slashes in path + size_t len = strlen(path); + while(len > 0 && path[len - 1] == '/') len--; + + // skip leading slashes 
in subpath + while(entry[0] == '/') entry++; + + // if the last character in path is / and (there is a subpath or path is now empty) + // keep the trailing slash in path and remove the additional slash + char *slash = "/"; + if(path[len] == '/' && (*entry || len == 0)) { + slash = ""; + len++; + } + else if(!*entry) { + // there is no entry + // no need for trailing slash + slash = ""; + } + + return snprintfz(out, FILENAME_MAX, "%.*s%s%s%s%s", (int)len, path, slash, entry, + extension && *extension ? "." : "", + extension && *extension ? extension : ""); +} + +char *filename_from_path_entry_strdupz(const char *path, const char *entry) { + char filename[FILENAME_MAX]; + filename_from_path_entry(filename, path, entry, NULL); + return strdupz(filename); +} + +bool filename_is_dir(const char *filename, bool create_it) { + CLEAN_CHAR_P *buffer = NULL; + + size_t max_links = 100; + + bool is_dir = false; + struct stat st; + while(max_links && stat(filename, &st) == 0) { + if ((st.st_mode & S_IFMT) == S_IFDIR) + is_dir = true; + else if ((st.st_mode & S_IFMT) == S_IFLNK) { + max_links--; + + if(!buffer) + buffer = mallocz(FILENAME_MAX); + + char link_dst[FILENAME_MAX]; + ssize_t l = readlink(filename, link_dst, FILENAME_MAX - 1); + if (l > 0) { + link_dst[l] = '\0'; + strncpyz(buffer, link_dst, FILENAME_MAX - 1); + filename = buffer; + continue; + } + } + + break; + } + + if(!is_dir && create_it && max_links == 100 && mkdir(filename, 0750) == 0) + is_dir = true; + + return is_dir; +} + +bool path_entry_is_dir(const char *path, const char *entry, bool create_it) { + char filename[FILENAME_MAX]; + filename_from_path_entry(filename, path, entry, NULL); + return filename_is_dir(filename, create_it); +} + +bool filename_is_file(const char *filename) { + CLEAN_CHAR_P *buffer = NULL; + + size_t max_links = 100; + + bool is_file = false; + struct stat st; + while(max_links && stat(filename, &st) == 0) { + if((st.st_mode & S_IFMT) == S_IFREG) + is_file = true; + else 
if((st.st_mode & S_IFMT) == S_IFLNK) { + max_links--; + + if(!buffer) + buffer = mallocz(FILENAME_MAX); + + char link_dst[FILENAME_MAX]; + ssize_t l = readlink(filename, link_dst, FILENAME_MAX - 1); + if(l > 0) { + link_dst[l] = '\0'; + strncpyz(buffer, link_dst, FILENAME_MAX - 1); + filename = buffer; + continue; + } + } + + break; + } + + return is_file; +} + +bool path_entry_is_file(const char *path, const char *entry) { + char filename[FILENAME_MAX]; + filename_from_path_entry(filename, path, entry, NULL); + return filename_is_file(filename); +} + +void recursive_config_double_dir_load(const char *user_path, const char *stock_path, const char *entry, int (*callback)(const char *filename, void *data, bool stock_config), void *data, size_t depth) { + if(depth > 3) { + netdata_log_error("CONFIG: Max directory depth reached while reading user path '%s', stock path '%s', subpath '%s'", user_path, stock_path, + entry); + return; + } + + if(!stock_path) + stock_path = user_path; + + char *udir = filename_from_path_entry_strdupz(user_path, entry); + char *sdir = filename_from_path_entry_strdupz(stock_path, entry); + + netdata_log_debug(D_HEALTH, "CONFIG traversing user-config directory '%s', stock config directory '%s'", udir, sdir); + + DIR *dir = opendir(udir); + if (!dir) { + netdata_log_error("CONFIG cannot open user-config directory '%s'.", udir); + } + else { + struct dirent *de = NULL; + while((de = readdir(dir))) { + if(de->d_type == DT_DIR || de->d_type == DT_LNK) { + if( !de->d_name[0] || + (de->d_name[0] == '.' && de->d_name[1] == '\0') || + (de->d_name[0] == '.' && de->d_name[1] == '.' 
&& de->d_name[2] == '\0') + ) { + netdata_log_debug(D_HEALTH, "CONFIG ignoring user-config directory '%s/%s'", udir, de->d_name); + continue; + } + + if(path_entry_is_dir(udir, de->d_name, false)) { + recursive_config_double_dir_load(udir, sdir, de->d_name, callback, data, depth + 1); + continue; + } + } + + if(de->d_type == DT_UNKNOWN || de->d_type == DT_REG || de->d_type == DT_LNK) { + size_t len = strlen(de->d_name); + if(path_entry_is_file(udir, de->d_name) && + len > 5 && !strcmp(&de->d_name[len - 5], ".conf")) { + char *filename = filename_from_path_entry_strdupz(udir, de->d_name); + netdata_log_debug(D_HEALTH, "CONFIG calling callback for user file '%s'", filename); + callback(filename, data, false); + freez(filename); + continue; + } + } + + netdata_log_debug(D_HEALTH, "CONFIG ignoring user-config file '%s/%s' of type %d", udir, de->d_name, (int)de->d_type); + } + + closedir(dir); + } + + netdata_log_debug(D_HEALTH, "CONFIG traversing stock config directory '%s', user config directory '%s'", sdir, udir); + + dir = opendir(sdir); + if (!dir) { + netdata_log_error("CONFIG cannot open stock config directory '%s'.", sdir); + } + else { + if (strcmp(udir, sdir)) { + struct dirent *de = NULL; + while((de = readdir(dir))) { + if(de->d_type == DT_DIR || de->d_type == DT_LNK) { + if( !de->d_name[0] || + (de->d_name[0] == '.' && de->d_name[1] == '\0') || + (de->d_name[0] == '.' && de->d_name[1] == '.' 
&& de->d_name[2] == '\0') + ) { + netdata_log_debug(D_HEALTH, "CONFIG ignoring stock config directory '%s/%s'", sdir, de->d_name); + continue; + } + + if(path_entry_is_dir(sdir, de->d_name, false)) { + // we recurse in stock subdirectory, only when there is no corresponding + // user subdirectory - to avoid reading the files twice + + if(!path_entry_is_dir(udir, de->d_name, false)) + recursive_config_double_dir_load(udir, sdir, de->d_name, callback, data, depth + 1); + + continue; + } + } + + if(de->d_type == DT_UNKNOWN || de->d_type == DT_REG || de->d_type == DT_LNK) { + size_t len = strlen(de->d_name); + if(path_entry_is_file(sdir, de->d_name) && !path_entry_is_file(udir, de->d_name) && + len > 5 && !strcmp(&de->d_name[len - 5], ".conf")) { + char *filename = filename_from_path_entry_strdupz(sdir, de->d_name); + netdata_log_debug(D_HEALTH, "CONFIG calling callback for stock file '%s'", filename); + callback(filename, data, true); + freez(filename); + continue; + } + + } + + netdata_log_debug(D_HEALTH, "CONFIG ignoring stock-config file '%s/%s' of type %d", udir, de->d_name, (int)de->d_type); + } + } + closedir(dir); + } + + netdata_log_debug(D_HEALTH, "CONFIG done traversing user-config directory '%s', stock config directory '%s'", udir, sdir); + + freez(udir); + freez(sdir); +} diff --git a/src/libnetdata/paths/paths.h b/src/libnetdata/paths/paths.h new file mode 100644 index 00000000000000..9c5a8a748bfe88 --- /dev/null +++ b/src/libnetdata/paths/paths.h @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_PATHS_H +#define NETDATA_PATHS_H + +#include "../libnetdata.h" + +size_t filename_from_path_entry(char out[FILENAME_MAX], const char *path, const char *entry, const char *extension); +char *filename_from_path_entry_strdupz(const char *path, const char *entry); + +bool filename_is_file(const char *filename); +bool filename_is_dir(const char *filename, bool create_it); + +bool path_entry_is_file(const char *path, const char *entry); 
+bool path_entry_is_dir(const char *path, const char *entry, bool create_it); + +void recursive_config_double_dir_load( + const char *user_path + , const char *stock_path + , const char *entry + , int (*callback)(const char *filename, void *data, bool stock_config) + , void *data + , size_t depth +); + +#endif //NETDATA_PATHS_H diff --git a/src/libnetdata/required_dummies.h b/src/libnetdata/required_dummies.h index 3b23b87f7ddda1..5bc2ded79e6895 100644 --- a/src/libnetdata/required_dummies.h +++ b/src/libnetdata/required_dummies.h @@ -13,11 +13,6 @@ void netdata_cleanup_and_exit(int ret, const char *action, const char *action_re exit(ret); } -// callbacks required by popen() -void signals_block(void){} -void signals_unblock(void){} -void signals_reset(void){} - void rrdset_thread_rda_free(void){} void sender_thread_buffer_free(void){} void query_target_free(void){} diff --git a/src/libnetdata/socket/security.c b/src/libnetdata/socket/security.c index 502998b79ffa56..4c603610aa2dd7 100644 --- a/src/libnetdata/socket/security.c +++ b/src/libnetdata/socket/security.c @@ -1,7 +1,5 @@ #include "../libnetdata.h" -#ifdef ENABLE_HTTPS - SSL_CTX *netdata_ssl_exporting_ctx =NULL; SSL_CTX *netdata_ssl_streaming_sender_ctx =NULL; SSL_CTX *netdata_ssl_web_server_ctx =NULL; @@ -751,4 +749,3 @@ int ssl_security_location_for_context(SSL_CTX *ctx, char *file, char *path) { return 0; } -#endif diff --git a/src/libnetdata/socket/security.h b/src/libnetdata/socket/security.h index 283d81db856b80..6a981a955108fe 100644 --- a/src/libnetdata/socket/security.h +++ b/src/libnetdata/socket/security.h @@ -12,8 +12,6 @@ typedef enum __attribute__((packed)) { #define NETDATA_SSL_STREAMING_SENDER_CTX 1 #define NETDATA_SSL_EXPORTING_CTX 2 -# ifdef ENABLE_HTTPS - #define OPENSSL_VERSION_095 0x00905100L #define OPENSSL_VERSION_097 0x0907000L #define OPENSSL_VERSION_110 0x10100000L @@ -73,5 +71,4 @@ ssize_t netdata_ssl_write(NETDATA_SSL *ssl, const void *buf, size_t num); ssize_t 
netdata_ssl_pending(NETDATA_SSL *ssl); bool netdata_ssl_has_pending(NETDATA_SSL *ssl); -# endif //ENABLE_HTTPS #endif //NETDATA_SECURITY_H diff --git a/src/libnetdata/socket/socket.c b/src/libnetdata/socket/socket.c index 7170a396379caf..f0925038e4095b 100644 --- a/src/libnetdata/socket/socket.c +++ b/src/libnetdata/socket/socket.c @@ -515,7 +515,6 @@ HTTP_ACL socket_ssl_acl(char *acl) { //Due the format of the SSL command it is always the last command, //we finish it here to avoid problems with the ACLs *ssl = '\0'; -#ifdef ENABLE_HTTPS ssl++; if (!strncmp("SSL=",ssl,4)) { ssl += 4; @@ -526,7 +525,6 @@ HTTP_ACL socket_ssl_acl(char *acl) { return HTTP_ACL_SSL_FORCE; } } -#endif } return HTTP_ACL_NONE; @@ -927,9 +925,7 @@ int connect_to_this_ip46(int protocol, int socktype, const char *host, uint32_t int timeout_ms = timeout->tv_sec * 1000 + timeout->tv_usec / 1000; switch(wait_on_socket_or_cancel_with_timeout( -#ifdef ENABLE_HTTPS - NULL, -#endif + NULL, fd, timeout_ms, POLLOUT, NULL)) { case 0: // proceed nd_log(NDLS_DAEMON, NDLP_DEBUG, @@ -1172,9 +1168,7 @@ int connect_to_one_of_urls(const char *destination, int default_port, struct tim // returns: -1 = thread cancelled, 0 = proceed to read/write, 1 = time exceeded, 2 = error on fd // timeout parameter can be zero to wait forever inline int wait_on_socket_or_cancel_with_timeout( -#ifdef ENABLE_HTTPS NETDATA_SSL *ssl, -#endif int fd, int timeout_ms, short int poll_events, short int *revents) { struct pollfd pfd = { .fd = fd, @@ -1190,10 +1184,8 @@ inline int wait_on_socket_or_cancel_with_timeout( return -1; } -#ifdef ENABLE_HTTPS if(poll_events == POLLIN && ssl && SSL_connection(ssl) && netdata_ssl_has_pending(ssl)) return 0; -#endif const int wait_ms = (timeout_ms >= ND_CHECK_CANCELLABILITY_WHILE_WAITING_EVERY_MS || forever) ? 
ND_CHECK_CANCELLABILITY_WHILE_WAITING_EVERY_MS : timeout_ms; @@ -1233,16 +1225,10 @@ inline int wait_on_socket_or_cancel_with_timeout( return 1; } -ssize_t recv_timeout( -#ifdef ENABLE_HTTPS - NETDATA_SSL *ssl, -#endif - int sockfd, void *buf, size_t len, int flags, int timeout) { +ssize_t recv_timeout(NETDATA_SSL *ssl, int sockfd, void *buf, size_t len, int flags, int timeout) { switch(wait_on_socket_or_cancel_with_timeout( -#ifdef ENABLE_HTTPS - ssl, -#endif + ssl, sockfd, timeout * 1000, POLLIN, NULL)) { case 0: // data are waiting break; @@ -1256,25 +1242,16 @@ ssize_t recv_timeout( return -1; } -#ifdef ENABLE_HTTPS - if (SSL_connection(ssl)) { + if (SSL_connection(ssl)) return netdata_ssl_read(ssl, buf, len); - } -#endif return recv(sockfd, buf, len, flags); } -ssize_t send_timeout( -#ifdef ENABLE_HTTPS - NETDATA_SSL *ssl, -#endif - int sockfd, void *buf, size_t len, int flags, int timeout) { +ssize_t send_timeout(NETDATA_SSL *ssl, int sockfd, void *buf, size_t len, int flags, int timeout) { switch(wait_on_socket_or_cancel_with_timeout( -#ifdef ENABLE_HTTPS - ssl, -#endif + ssl, sockfd, timeout * 1000, POLLOUT, NULL)) { case 0: // data are waiting break; @@ -1288,7 +1265,6 @@ ssize_t send_timeout( return -1; } -#ifdef ENABLE_HTTPS if(ssl->conn) { if (SSL_connection(ssl)) { return netdata_ssl_write(ssl, buf, len); @@ -1300,7 +1276,7 @@ ssize_t send_timeout( return -1; } } -#endif + return send(sockfd, buf, len, flags); } diff --git a/src/libnetdata/socket/socket.h b/src/libnetdata/socket/socket.h index 8eab8bfdd5d4c7..04e6b65215927c 100644 --- a/src/libnetdata/socket/socket.h +++ b/src/libnetdata/socket/socket.h @@ -39,15 +39,9 @@ int connect_to_one_of(const char *destination, int default_port, struct timeval int connect_to_one_of_urls(const char *destination, int default_port, struct timeval *timeout, size_t *reconnects_counter, char *connected_to, size_t connected_to_size); -#ifdef ENABLE_HTTPS ssize_t recv_timeout(NETDATA_SSL *ssl,int sockfd, void *buf, 
size_t len, int flags, int timeout); ssize_t send_timeout(NETDATA_SSL *ssl,int sockfd, void *buf, size_t len, int flags, int timeout); int wait_on_socket_or_cancel_with_timeout(NETDATA_SSL *ssl, int fd, int timeout_ms, short int poll_events, short int *revents); -#else -ssize_t recv_timeout(int sockfd, void *buf, size_t len, int flags, int timeout); -ssize_t send_timeout(int sockfd, void *buf, size_t len, int flags, int timeout); -int wait_on_socket_or_cancel_with_timeout(int fd, int timeout_ms, short int poll_events, short int *revents); -#endif bool fd_is_socket(int fd); bool sock_has_output_error(int fd); diff --git a/src/libnetdata/spawn_server/spawn-tester.c b/src/libnetdata/spawn_server/spawn-tester.c new file mode 100644 index 00000000000000..fbd9431ac72fd2 --- /dev/null +++ b/src/libnetdata/spawn_server/spawn-tester.c @@ -0,0 +1,493 @@ +#include "libnetdata/libnetdata.h" +#include "libnetdata/required_dummies.h" + +#define ENV_VAR_KEY "SPAWN_TESTER" +#define ENV_VAR_VALUE "1234567890" + +size_t warnings = 0; + +void child_check_environment(void) { + const char *s = getenv(ENV_VAR_KEY); + if(!s || !*s || strcmp(s, ENV_VAR_VALUE) != 0) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Wrong environment. Variable '%s' should have value '%s' but it has '%s'", + ENV_VAR_KEY, ENV_VAR_VALUE, s ? 
s : "(unset)"); + + exit(1); + } +} + +static bool is_valid_fd(int fd) { + errno_clear(); + return fcntl(fd, F_GETFD) != -1 || errno != EBADF; +} + +void child_check_fds(void) { + for(int fd = 0; fd < 3; fd++) { + if(!is_valid_fd(fd)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "fd No %d should be a valid file descriptor - but it isn't.", fd); + + exit(1); + } + } + + for(int fd = 3; fd < /* os_get_fd_open_max() */ 1024; fd++) { + if(is_valid_fd(fd)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "fd No %d is a valid file descriptor - it shouldn't.", fd); + + exit(1); + } + } + + errno_clear(); +} + +// -------------------------------------------------------------------------------------------------------------------- +// kill to stop + +int plugin_kill_to_stop() { + child_check_fds(); + child_check_environment(); + + char buffer[1024]; + while (fgets(buffer, sizeof(buffer), stdin) != NULL) { + fprintf(stderr, "+"); + printf("%s", buffer); + fflush(stdout); + } + + return 0; +} + +void test_int_fds_plugin_kill_to_stop(SPAWN_SERVER *server, const char *argv0) { + const char *params[] = { + argv0, + "plugin-kill-to-stop", + NULL, + }; + + SPAWN_INSTANCE *si = spawn_server_exec(server, STDERR_FILENO, 0, params, NULL, 0, SPAWN_INSTANCE_TYPE_EXEC); + if(!si) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot run myself as plugin (spawn)"); + exit(1); + } + + const char *msg = "Hello World!\n"; + ssize_t len = strlen(msg); + char buffer[len * 2]; + + for(size_t j = 0; j < 30 ;j++) { + fprintf(stderr, "-"); + memset(buffer, 0, sizeof(buffer)); + + ssize_t rc = write(spawn_server_instance_write_fd(si), msg, len); + + if (rc != len) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Cannot write to plugin. Expected to write %zd bytes, wrote %zd bytes", + len, rc); + exit(1); + } + + rc = read(spawn_server_instance_read_fd(si), buffer, sizeof(buffer)); + if (rc != len) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Cannot read from plugin. 
Expected to read %zd bytes, read %zd bytes", + len, rc); + exit(1); + } + + if (memcmp(msg, buffer, len) != 0) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Read corrupted data. Expected '%s', Read '%s'", + msg, buffer); + exit(1); + } + } + fprintf(stderr, "\n"); + + int code = spawn_server_exec_kill(server, si); + + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "child exited with code %d", + code); + + if(code != 15 && code != 0) { + nd_log(NDLS_COLLECTORS, NDLP_WARNING, "child should exit with code 0 or 15, but exited with code %d", code); + warnings++; + } +} + +void test_popen_plugin_kill_to_stop(const char *argv0) { + char cmd[FILENAME_MAX + 100]; + snprintfz(cmd, sizeof(cmd), "exec %s plugin-kill-to-stop", argv0); + POPEN_INSTANCE *pi = spawn_popen_run(cmd); + if(!pi) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot run myself as plugin (popen)"); + exit(1); + } + + const char *msg = "Hello World!\n"; + size_t len = strlen(msg); + char buffer[len * 2]; + + for(size_t j = 0; j < 30 ;j++) { + fprintf(stderr, "-"); + memset(buffer, 0, sizeof(buffer)); + + size_t rc = fwrite(msg, 1, len, spawn_popen_stdin(pi)); + if (rc != len) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Cannot write to plugin. Expected to write %zu bytes, wrote %zu bytes", + len, rc); + exit(1); + } + fflush(spawn_popen_stdin(pi)); + + char *s = fgets(buffer, sizeof(buffer), spawn_popen_stdout(pi)); + if (!s || strlen(s) != len) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Cannot read from plugin. Expected to read %zu bytes, read %zu bytes", + len, (size_t)(s ? strlen(s) : 0)); + exit(1); + } + if (memcmp(msg, buffer, len) != 0) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Read corrupted data. 
Expected '%s', Read '%s'", + msg, buffer); + exit(1); + } + } + fprintf(stderr, "\n"); + + int code = spawn_popen_kill(pi); + + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "child exited with code %d", + code); + + if(code != 0) { + nd_log(NDLS_COLLECTORS, NDLP_WARNING, "child should exit with code 0, but exited with code %d", code); + warnings++; + } +} + +// -------------------------------------------------------------------------------------------------------------------- +// close to stop + +int plugin_close_to_stop() { + child_check_fds(); + child_check_environment(); + + char buffer[1024]; + while (fgets(buffer, sizeof(buffer), stdin) != NULL) { + fprintf(stderr, "+"); + printf("%s", buffer); + fflush(stdout); + } + + nd_log(NDLS_COLLECTORS, NDLP_ERR, "child detected a closed pipe."); + exit(1); +} + +void test_int_fds_plugin_close_to_stop(SPAWN_SERVER *server, const char *argv0) { + const char *params[] = { + argv0, + "plugin-close-to-stop", + NULL, + }; + + SPAWN_INSTANCE *si = spawn_server_exec(server, STDERR_FILENO, 0, params, NULL, 0, SPAWN_INSTANCE_TYPE_EXEC); + if(!si) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot run myself as plugin (spawn)"); + exit(1); + } + + const char *msg = "Hello World!\n"; + ssize_t len = strlen(msg); + char buffer[len * 2]; + + for(size_t j = 0; j < 30 ;j++) { + fprintf(stderr, "-"); + memset(buffer, 0, sizeof(buffer)); + + ssize_t rc = write(spawn_server_instance_write_fd(si), msg, len); + if (rc != len) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Cannot write to plugin. Expected to write %zd bytes, wrote %zd bytes", + len, rc); + exit(1); + } + + rc = read(spawn_server_instance_read_fd(si), buffer, sizeof(buffer)); + if (rc != len) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Cannot read from plugin. Expected to read %zd bytes, read %zd bytes", + len, rc); + exit(1); + } + if (memcmp(msg, buffer, len) != 0) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Read corrupted data. 
Expected '%s', Read '%s'", + msg, buffer); + exit(1); + } + + break; + } + fprintf(stderr, "\n"); + + int code = spawn_server_exec_wait(server, si); + + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "child exited with code %d", + code); + + if(!WIFEXITED(code) || WEXITSTATUS(code) != 1) { + nd_log(NDLS_COLLECTORS, NDLP_WARNING, "child should exit with code 1, but exited with code %d", code); + warnings++; + } +} + +void test_popen_plugin_close_to_stop(const char *argv0) { + char cmd[FILENAME_MAX + 100]; + snprintfz(cmd, sizeof(cmd), "exec %s plugin-close-to-stop", argv0); + POPEN_INSTANCE *pi = spawn_popen_run(cmd); + if(!pi) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot run myself as plugin (popen)"); + exit(1); + } + + const char *msg = "Hello World!\n"; + size_t len = strlen(msg); + char buffer[len * 2]; + + for(size_t j = 0; j < 30 ;j++) { + fprintf(stderr, "-"); + memset(buffer, 0, sizeof(buffer)); + + size_t rc = fwrite(msg, 1, len, spawn_popen_stdin(pi)); + if (rc != len) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Cannot write to plugin. Expected to write %zu bytes, wrote %zu bytes", + len, rc); + exit(1); + } + fflush(spawn_popen_stdin(pi)); + + char *s = fgets(buffer, sizeof(buffer), spawn_popen_stdout(pi)); + if (!s || strlen(s) != len) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Cannot read from plugin. Expected to read %zu bytes, read %zu bytes", + len, (size_t)(s ? strlen(s) : 0)); + exit(1); + } + if (memcmp(msg, buffer, len) != 0) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Read corrupted data. 
Expected '%s', Read '%s'", + msg, buffer); + exit(1); + } + + break; + } + fprintf(stderr, "\n"); + + int code = spawn_popen_wait(pi); + + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "child exited with code %d", + code); + + if(code != 1) { + nd_log(NDLS_COLLECTORS, NDLP_WARNING, "child should exit with code 1, but exited with code %d", code); + warnings++; + } +} + +// -------------------------------------------------------------------------------------------------------------------- +// echo and exit + +#define ECHO_AND_EXIT_MSG "GOODBYE\n" + +int plugin_echo_and_exit() { + child_check_fds(); + child_check_environment(); + + printf(ECHO_AND_EXIT_MSG); + exit(0); +} + +void test_int_fds_plugin_echo_and_exit(SPAWN_SERVER *server, const char *argv0) { + const char *params[] = { + argv0, + "plugin-echo-and-exit", + NULL, + }; + + SPAWN_INSTANCE *si = spawn_server_exec(server, STDERR_FILENO, 0, params, NULL, 0, SPAWN_INSTANCE_TYPE_EXEC); + if(!si) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot run myself as plugin (spawn)"); + exit(1); + } + + char buffer[1024]; + size_t reads = 0; + + for(size_t j = 0; j < 30 ;j++) { + fprintf(stderr, "-"); + memset(buffer, 0, sizeof(buffer)); + + ssize_t rc = read(spawn_server_instance_read_fd(si), buffer, sizeof(buffer)); + if(rc <= 0) + break; + + reads++; + + if (rc != strlen(ECHO_AND_EXIT_MSG)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Cannot read from plugin. Expected to read %zu bytes, read %zd bytes", + strlen(ECHO_AND_EXIT_MSG), rc); + exit(1); + } + if (memcmp(ECHO_AND_EXIT_MSG, buffer, strlen(ECHO_AND_EXIT_MSG)) != 0) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Read corrupted data. Expected '%s', Read '%s'", + ECHO_AND_EXIT_MSG, buffer); + exit(1); + } + } + fprintf(stderr, "\n"); + + if(reads != 1) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Cannot read from plugin. 
Expected to read %d times, but read %zu", + 1, reads); + exit(1); + } + + int code = spawn_server_exec_wait(server, si); + + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "child exited with code %d", + code); + + if(code != 0) { + nd_log(NDLS_COLLECTORS, NDLP_WARNING, "child should exit with code 0, but exited with code %d", code); + warnings++; + } +} + +void test_popen_plugin_echo_and_exit(const char *argv0) { + char cmd[FILENAME_MAX + 100]; + snprintfz(cmd, sizeof(cmd), "exec %s plugin-echo-and-exit", argv0); + POPEN_INSTANCE *pi = spawn_popen_run(cmd); + if(!pi) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot run myself as plugin (popen)"); + exit(1); + } + + char buffer[1024]; + size_t reads = 0; + for(size_t j = 0; j < 30 ;j++) { + fprintf(stderr, "-"); + memset(buffer, 0, sizeof(buffer)); + + char *s = fgets(buffer, sizeof(buffer), spawn_popen_stdout(pi)); + if(!s) break; + reads++; + if (strlen(s) != strlen(ECHO_AND_EXIT_MSG)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Cannot read from plugin. Expected to read %zu bytes, read %zu bytes", + strlen(ECHO_AND_EXIT_MSG), (size_t)(s ? strlen(s) : 0)); + exit(1); + } + if (memcmp(ECHO_AND_EXIT_MSG, buffer, strlen(ECHO_AND_EXIT_MSG)) != 0) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Read corrupted data. Expected '%s', Read '%s'", + ECHO_AND_EXIT_MSG, buffer); + exit(1); + } + } + fprintf(stderr, "\n"); + + if(reads != 1) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Cannot read from plugin. 
Expected to read %d times, but read %zu", + 1, reads); + exit(1); + } + + int code = spawn_popen_wait(pi); + + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "child exited with code %d", + code); + + if(code != 0) { + nd_log(NDLS_COLLECTORS, NDLP_WARNING, "child should exit with code 0, but exited with code %d", code); + warnings++; + } +} + +// -------------------------------------------------------------------------------------------------------------------- + +int main(int argc, const char **argv) { + if(argc > 1 && strcmp(argv[1], "plugin-kill-to-stop") == 0) + return plugin_kill_to_stop(); + + if(argc > 1 && strcmp(argv[1], "plugin-echo-and-exit") == 0) + return plugin_echo_and_exit(); + + if(argc > 1 && strcmp(argv[1], "plugin-close-to-stop") == 0) + return plugin_close_to_stop(); + + if(argc <= 1 || strcmp(argv[1], "test") != 0) { + fprintf(stderr, "Run me with 'test' parameter!\n"); + exit(1); + } + + nd_setenv(ENV_VAR_KEY, ENV_VAR_VALUE, 1); + + fprintf(stderr, "\n\nTESTING fds\n\n"); + SPAWN_SERVER *server = spawn_server_create(SPAWN_SERVER_OPTION_EXEC, "test", NULL, argc, argv); + if(!server) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot create spawn server"); + exit(1); + } + for(size_t i = 0; i < 5; i++) { + fprintf(stderr, "\n\nTESTING fds No %zu (kill to stop)\n\n", i + 1); + test_int_fds_plugin_kill_to_stop(server, argv[0]); + } + for(size_t i = 0; i < 5; i++) { + fprintf(stderr, "\n\nTESTING fds No %zu (echo and exit)\n\n", i + 1); + test_int_fds_plugin_echo_and_exit(server, argv[0]); + } + for(size_t i = 0; i < 5; i++) { + fprintf(stderr, "\n\nTESTING fds No %zu (close to stop)\n\n", i + 1); + test_int_fds_plugin_close_to_stop(server, argv[0]); + } + spawn_server_destroy(server); + + fprintf(stderr, "\n\nTESTING popen\n\n"); + netdata_main_spawn_server_init("test", argc, argv); + for(size_t i = 0; i < 5; i++) { + fprintf(stderr, "\n\nTESTING popen No %zu (kill to stop)\n\n", i + 1); + test_popen_plugin_kill_to_stop(argv[0]); + } + for(size_t i = 0; i < 5; 
i++) { + fprintf(stderr, "\n\nTESTING popen No %zu (echo and exit)\n\n", i + 1); + test_popen_plugin_echo_and_exit(argv[0]); + } + for(size_t i = 0; i < 5; i++) { + fprintf(stderr, "\n\nTESTING popen No %zu (close to stop)\n\n", i + 1); + test_popen_plugin_close_to_stop(argv[0]); + } + netdata_main_spawn_server_cleanup(); + + fprintf(stderr, "\n\nTests passed! (%zu warnings)\n\n", warnings); + + exit(0); +} diff --git a/src/libnetdata/spawn_server/spawn_library.c b/src/libnetdata/spawn_server/spawn_library.c new file mode 100644 index 00000000000000..72627cd8452659 --- /dev/null +++ b/src/libnetdata/spawn_server/spawn_library.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "spawn_library.h" + +BUFFER *argv_to_cmdline_buffer(const char **argv) { + BUFFER *wb = buffer_create(0, NULL); + + for(size_t i = 0; argv[i] ;i++) { + const char *s = argv[i]; + size_t len = strlen(s); + buffer_need_bytes(wb, len * 2 + 1); + + bool needs_quotes = false; + for(const char *c = s; !needs_quotes && *c ; c++) { + switch(*c) { + case ' ': + case '\v': + case '\t': + case '\n': + case '"': + needs_quotes = true; + break; + + default: + break; + } + } + + if(needs_quotes && buffer_strlen(wb)) + buffer_strcat(wb, " \""); + else if(buffer_strlen(wb)) + buffer_putc(wb, ' '); + + for(const char *c = s; *c ; c++) { + switch(*c) { + case '"': + buffer_putc(wb, '\\'); + // fall through + + default: + buffer_putc(wb, *c); + break; + } + } + + if(needs_quotes) + buffer_strcat(wb, "\""); + } + + return wb; +} diff --git a/src/libnetdata/spawn_server/spawn_library.h b/src/libnetdata/spawn_server/spawn_library.h new file mode 100644 index 00000000000000..dd00aadf20a4d4 --- /dev/null +++ b/src/libnetdata/spawn_server/spawn_library.h @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_SPAWN_LIBRARY_H +#define NETDATA_SPAWN_LIBRARY_H + +#include "../libnetdata.h" + +BUFFER *argv_to_cmdline_buffer(const char **argv); + +#endif 
//NETDATA_SPAWN_LIBRARY_H diff --git a/src/libnetdata/spawn_server/spawn_popen.c b/src/libnetdata/spawn_server/spawn_popen.c index f354b1f2a77d6a..64753ccb7d59ee 100644 --- a/src/libnetdata/spawn_server/spawn_popen.c +++ b/src/libnetdata/spawn_server/spawn_popen.c @@ -2,6 +2,16 @@ #include "spawn_popen.h" +#if defined(OS_WINDOWS) +#include +#endif + +struct popen_instance { + SPAWN_INSTANCE *si; + FILE *child_stdin_fp; + FILE *child_stdout_fp; +}; + SPAWN_SERVER *netdata_main_spawn_server = NULL; static SPINLOCK netdata_main_spawn_server_spinlock = NETDATA_SPINLOCK_INITIALIZER; @@ -27,6 +37,30 @@ void netdata_main_spawn_server_cleanup(void) { } } +FILE *spawn_popen_stdin(POPEN_INSTANCE *pi) { + if(!pi->child_stdin_fp) + pi->child_stdin_fp = fdopen(spawn_server_instance_write_fd(pi->si), "w"); + + if(!pi->child_stdin_fp) + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Cannot open FILE on child's stdin on fd %d.", + spawn_server_instance_write_fd(pi->si)); + + return pi->child_stdin_fp; +} + +FILE *spawn_popen_stdout(POPEN_INSTANCE *pi) { + if(!pi->child_stdout_fp) + pi->child_stdout_fp = fdopen(spawn_server_instance_read_fd(pi->si), "r"); + + if(!pi->child_stdout_fp) + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "Cannot open FILE on child's stdout on fd %d.", + spawn_server_instance_read_fd(pi->si)); + + return pi->child_stdout_fp; +} + POPEN_INSTANCE *spawn_popen_run_argv(const char **argv) { netdata_main_spawn_server_init(NULL, 0, NULL); @@ -35,29 +69,9 @@ POPEN_INSTANCE *spawn_popen_run_argv(const char **argv) { if(si == NULL) return NULL; - POPEN_INSTANCE *pi = mallocz(sizeof(*pi)); + POPEN_INSTANCE *pi = callocz(1, sizeof(*pi)); pi->si = si; - pi->child_stdin_fp = fdopen(spawn_server_instance_write_fd(si), "w"); - pi->child_stdout_fp = fdopen(spawn_server_instance_read_fd(si), "r"); - - if(!pi->child_stdin_fp) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot open FILE on child's stdin on fd %d.", spawn_server_instance_write_fd(si)); - goto cleanup; - } - - if(!pi->child_stdout_fp) 
{ - nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot open FILE on child's stdout on fd %d.", spawn_server_instance_read_fd(si)); - goto cleanup; - } - return pi; - -cleanup: - if(pi->child_stdin_fp) { fclose(pi->child_stdin_fp); spawn_server_instance_write_fd(si); } - if(pi->child_stdout_fp) { fclose(pi->child_stdout_fp); spawn_server_instance_read_fd_unset(si); } - spawn_server_exec_kill(netdata_main_spawn_server, si); - freez(pi); - return NULL; } POPEN_INSTANCE *spawn_popen_run_variadic(const char *cmd, ...) { @@ -92,7 +106,33 @@ POPEN_INSTANCE *spawn_popen_run_variadic(const char *cmd, ...) { POPEN_INSTANCE *spawn_popen_run(const char *cmd) { if(!cmd || !*cmd) return NULL; - + +//#if defined(OS_WINDOWS) +// if(strncmp(cmd, "exec ", 5) == 0) { +// size_t len = strlen(cmd); +// char cmd_copy[strlen(cmd) + 1]; +// memcpy(cmd_copy, cmd, len + 1); +// char *words[100]; +// size_t num_words = quoted_strings_splitter(cmd_copy, words, 100, isspace_map_pluginsd); +// char *exec = get_word(words, num_words, 0); +// char *prog = get_word(words, num_words, 1); +// if (strcmp(exec, "exec") == 0 && +// prog && +// strendswith(prog, ".plugin") && +// !strendswith(prog, "charts.d.plugin") && +// !strendswith(prog, "ioping.plugin")) { +// const char *argv[num_words - 1 + 1]; // remove exec, add terminator +// +// size_t dst = 0; +// for (size_t i = 1; i < num_words; i++) +// argv[dst++] = get_word(words, num_words, i); +// +// argv[dst] = NULL; +// return spawn_popen_run_argv(argv); +// } +// } +//#endif + const char *argv[] = { "/bin/sh", "-c", @@ -121,11 +161,24 @@ static int spawn_popen_status_rc(int status) { return -1; } +static void spawn_popen_close_files(POPEN_INSTANCE *pi) { + if(pi->child_stdin_fp) { + fclose(pi->child_stdin_fp); + pi->child_stdin_fp = NULL; + spawn_server_instance_write_fd_unset(pi->si); + } + + if(pi->child_stdout_fp) { + fclose(pi->child_stdout_fp); + pi->child_stdout_fp = NULL; + spawn_server_instance_read_fd_unset(pi->si); + } +} + int 
spawn_popen_wait(POPEN_INSTANCE *pi) { if(!pi) return -1; - fclose(pi->child_stdin_fp); pi->child_stdin_fp = NULL; spawn_server_instance_write_fd_unset(pi->si); - fclose(pi->child_stdout_fp); pi->child_stdout_fp = NULL; spawn_server_instance_read_fd_unset(pi->si); + spawn_popen_close_files(pi); int status = spawn_server_exec_wait(netdata_main_spawn_server, pi->si); freez(pi); return spawn_popen_status_rc(status); @@ -134,9 +187,23 @@ int spawn_popen_wait(POPEN_INSTANCE *pi) { int spawn_popen_kill(POPEN_INSTANCE *pi) { if(!pi) return -1; - fclose(pi->child_stdin_fp); pi->child_stdin_fp = NULL; spawn_server_instance_write_fd_unset(pi->si); - fclose(pi->child_stdout_fp); pi->child_stdout_fp = NULL; spawn_server_instance_read_fd_unset(pi->si); + spawn_popen_close_files(pi); int status = spawn_server_exec_kill(netdata_main_spawn_server, pi->si); freez(pi); return spawn_popen_status_rc(status); } + +pid_t spawn_popen_pid(POPEN_INSTANCE *pi) { + if(!pi) return -1; + return spawn_server_instance_pid(pi->si); +} + +int spawn_popen_read_fd(POPEN_INSTANCE *pi) { + if(!pi) return -1; + return spawn_server_instance_read_fd(pi->si); +} + +int spawn_popen_write_fd(POPEN_INSTANCE *pi) { + if(!pi) return -1; + return spawn_server_instance_write_fd(pi->si); +} diff --git a/src/libnetdata/spawn_server/spawn_popen.h b/src/libnetdata/spawn_server/spawn_popen.h index 253d1f34be7d04..5c00f32ff18bf1 100644 --- a/src/libnetdata/spawn_server/spawn_popen.h +++ b/src/libnetdata/spawn_server/spawn_popen.h @@ -9,11 +9,7 @@ extern SPAWN_SERVER *netdata_main_spawn_server; bool netdata_main_spawn_server_init(const char *name, int argc, const char **argv); void netdata_main_spawn_server_cleanup(void); -typedef struct { - SPAWN_INSTANCE *si; - FILE *child_stdin_fp; - FILE *child_stdout_fp; -} POPEN_INSTANCE; +typedef struct popen_instance POPEN_INSTANCE; POPEN_INSTANCE *spawn_popen_run(const char *cmd); POPEN_INSTANCE *spawn_popen_run_argv(const char **argv); @@ -21,4 +17,10 @@ POPEN_INSTANCE 
*spawn_popen_run_variadic(const char *cmd, ...); int spawn_popen_wait(POPEN_INSTANCE *pi); int spawn_popen_kill(POPEN_INSTANCE *pi); +pid_t spawn_popen_pid(POPEN_INSTANCE *pi); +int spawn_popen_read_fd(POPEN_INSTANCE *pi); +int spawn_popen_write_fd(POPEN_INSTANCE *pi); +FILE *spawn_popen_stdin(POPEN_INSTANCE *pi); +FILE *spawn_popen_stdout(POPEN_INSTANCE *pi); + #endif //SPAWN_POPEN_H diff --git a/src/libnetdata/spawn_server/spawn_server.h b/src/libnetdata/spawn_server/spawn_server.h index 5ba66ae382bf93..b57623ab0244b4 100644 --- a/src/libnetdata/spawn_server/spawn_server.h +++ b/src/libnetdata/spawn_server/spawn_server.h @@ -7,16 +7,12 @@ typedef enum __attribute__((packed)) { SPAWN_INSTANCE_TYPE_EXEC = 0, -#if !defined(OS_WINDOWS) SPAWN_INSTANCE_TYPE_CALLBACK = 1 -#endif } SPAWN_INSTANCE_TYPE; typedef enum __attribute__((packed)) { SPAWN_SERVER_OPTION_EXEC = (1 << 0), -#if !defined(OS_WINDOWS) SPAWN_SERVER_OPTION_CALLBACK = (1 << 1), -#endif } SPAWN_SERVER_OPTIONS; // this is only used publicly for SPAWN_INSTANCE_TYPE_CALLBACK @@ -27,7 +23,7 @@ typedef struct spawn_request { pid_t pid; // the pid of the child int sock; // the socket for this request int fds[SPAWN_SERVER_TRANSFER_FDS]; // 0 = stdin, 1 = stdout, 2 = stderr, 3 = custom - const char **environment; // the environment of the parent process + const char **envp; // the environment of the parent process const char **argv; // the command line and its parameters const void *data; // the data structure for the callback size_t data_size; // the data structure size @@ -36,17 +32,17 @@ typedef struct spawn_request { struct spawn_request *prev, *next; // linking of active requests at the spawn server } SPAWN_REQUEST; -typedef void (*spawn_request_callback_t)(SPAWN_REQUEST *request); +typedef int (*spawn_request_callback_t)(SPAWN_REQUEST *request); -typedef struct spawm_instance SPAWN_INSTANCE; +typedef struct spawn_instance SPAWN_INSTANCE; typedef struct spawn_server SPAWN_SERVER; SPAWN_SERVER* 
spawn_server_create(SPAWN_SERVER_OPTIONS options, const char *name, spawn_request_callback_t child_callback, int argc, const char **argv); void spawn_server_destroy(SPAWN_SERVER *server); SPAWN_INSTANCE* spawn_server_exec(SPAWN_SERVER *server, int stderr_fd, int custom_fd, const char **argv, const void *data, size_t data_size, SPAWN_INSTANCE_TYPE type); -int spawn_server_exec_kill(SPAWN_SERVER *server, SPAWN_INSTANCE *instance); -int spawn_server_exec_wait(SPAWN_SERVER *server, SPAWN_INSTANCE *instance); +int spawn_server_exec_kill(SPAWN_SERVER *server, SPAWN_INSTANCE *si); +int spawn_server_exec_wait(SPAWN_SERVER *server, SPAWN_INSTANCE *si); int spawn_server_instance_read_fd(SPAWN_INSTANCE *si); int spawn_server_instance_write_fd(SPAWN_INSTANCE *si); diff --git a/src/libnetdata/spawn_server/spawn_server_internals.h b/src/libnetdata/spawn_server/spawn_server_internals.h new file mode 100644 index 00000000000000..d819f295bdce66 --- /dev/null +++ b/src/libnetdata/spawn_server/spawn_server_internals.h @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_SPAWN_SERVER_INTERNALS_H +#define NETDATA_SPAWN_SERVER_INTERNALS_H + +#include "../libnetdata.h" +#include "spawn_server.h" +#include "spawn_library.h" + +#if defined(OS_WINDOWS) +// #define SPAWN_SERVER_VERSION_WINDOWS 1 +// #define SPAWN_SERVER_VERSION_UV 1 +#define SPAWN_SERVER_VERSION_POSIX_SPAWN 1 +#else +#define SPAWN_SERVER_VERSION_NOFORK 1 +// #define SPAWN_SERVER_VERSION_UV 1 +// #define SPAWN_SERVER_VERSION_POSIX_SPAWN 1 +#endif + +#if defined(SPAWN_SERVER_VERSION_WINDOWS) +#include +#include +#include +#include +#include +#include +#include +#endif + +struct spawn_server { + size_t id; + size_t request_id; + const char *name; + +#if defined(SPAWN_SERVER_VERSION_UV) + uv_loop_t *loop; + uv_thread_t thread; + uv_async_t async; + bool stopping; + + SPINLOCK spinlock; + struct work_item *work_queue; +#endif + +#if defined(SPAWN_SERVER_VERSION_NOFORK) + SPAWN_SERVER_OPTIONS options; 
+ + ND_UUID magic; // for authorizing requests, the client needs to know our random UUID + // it is ignored for PING requests + + int pipe[2]; + int sock; // the listening socket of the server + pid_t server_pid; + char *path; + spawn_request_callback_t cb; + + int argc; + const char **argv; +#endif + +#if defined(SPAWN_SERVER_VERSION_POSIX_SPAWN) +#endif + +#if defined(SPAWN_SERVER_VERSION_WINDOWS) +#endif +}; + +struct spawn_instance { + size_t request_id; + int sock; + int write_fd; + int read_fd; + pid_t child_pid; + +#if defined(SPAWN_SERVER_VERSION_UV) + uv_process_t process; + int exit_code; + uv_sem_t sem; +#endif + +#if defined(SPAWN_SERVER_VERSION_NOFORK) +#endif + +#if defined(SPAWN_SERVER_VERSION_POSIX_SPAWN) + const char *cmdline; + bool exited; + int waitpid_status; + struct spawn_instance *prev, *next; +#endif + +#if defined(SPAWN_SERVER_VERSION_WINDOWS) + HANDLE process_handle; + DWORD dwProcessId; +#endif +}; + +#endif //NETDATA_SPAWN_SERVER_INTERNALS_H diff --git a/src/libnetdata/spawn_server/spawn_server_libuv.c b/src/libnetdata/spawn_server/spawn_server_libuv.c new file mode 100644 index 00000000000000..4294398b63a702 --- /dev/null +++ b/src/libnetdata/spawn_server/spawn_server_libuv.c @@ -0,0 +1,395 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "spawn_server_internals.h" + +#if defined(SPAWN_SERVER_VERSION_UV) + +int spawn_server_instance_read_fd(SPAWN_INSTANCE *si) { return si->read_fd; } +int spawn_server_instance_write_fd(SPAWN_INSTANCE *si) { return si->write_fd; } +void spawn_server_instance_read_fd_unset(SPAWN_INSTANCE *si) { si->read_fd = -1; } +void spawn_server_instance_write_fd_unset(SPAWN_INSTANCE *si) { si->write_fd = -1; } +pid_t spawn_server_instance_pid(SPAWN_INSTANCE *si) { return uv_process_get_pid(&si->process); } + +typedef struct work_item { + int stderr_fd; + const char **argv; + uv_sem_t sem; + SPAWN_INSTANCE *instance; + struct work_item *prev; + struct work_item *next; +} work_item; + +int 
uv_errno_to_errno(int uv_err) { + switch (uv_err) { + case 0: return 0; + case UV_E2BIG: return E2BIG; + case UV_EACCES: return EACCES; + case UV_EADDRINUSE: return EADDRINUSE; + case UV_EADDRNOTAVAIL: return EADDRNOTAVAIL; + case UV_EAFNOSUPPORT: return EAFNOSUPPORT; + case UV_EAGAIN: return EAGAIN; + case UV_EAI_ADDRFAMILY: return EAI_ADDRFAMILY; + case UV_EAI_AGAIN: return EAI_AGAIN; + case UV_EAI_BADFLAGS: return EAI_BADFLAGS; +#if defined(EAI_CANCELED) + case UV_EAI_CANCELED: return EAI_CANCELED; +#endif + case UV_EAI_FAIL: return EAI_FAIL; + case UV_EAI_FAMILY: return EAI_FAMILY; + case UV_EAI_MEMORY: return EAI_MEMORY; + case UV_EAI_NODATA: return EAI_NODATA; + case UV_EAI_NONAME: return EAI_NONAME; + case UV_EAI_OVERFLOW: return EAI_OVERFLOW; + case UV_EAI_SERVICE: return EAI_SERVICE; + case UV_EAI_SOCKTYPE: return EAI_SOCKTYPE; + case UV_EALREADY: return EALREADY; + case UV_EBADF: return EBADF; + case UV_EBUSY: return EBUSY; + case UV_ECANCELED: return ECANCELED; + case UV_ECHARSET: return EILSEQ; // No direct mapping, using EILSEQ + case UV_ECONNABORTED: return ECONNABORTED; + case UV_ECONNREFUSED: return ECONNREFUSED; + case UV_ECONNRESET: return ECONNRESET; + case UV_EDESTADDRREQ: return EDESTADDRREQ; + case UV_EEXIST: return EEXIST; + case UV_EFAULT: return EFAULT; + case UV_EFBIG: return EFBIG; + case UV_EHOSTUNREACH: return EHOSTUNREACH; + case UV_EINTR: return EINTR; + case UV_EINVAL: return EINVAL; + case UV_EIO: return EIO; + case UV_EISCONN: return EISCONN; + case UV_EISDIR: return EISDIR; + case UV_ELOOP: return ELOOP; + case UV_EMFILE: return EMFILE; + case UV_EMSGSIZE: return EMSGSIZE; + case UV_ENAMETOOLONG: return ENAMETOOLONG; + case UV_ENETDOWN: return ENETDOWN; + case UV_ENETUNREACH: return ENETUNREACH; + case UV_ENFILE: return ENFILE; + case UV_ENOBUFS: return ENOBUFS; + case UV_ENODEV: return ENODEV; + case UV_ENOENT: return ENOENT; + case UV_ENOMEM: return ENOMEM; + case UV_ENONET: return ENONET; + case UV_ENOSPC: return ENOSPC; + case 
UV_ENOSYS: return ENOSYS; + case UV_ENOTCONN: return ENOTCONN; + case UV_ENOTDIR: return ENOTDIR; + case UV_ENOTEMPTY: return ENOTEMPTY; + case UV_ENOTSOCK: return ENOTSOCK; + case UV_ENOTSUP: return ENOTSUP; + case UV_ENOTTY: return ENOTTY; + case UV_ENXIO: return ENXIO; + case UV_EPERM: return EPERM; + case UV_EPIPE: return EPIPE; + case UV_EPROTO: return EPROTO; + case UV_EPROTONOSUPPORT: return EPROTONOSUPPORT; + case UV_EPROTOTYPE: return EPROTOTYPE; + case UV_ERANGE: return ERANGE; + case UV_EROFS: return EROFS; + case UV_ESHUTDOWN: return ESHUTDOWN; + case UV_ESPIPE: return ESPIPE; + case UV_ESRCH: return ESRCH; + case UV_ETIMEDOUT: return ETIMEDOUT; + case UV_ETXTBSY: return ETXTBSY; + case UV_EXDEV: return EXDEV; + default: return EINVAL; // Use EINVAL for unknown libuv errors + } +} + +static void posix_unmask_sigchld_on_thread(void) { + sigset_t sigset; + sigemptyset(&sigset); // Initialize the signal set to empty + sigaddset(&sigset, SIGCHLD); // Add SIGCHLD to the set + + if(pthread_sigmask(SIG_UNBLOCK, &sigset, NULL) != 0) + netdata_log_error("SPAWN SERVER: cannot unmask SIGCHLD"); +} + +static void server_thread(void *arg) { + SPAWN_SERVER *server = (SPAWN_SERVER *)arg; + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN SERVER: started"); + + // this thread needs to process SIGCHLD (by libuv) + // otherwise the on_exit() callback is never run + posix_unmask_sigchld_on_thread(); + + // run the event loop + uv_run(server->loop, UV_RUN_DEFAULT); + + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN SERVER: ended"); +} + +static void on_process_exit(uv_process_t *req, int64_t exit_status, int term_signal) { + SPAWN_INSTANCE *si = (SPAWN_INSTANCE *)req->data; + si->exit_code = (int)(term_signal ? 
term_signal : exit_status << 8); + uv_close((uv_handle_t *)req, NULL); // Properly close the process handle + + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN SERVER: process with pid %d exited with code %d and term_signal %d", + si->child_pid, (int)exit_status, term_signal); + + uv_sem_post(&si->sem); // Signal that the process has exited +} + +static SPAWN_INSTANCE *spawn_process_with_libuv(uv_loop_t *loop, int stderr_fd, const char **argv) { + SPAWN_INSTANCE *si = NULL; + bool si_sem_init = false; + + int stdin_pipe[2] = { -1, -1 }; + int stdout_pipe[2] = { -1, -1 }; + + if (pipe(stdin_pipe) == -1) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: stdin pipe() failed"); + goto cleanup; + } + + if (pipe(stdout_pipe) == -1) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: stdout pipe() failed"); + goto cleanup; + } + + si = callocz(1, sizeof(SPAWN_INSTANCE)); + si->exit_code = -1; + + if (uv_sem_init(&si->sem, 0)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: uv_sem_init() failed"); + goto cleanup; + } + si_sem_init = true; + + uv_stdio_container_t stdio[3] = { 0 }; + stdio[0].flags = UV_INHERIT_FD; + stdio[0].data.fd = stdin_pipe[PIPE_READ]; + stdio[1].flags = UV_INHERIT_FD; + stdio[1].data.fd = stdout_pipe[PIPE_WRITE]; + stdio[2].flags = UV_INHERIT_FD; + stdio[2].data.fd = stderr_fd; + + uv_process_options_t options = { 0 }; + options.stdio_count = 3; + options.stdio = stdio; + options.exit_cb = on_process_exit; + options.file = argv[0]; + options.args = (char **)argv; + options.env = (char **)environ; + + // uv_spawn() does not close all other open file descriptors + // we have to close them manually + int fds[3] = { stdio[0].data.fd, stdio[1].data.fd, stdio[2].data.fd }; + os_close_all_non_std_open_fds_except(fds, 3, CLOSE_RANGE_CLOEXEC); + + int rc = uv_spawn(loop, &si->process, &options); + if (rc) { + errno = uv_errno_to_errno(rc); + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN SERVER: uv_spawn() failed with error %s, %s", + uv_err_name(rc), 
uv_strerror(rc)); + goto cleanup; + } + + // Successfully spawned + + // get the pid of the process spawned + si->child_pid = uv_process_get_pid(&si->process); + + // on_process_exit() needs this to find the si + si->process.data = si; + + nd_log(NDLS_COLLECTORS, NDLP_INFO, + "SPAWN SERVER: process created with pid %d", si->child_pid); + + // close the child sides of the pipes + close(stdin_pipe[PIPE_READ]); + si->write_fd = stdin_pipe[PIPE_WRITE]; + si->read_fd = stdout_pipe[PIPE_READ]; + close(stdout_pipe[PIPE_WRITE]); + + return si; + +cleanup: + if(stdin_pipe[PIPE_READ] != -1) close(stdin_pipe[PIPE_READ]); + if(stdin_pipe[PIPE_WRITE] != -1) close(stdin_pipe[PIPE_WRITE]); + if(stdout_pipe[PIPE_READ] != -1) close(stdout_pipe[PIPE_READ]); + if(stdout_pipe[PIPE_WRITE] != -1) close(stdout_pipe[PIPE_WRITE]); + if(si) { + if(si_sem_init) + uv_sem_destroy(&si->sem); + + freez(si); + } + return NULL; +} + +static void async_callback(uv_async_t *handle) { + nd_log(NDLS_COLLECTORS, NDLP_INFO, "SPAWN SERVER: dequeue commands started"); + SPAWN_SERVER *server = (SPAWN_SERVER *)handle->data; + + // Check if the server is stopping + if (__atomic_load_n(&server->stopping, __ATOMIC_RELAXED)) { + nd_log(NDLS_COLLECTORS, NDLP_INFO, "SPAWN SERVER: stopping..."); + uv_stop(server->loop); + return; + } + + work_item *item; + spinlock_lock(&server->spinlock); + while (server->work_queue) { + item = server->work_queue; + DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(server->work_queue, item, prev, next); + spinlock_unlock(&server->spinlock); + + item->instance = spawn_process_with_libuv(server->loop, item->stderr_fd, item->argv); + uv_sem_post(&item->sem); + + spinlock_lock(&server->spinlock); + } + spinlock_unlock(&server->spinlock); + + nd_log(NDLS_COLLECTORS, NDLP_INFO, "SPAWN SERVER: dequeue commands done"); +} + + +SPAWN_SERVER* spawn_server_create(SPAWN_SERVER_OPTIONS options __maybe_unused, const char *name, spawn_request_callback_t cb __maybe_unused, int argc __maybe_unused, const 
char **argv __maybe_unused) { + SPAWN_SERVER* server = callocz(1, sizeof(SPAWN_SERVER)); + spinlock_init(&server->spinlock); + + if (name) + server->name = strdupz(name); + else + server->name = strdupz("unnamed"); + + server->loop = callocz(1, sizeof(uv_loop_t)); + if (uv_loop_init(server->loop)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: uv_loop_init() failed"); + freez(server->loop); + freez((void *)server->name); + freez(server); + return NULL; + } + + if (uv_async_init(server->loop, &server->async, async_callback)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: uv_async_init() failed"); + uv_loop_close(server->loop); + freez(server->loop); + freez((void *)server->name); + freez(server); + return NULL; + } + server->async.data = server; + + if (uv_thread_create(&server->thread, server_thread, server)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: uv_thread_create() failed"); + uv_close((uv_handle_t*)&server->async, NULL); + uv_loop_close(server->loop); + freez(server->loop); + freez((void *)server->name); + freez(server); + return NULL; + } + + return server; +} + +static void close_handle(uv_handle_t* handle, void* arg __maybe_unused) { + if (!uv_is_closing(handle)) { + uv_close(handle, NULL); + } +} + +void spawn_server_destroy(SPAWN_SERVER *server) { + if (!server) return; + + __atomic_store_n(&server->stopping, true, __ATOMIC_RELAXED); + + // Trigger the async callback to stop the event loop + uv_async_send(&server->async); + + // Wait for the server thread to finish + uv_thread_join(&server->thread); + + uv_stop(server->loop); + uv_close((uv_handle_t*)&server->async, NULL); + + // Walk through and close any remaining handles + uv_walk(server->loop, close_handle, NULL); + + uv_loop_close(server->loop); + freez(server->loop); + freez((void *)server->name); + freez(server); +} + +SPAWN_INSTANCE* spawn_server_exec(SPAWN_SERVER *server, int stderr_fd __maybe_unused, int custom_fd __maybe_unused, const char **argv, const void *data 
__maybe_unused, size_t data_size __maybe_unused, SPAWN_INSTANCE_TYPE type) { + if (type != SPAWN_INSTANCE_TYPE_EXEC) + return NULL; + + work_item item = { 0 }; + item.stderr_fd = stderr_fd; + item.argv = argv; + + if (uv_sem_init(&item.sem, 0)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: uv_sem_init() failed"); + return NULL; + } + + spinlock_lock(&server->spinlock); + // item is in the stack, but the server will remove it before sending to us + // the semaphore, so it is safe to have the item in the stack. + work_item *item_ptr = &item; + DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(server->work_queue, item_ptr, prev, next); + spinlock_unlock(&server->spinlock); + + uv_async_send(&server->async); + + nd_log(NDLS_COLLECTORS, NDLP_INFO, "SPAWN PARENT: queued command"); + + // Wait for the command to be executed + uv_sem_wait(&item.sem); + uv_sem_destroy(&item.sem); + + if (!item.instance) { + nd_log(NDLS_COLLECTORS, NDLP_INFO, "SPAWN PARENT: process failed to be started"); + return NULL; + } + + nd_log(NDLS_COLLECTORS, NDLP_INFO, "SPAWN PARENT: process started"); + + return item.instance; +} + +int spawn_server_exec_kill(SPAWN_SERVER *server __maybe_unused, SPAWN_INSTANCE *si) { + if(!si) return -1; + + // close all pipe descriptors to force the child to exit + if(si->read_fd != -1) { close(si->read_fd); si->read_fd = -1; } + if(si->write_fd != -1) { close(si->write_fd); si->write_fd = -1; } + + if (uv_process_kill(&si->process, SIGTERM)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: uv_process_kill() failed"); + return -1; + } + + return spawn_server_exec_wait(server, si); +} + +int spawn_server_exec_wait(SPAWN_SERVER *server __maybe_unused, SPAWN_INSTANCE *si) { + if (!si) return -1; + + // close all pipe descriptors to force the child to exit + if(si->read_fd != -1) { close(si->read_fd); si->read_fd = -1; } + if(si->write_fd != -1) { close(si->write_fd); si->write_fd = -1; } + + // Wait for the process to exit + uv_sem_wait(&si->sem); + int exit_code = 
si->exit_code; + + uv_sem_destroy(&si->sem); + freez(si); + return exit_code; +} + +#endif diff --git a/src/libnetdata/spawn_server/spawn_server.c b/src/libnetdata/spawn_server/spawn_server_nofork.c similarity index 75% rename from src/libnetdata/spawn_server/spawn_server.c rename to src/libnetdata/spawn_server/spawn_server_nofork.c index ef6755c324decd..0bde3cd77b41c2 100644 --- a/src/libnetdata/spawn_server/spawn_server.c +++ b/src/libnetdata/spawn_server/spawn_server_nofork.c @@ -1,287 +1,14 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "../libnetdata.h" +#include "spawn_server_internals.h" -#include "spawn_server.h" - -#if defined(OS_WINDOWS) -#include -#include -#include -#include -#include -#endif - -struct spawn_server { - size_t id; - size_t request_id; - const char *name; -#if !defined(OS_WINDOWS) - SPAWN_SERVER_OPTIONS options; - - ND_UUID magic; // for authorizing requests, the client needs to know our random UUID - // it is ignored for PING requests - - int pipe[2]; - int sock; // the listening socket of the server - pid_t server_pid; - char *path; - spawn_request_callback_t cb; - - int argc; - const char **argv; -#endif -}; - -struct spawm_instance { - size_t request_id; - int sock; - int write_fd; - int read_fd; - pid_t child_pid; - -#if defined(OS_WINDOWS) - HANDLE process_handle; - HANDLE read_handle; - HANDLE write_handle; -#endif -}; +#if defined(SPAWN_SERVER_VERSION_NOFORK) int spawn_server_instance_read_fd(SPAWN_INSTANCE *si) { return si->read_fd; } int spawn_server_instance_write_fd(SPAWN_INSTANCE *si) { return si->write_fd; } -pid_t spawn_server_instance_pid(SPAWN_INSTANCE *si) { return si->child_pid; } void spawn_server_instance_read_fd_unset(SPAWN_INSTANCE *si) { si->read_fd = -1; } void spawn_server_instance_write_fd_unset(SPAWN_INSTANCE *si) { si->write_fd = -1; } - -#if defined(OS_WINDOWS) - -SPAWN_SERVER* spawn_server_create(SPAWN_SERVER_OPTIONS options __maybe_unused, const char *name, spawn_request_callback_t cb 
__maybe_unused, int argc __maybe_unused, const char **argv __maybe_unused) { - SPAWN_SERVER* server = callocz(1, sizeof(SPAWN_SERVER)); - if(name) - server->name = strdupz(name); - else - server->name = strdupz("unnamed"); - return server; -} - -void spawn_server_destroy(SPAWN_SERVER *server) { - if (server) { - freez((void *)server->name); - freez(server); - } -} - -static BUFFER *argv_to_windows(const char **argv) { - BUFFER *wb = buffer_create(0, NULL); - - // argv[0] is the path - char b[strlen(argv[0]) * 2 + 1024]; - cygwin_conv_path(CCP_POSIX_TO_WIN_A | CCP_ABSOLUTE, argv[0], b, sizeof(b)); - - buffer_strcat(wb, "cmd.exe /C "); - - for(size_t i = 0; argv[i] ;i++) { - const char *s = (i == 0) ? b : argv[i]; - size_t len = strlen(s); - buffer_need_bytes(wb, len * 2 + 1); - - bool needs_quotes = false; - for(const char *c = s; !needs_quotes && *c ; c++) { - switch(*c) { - case ' ': - case '\v': - case '\t': - case '\n': - case '"': - needs_quotes = true; - break; - - default: - break; - } - } - - if(needs_quotes && buffer_strlen(wb)) - buffer_strcat(wb, " \""); - else - buffer_putc(wb, ' '); - - for(const char *c = s; *c ; c++) { - switch(*c) { - case '"': - buffer_putc(wb, '\\'); - // fall through - - default: - buffer_putc(wb, *c); - break; - } - } - - if(needs_quotes) - buffer_strcat(wb, "\""); - } - - return wb; -} - -SPAWN_INSTANCE* spawn_server_exec(SPAWN_SERVER *server, int stderr_fd, int custom_fd __maybe_unused, const char **argv, const void *data __maybe_unused, size_t data_size __maybe_unused, SPAWN_INSTANCE_TYPE type) { - static SPINLOCK spinlock = NETDATA_SPINLOCK_INITIALIZER; - - if (type != SPAWN_INSTANCE_TYPE_EXEC) - return NULL; - - int pipe_stdin[2] = { -1, -1 }, pipe_stdout[2] = { -1, -1 }; - - errno_clear(); - - SPAWN_INSTANCE *instance = callocz(1, sizeof(*instance)); - instance->request_id = __atomic_add_fetch(&server->request_id, 1, __ATOMIC_RELAXED); - - CLEAN_BUFFER *wb = argv_to_windows(argv); - char *command = (char 
*)buffer_tostring(wb); - - if (pipe(pipe_stdin) == -1) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN PARENT: Cannot create stdin pipe() for request No %zu, command: %s", - instance->request_id, command); - goto cleanup; - } - - if (pipe(pipe_stdout) == -1) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN PARENT: Cannot create stdout pipe() for request No %zu, command: %s", - instance->request_id, command); - goto cleanup; - } - - // do not run multiple times this section - // to prevent handles leaking - spinlock_lock(&spinlock); - - // Convert POSIX file descriptors to Windows handles - HANDLE stdin_read_handle = (HANDLE)_get_osfhandle(pipe_stdin[0]); - HANDLE stdout_write_handle = (HANDLE)_get_osfhandle(pipe_stdout[1]); - HANDLE stderr_handle = (HANDLE)_get_osfhandle(stderr_fd); - - if (stdin_read_handle == INVALID_HANDLE_VALUE || stdout_write_handle == INVALID_HANDLE_VALUE || stderr_handle == INVALID_HANDLE_VALUE) { - spinlock_unlock(&spinlock); - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN PARENT: Invalid handle value(s) for request No %zu, command: %s", - instance->request_id, command); - goto cleanup; - } - - // Set handle inheritance - if (!SetHandleInformation(stdin_read_handle, HANDLE_FLAG_INHERIT, HANDLE_FLAG_INHERIT) || - !SetHandleInformation(stdout_write_handle, HANDLE_FLAG_INHERIT, HANDLE_FLAG_INHERIT) || - !SetHandleInformation(stderr_handle, HANDLE_FLAG_INHERIT, HANDLE_FLAG_INHERIT)) { - spinlock_unlock(&spinlock); - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN PARENT: Cannot set handle(s) inheritance for request No %zu, command: %s", - instance->request_id, command); - goto cleanup; - } - - // Set up the STARTUPINFO structure - STARTUPINFO si; - PROCESS_INFORMATION pi; - ZeroMemory(&si, sizeof(si)); - si.cb = sizeof(si); - si.dwFlags = STARTF_USESTDHANDLES; - si.hStdInput = stdin_read_handle; - si.hStdOutput = stdout_write_handle; - si.hStdError = stderr_handle; - - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN PARENT: Running request No %zu, command: 
%s", - instance->request_id, command); - - // Spawn the process - if (!CreateProcess(NULL, command, NULL, NULL, TRUE, 0, NULL, NULL, &si, &pi)) { - spinlock_unlock(&spinlock); - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN PARENT: cannot CreateProcess() for request No %zu, command: %s", - instance->request_id, command); - goto cleanup; - } - - CloseHandle(pi.hThread); - - // end of the critical section - spinlock_unlock(&spinlock); - - // Close unused pipe ends - close(pipe_stdin[0]); pipe_stdin[0] = -1; - close(pipe_stdout[1]); pipe_stdout[1] = -1; - - // Store process information in instance - instance->child_pid = cygwin_winpid_to_pid(pi.dwProcessId); - if(instance->child_pid == -1) instance->child_pid = pi.dwProcessId; - - instance->process_handle = pi.hProcess; - - // Convert handles to POSIX file descriptors - instance->write_fd = pipe_stdin[1]; - instance->read_fd = pipe_stdout[0]; - - errno_clear(); - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN PARENT: created process for request No %zu, pid %d, command: %s", - instance->request_id, (int)instance->child_pid, command); - - return instance; - -cleanup: - if (pipe_stdin[0] >= 0) close(pipe_stdin[0]); - if (pipe_stdin[1] >= 0) close(pipe_stdin[1]); - if (pipe_stdout[0] >= 0) close(pipe_stdout[0]); - if (pipe_stdout[1] >= 0) close(pipe_stdout[1]); - freez(instance); - return NULL; -} - -int spawn_server_exec_kill(SPAWN_SERVER *server __maybe_unused, SPAWN_INSTANCE *instance) { - if(instance->read_fd != -1) { close(instance->read_fd); instance->read_fd = -1; } - if(instance->write_fd != -1) { close(instance->write_fd); instance->write_fd = -1; } - CloseHandle(instance->read_handle); instance->read_handle = NULL; - CloseHandle(instance->write_handle); instance->write_handle = NULL; - - TerminateProcess(instance->process_handle, 0); - - DWORD exit_code; - GetExitCodeProcess(instance->process_handle, &exit_code); - CloseHandle(instance->process_handle); - - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN PARENT: child of 
request No %zu, pid %d, killed and exited with code %d", - instance->request_id, (int)instance->child_pid, (int)exit_code); - - freez(instance); - return (int)exit_code; -} - -int spawn_server_exec_wait(SPAWN_SERVER *server __maybe_unused, SPAWN_INSTANCE *instance) { - if(instance->read_fd != -1) { close(instance->read_fd); instance->read_fd = -1; } - if(instance->write_fd != -1) { close(instance->write_fd); instance->write_fd = -1; } - CloseHandle(instance->read_handle); instance->read_handle = NULL; - CloseHandle(instance->write_handle); instance->write_handle = NULL; - - WaitForSingleObject(instance->process_handle, INFINITE); - - DWORD exit_code = -1; - GetExitCodeProcess(instance->process_handle, &exit_code); - CloseHandle(instance->process_handle); - - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN PARENT: child of request No %zu, pid %d, waited and exited with code %d", - instance->request_id, (int)instance->child_pid, (int)exit_code); - - freez(instance); - return (int)exit_code; -} - -#else // !OS_WINDOWS +pid_t spawn_server_instance_pid(SPAWN_INSTANCE *si) { return si->child_pid; } #ifdef __APPLE__ #include @@ -344,19 +71,19 @@ static void spawn_server_run_child(SPAWN_SERVER *server, SPAWN_REQUEST *rq) { nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: cannot dup2(%d) stdin of request No %zu: %s", stdin_fd, rq->request_id, rq->cmdline); - exit(1); + exit(EXIT_FAILURE); } if (dup2(stdout_fd, STDOUT_FILENO) == -1) { nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: cannot dup2(%d) stdin of request No %zu: %s", stdout_fd, rq->request_id, rq->cmdline); - exit(1); + exit(EXIT_FAILURE); } if (dup2(stderr_fd, STDERR_FILENO) == -1) { nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: cannot dup2(%d) stderr of request No %zu: %s", stderr_fd, rq->request_id, rq->cmdline); - exit(1); + exit(EXIT_FAILURE); } // close the excess fds @@ -365,34 +92,10 @@ static void spawn_server_run_child(SPAWN_SERVER *server, SPAWN_REQUEST *rq) { close(stderr_fd); stderr_fd = rq->fds[2] = 
STDERR_FILENO; // overwrite the process environment - environ = (char **)rq->environment; - - // Perform different actions based on the type - switch (rq->type) { - - case SPAWN_INSTANCE_TYPE_EXEC: - // close all fds except the ones we need - os_close_all_non_std_open_fds_except(NULL, 0); - - // run the command - execvp(rq->argv[0], (char **)rq->argv); - - nd_log(NDLS_COLLECTORS, NDLP_ERR, - "SPAWN SERVER: Failed to execute command of request No %zu: %s", - rq->request_id, rq->cmdline); - - exit(1); - break; + environ = (char **)rq->envp; - case SPAWN_INSTANCE_TYPE_CALLBACK: - server->cb(rq); - exit(0); - break; - - default: - nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: unknown request type %u", rq->type); - exit(1); - } + // run the callback and return its code + exit(server->cb(rq)); } // -------------------------------------------------------------------------------------------------------------------- @@ -457,54 +160,6 @@ static const char** argv_decode(const char *buffer, size_t size) { return argv; } -static BUFFER *argv_to_cmdline_buffer(const char **argv) { - BUFFER *wb = buffer_create(0, NULL); - - for(size_t i = 0; argv[i] ;i++) { - const char *s = argv[i]; - size_t len = strlen(s); - buffer_need_bytes(wb, len * 2 + 1); - - bool needs_quotes = false; - for(const char *c = s; !needs_quotes && *c ; c++) { - switch(*c) { - case ' ': - case '\v': - case '\t': - case '\n': - case '"': - needs_quotes = true; - break; - - default: - break; - } - } - - if(needs_quotes && buffer_strlen(wb)) - buffer_strcat(wb, " \""); - else - buffer_putc(wb, ' '); - - for(const char *c = s; *c ; c++) { - switch(*c) { - case '"': - buffer_putc(wb, '\\'); - // fall through - - default: - buffer_putc(wb, *c); - break; - } - } - - if(needs_quotes) - buffer_strcat(wb, "\""); - } - - return wb; -} - // -------------------------------------------------------------------------------------------------------------------- // status reports @@ -602,50 +257,105 @@ static void 
request_free(SPAWN_REQUEST *rq) { if(rq->fds[3] != -1) close(rq->fds[3]); if(rq->sock != -1) close(rq->sock); freez((void *)rq->argv); - freez((void *)rq->environment); + freez((void *)rq->envp); freez((void *)rq->data); freez((void *)rq->cmdline); freez((void *)rq); } -static void spawn_server_execute_request(SPAWN_SERVER *server, SPAWN_REQUEST *rq) { - switch(rq->type) { - case SPAWN_INSTANCE_TYPE_EXEC: - // close custom_fd - it is not needed for exec mode - if(rq->fds[3] != -1) { close(rq->fds[3]); rq->fds[3] = -1; } +static bool spawn_external_command(SPAWN_SERVER *server __maybe_unused, SPAWN_REQUEST *rq) { + // Close custom_fd - it is not needed for exec mode + if(rq->fds[3] != -1) { close(rq->fds[3]); rq->fds[3] = -1; } - // create the cmdline for logs - if(rq->argv) { - CLEAN_BUFFER *wb = argv_to_cmdline_buffer(rq->argv); - rq->cmdline = strdupz(buffer_tostring(wb)); - } - break; + if(!rq->argv) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: there is no argv pointer to exec"); + return false; + } - case SPAWN_INSTANCE_TYPE_CALLBACK: - if(server->cb == NULL) { - errno = ENOSYS; - spawn_server_send_status_failure(rq); - request_free(rq); - return; - } - rq->cmdline = strdupz("callback() function"); - break; + if(rq->fds[0] == -1 || rq->fds[1] == -1 || rq->fds[2] == -1) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: stdio fds are missing from the request"); + return false; + } - default: - errno = EINVAL; - spawn_server_send_status_failure(rq); - request_free(rq); - return; + CLEAN_BUFFER *wb = argv_to_cmdline_buffer(rq->argv); + rq->cmdline = strdupz(buffer_tostring(wb)); + + posix_spawn_file_actions_t file_actions; + if (posix_spawn_file_actions_init(&file_actions) != 0) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: posix_spawn_file_actions_init() failed: %s", rq->cmdline); + return false; + } + + posix_spawn_file_actions_adddup2(&file_actions, rq->fds[0], STDIN_FILENO); + posix_spawn_file_actions_adddup2(&file_actions, rq->fds[1], 
STDOUT_FILENO); + posix_spawn_file_actions_adddup2(&file_actions, rq->fds[2], STDERR_FILENO); + posix_spawn_file_actions_addclose(&file_actions, rq->fds[0]); + posix_spawn_file_actions_addclose(&file_actions, rq->fds[1]); + posix_spawn_file_actions_addclose(&file_actions, rq->fds[2]); + + posix_spawnattr_t attr; + if (posix_spawnattr_init(&attr) != 0) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: posix_spawnattr_init() failed: %s", rq->cmdline); + posix_spawn_file_actions_destroy(&file_actions); + return false; + } + + // Set the flags to reset the signal mask and signal actions + sigset_t empty_mask; + sigemptyset(&empty_mask); + if (posix_spawnattr_setsigmask(&attr, &empty_mask) != 0) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: posix_spawnattr_setsigmask() failed: %s", rq->cmdline); + posix_spawn_file_actions_destroy(&file_actions); + posix_spawnattr_destroy(&attr); + return false; + } + + short flags = POSIX_SPAWN_SETSIGMASK | POSIX_SPAWN_SETSIGDEF; + if (posix_spawnattr_setflags(&attr, flags) != 0) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: posix_spawnattr_setflags() failed: %s", rq->cmdline); + posix_spawn_file_actions_destroy(&file_actions); + posix_spawnattr_destroy(&attr); + return false; + } + + os_close_all_non_std_open_fds_except(rq->fds, 3, CLOSE_RANGE_CLOEXEC); + + errno_clear(); + if (posix_spawn(&rq->pid, rq->argv[0], &file_actions, &attr, (char * const *)rq->argv, (char * const *)rq->envp) != 0) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: posix_spawn() failed: %s", rq->cmdline); + + posix_spawnattr_destroy(&attr); + posix_spawn_file_actions_destroy(&file_actions); + return false; + } + + // Destroy the posix_spawnattr_t and posix_spawn_file_actions_t structures + posix_spawnattr_destroy(&attr); + posix_spawn_file_actions_destroy(&file_actions); + + // Close the read end of the stdin pipe and the write end of the stdout pipe in the parent process + close(rq->fds[0]); rq->fds[0] = -1; + close(rq->fds[1]); rq->fds[1] 
= -1; + close(rq->fds[2]); rq->fds[2] = -1; + + nd_log(NDLS_COLLECTORS, NDLP_DEBUG, "SPAWN SERVER: process created with pid %d: %s", rq->pid, rq->cmdline); + return true; +} + +static bool spawn_server_run_callback(SPAWN_SERVER *server __maybe_unused, SPAWN_REQUEST *rq) { + rq->cmdline = strdupz("callback() function"); + + if(server->cb == NULL) { + errno = ENOSYS; + return false; } pid_t pid = fork(); if (pid < 0) { // fork failed - nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: Failed to fork() child."); - spawn_server_send_status_failure(rq); - request_free(rq); - return; + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: Failed to fork() child for callback."); + return false; } else if (pid == 0) { // the child @@ -657,11 +367,37 @@ static void spawn_server_execute_request(SPAWN_SERVER *server, SPAWN_REQUEST *rq // the parent rq->pid = pid; + return true; +} + +static void spawn_server_execute_request(SPAWN_SERVER *server, SPAWN_REQUEST *rq) { + bool done; + switch(rq->type) { + case SPAWN_INSTANCE_TYPE_EXEC: + done = spawn_external_command(server, rq); + break; + + case SPAWN_INSTANCE_TYPE_CALLBACK: + done = spawn_server_run_callback(server, rq); + break; + + default: + errno = EINVAL; + done = false; + break; + } + + if(!done) { + spawn_server_send_status_failure(rq); + request_free(rq); + return; + } + // let the parent know spawn_server_send_status_success(rq); // do not keep data we don't need at the parent - freez((void *)rq->environment); rq->environment = NULL; + freez((void *)rq->envp); rq->envp = NULL; freez((void *)rq->argv); rq->argv = NULL; freez((void *)rq->data); rq->data = NULL; rq->data_size = 0; @@ -747,7 +483,7 @@ static bool spawn_server_send_request(ND_UUID *magic, SPAWN_REQUEST *request) { bool ret = false; size_t env_size = 0; - void *encoded_env = argv_encode(request->environment, &env_size); + void *encoded_env = argv_encode(request->envp, &env_size); if (!encoded_env) goto cleanup; @@ -974,7 +710,7 @@ static void 
spawn_server_receive_request(int sock, SPAWN_SERVER *server) { [2] = stderr_fd, [3] = custom_fd, }, - .environment = argv_decode(envp_encoded, env_size), + .envp = argv_decode(envp_encoded, env_size), .argv = argv_decode(argv_encoded, argv_size), .data = data, .data_size = data_size, @@ -1082,20 +818,21 @@ static void spawn_server_process_sigchld(void) { } } -static void signals_unblock(void) { +static void posix_unmask_sigchld_on_thread(void) { sigset_t sigset; - sigfillset(&sigset); + sigemptyset(&sigset); // Initialize the signal set to empty + sigaddset(&sigset, SIGCHLD); // Add SIGCHLD to the set - if(pthread_sigmask(SIG_UNBLOCK, &sigset, NULL) == -1) { - netdata_log_error("SPAWN SERVER: Could not unblock signals for threads"); - } + if(pthread_sigmask(SIG_UNBLOCK, &sigset, NULL) != 0) + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN SERVER: cannot unmask SIGCHLD"); } static void spawn_server_event_loop(SPAWN_SERVER *server) { int pipe_fd = server->pipe[1]; close(server->pipe[0]); server->pipe[0] = -1; - signals_unblock(); + posix_unmask_sigchld_on_thread(); // Set up the signal handler for SIGCHLD and SIGTERM struct sigaction sa; @@ -1336,7 +1073,7 @@ SPAWN_SERVER* spawn_server_create(SPAWN_SERVER_OPTIONS options, const char *name } replace_stdio_with_dev_null(); - os_close_all_non_std_open_fds_except((int[]){ server->sock, server->pipe[1] }, 2); + os_close_all_non_std_open_fds_except((int[]){ server->sock, server->pipe[1] }, 2, 0); nd_log_reopen_log_files_for_spawn_server(); spawn_server_event_loop(server); } @@ -1458,7 +1195,7 @@ SPAWN_INSTANCE* spawn_server_exec(SPAWN_SERVER *server, int stderr_fd, int custo [2] = stderr_fd, [3] = custom_fd, }, - .environment = (const char **)environ, + .envp = (const char **)environ, .argv = argv, .data = data, .data_size = data_size, @@ -1530,4 +1267,4 @@ SPAWN_INSTANCE* spawn_server_exec(SPAWN_SERVER *server, int stderr_fd, int custo return NULL; } -#endif // !OS_WINDOWS +#endif diff --git 
a/src/libnetdata/spawn_server/spawn_server_posix.c b/src/libnetdata/spawn_server/spawn_server_posix.c new file mode 100644 index 00000000000000..49b081f134e9a6 --- /dev/null +++ b/src/libnetdata/spawn_server/spawn_server_posix.c @@ -0,0 +1,259 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "spawn_server_internals.h" + +#if defined(SPAWN_SERVER_VERSION_POSIX_SPAWN) + +#ifdef __APPLE__ +#include +#define environ (*_NSGetEnviron()) +#else +extern char **environ; +#endif + +int spawn_server_instance_read_fd(SPAWN_INSTANCE *si) { return si->read_fd; } +int spawn_server_instance_write_fd(SPAWN_INSTANCE *si) { return si->write_fd; } +void spawn_server_instance_read_fd_unset(SPAWN_INSTANCE *si) { si->read_fd = -1; } +void spawn_server_instance_write_fd_unset(SPAWN_INSTANCE *si) { si->write_fd = -1; } +pid_t spawn_server_instance_pid(SPAWN_INSTANCE *si) { return si->child_pid; } + +static struct { + bool sigchld_initialized; + SPINLOCK spinlock; + SPAWN_INSTANCE *instances; +} spawn_globals = { + .spinlock = NETDATA_SPINLOCK_INITIALIZER, + .instances = NULL, +}; + +//static void sigchld_handler(int signum __maybe_unused) { +// pid_t pid; +// int status; +// +// while ((pid = waitpid(-1, &status, WNOHANG)) > 0) { +// // Find the SPAWN_INSTANCE corresponding to this pid +// spinlock_lock(&spawn_globals.spinlock); +// for(SPAWN_INSTANCE *si = spawn_globals.instances; si ;si = si->next) { +// if (si->child_pid == pid) { +// __atomic_store_n(&si->waitpid_status, status, __ATOMIC_RELAXED); +// __atomic_store_n(&si->exited, true, __ATOMIC_RELAXED); +// break; +// } +// } +// spinlock_unlock(&spawn_globals.spinlock); +// } +//} + +SPAWN_SERVER* spawn_server_create(SPAWN_SERVER_OPTIONS options __maybe_unused, const char *name, spawn_request_callback_t cb __maybe_unused, int argc __maybe_unused, const char **argv __maybe_unused) { + SPAWN_SERVER* server = callocz(1, sizeof(SPAWN_SERVER)); + + if (name) + server->name = strdupz(name); + else + server->name = 
strdupz("unnamed"); + + if(!spawn_globals.sigchld_initialized) { + spawn_globals.sigchld_initialized = true; + +// struct sigaction sa; +// sa.sa_handler = sigchld_handler; +// sigemptyset(&sa.sa_mask); +// sa.sa_flags = SA_RESTART | SA_NOCLDSTOP; +// if (sigaction(SIGCHLD, &sa, NULL) == -1) { +// nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: Failed to set SIGCHLD handler"); +// freez((void *)server->name); +// freez(server); +// return NULL; +// } + } + + return server; +} + +void spawn_server_destroy(SPAWN_SERVER *server) { + if (!server) return; + freez((void *)server->name); + freez(server); +} + +SPAWN_INSTANCE* spawn_server_exec(SPAWN_SERVER *server, int stderr_fd, int custom_fd __maybe_unused, const char **argv, const void *data __maybe_unused, size_t data_size __maybe_unused, SPAWN_INSTANCE_TYPE type) { + if (type != SPAWN_INSTANCE_TYPE_EXEC) + return NULL; + + CLEAN_BUFFER *cmdline_wb = argv_to_cmdline_buffer(argv); + const char *cmdline = buffer_tostring(cmdline_wb); + + SPAWN_INSTANCE *si = callocz(1, sizeof(SPAWN_INSTANCE)); + si->child_pid = -1; + si->request_id = __atomic_add_fetch(&server->request_id, 1, __ATOMIC_RELAXED); + + int stdin_pipe[2] = { -1, -1 }; + int stdout_pipe[2] = { -1, -1 }; + + if (pipe(stdin_pipe) == -1) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: stdin pipe() failed: %s", cmdline); + freez(si); + return NULL; + } + + if (pipe(stdout_pipe) == -1) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: stdout pipe() failed: %s", cmdline); + close(stdin_pipe[PIPE_READ]); + close(stdin_pipe[PIPE_WRITE]); + freez(si); + return NULL; + } + + posix_spawn_file_actions_t file_actions; + posix_spawnattr_t attr; + + if (posix_spawn_file_actions_init(&file_actions) != 0) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: posix_spawn_file_actions_init() failed: %s", cmdline); + close(stdin_pipe[PIPE_READ]); + close(stdin_pipe[PIPE_WRITE]); + close(stdout_pipe[PIPE_READ]); + close(stdout_pipe[PIPE_WRITE]); + freez(si); + return 
NULL; + } + + posix_spawn_file_actions_adddup2(&file_actions, stdin_pipe[PIPE_READ], STDIN_FILENO); + posix_spawn_file_actions_adddup2(&file_actions, stdout_pipe[PIPE_WRITE], STDOUT_FILENO); + posix_spawn_file_actions_addclose(&file_actions, stdin_pipe[PIPE_READ]); + posix_spawn_file_actions_addclose(&file_actions, stdin_pipe[PIPE_WRITE]); + posix_spawn_file_actions_addclose(&file_actions, stdout_pipe[PIPE_READ]); + posix_spawn_file_actions_addclose(&file_actions, stdout_pipe[PIPE_WRITE]); + if(stderr_fd != STDERR_FILENO) { + posix_spawn_file_actions_adddup2(&file_actions, stderr_fd, STDERR_FILENO); + posix_spawn_file_actions_addclose(&file_actions, stderr_fd); + } + + if (posix_spawnattr_init(&attr) != 0) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: posix_spawnattr_init() failed: %s", cmdline); + posix_spawn_file_actions_destroy(&file_actions); + close(stdin_pipe[PIPE_READ]); + close(stdin_pipe[PIPE_WRITE]); + close(stdout_pipe[PIPE_READ]); + close(stdout_pipe[PIPE_WRITE]); + freez(si); + return NULL; + } + + // Set the flags to reset the signal mask and signal actions + sigset_t empty_mask; + sigemptyset(&empty_mask); + if (posix_spawnattr_setsigmask(&attr, &empty_mask) != 0) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: posix_spawnattr_setsigmask() failed: %s", cmdline); + posix_spawn_file_actions_destroy(&file_actions); + posix_spawnattr_destroy(&attr); + return false; + } + + short flags = POSIX_SPAWN_SETSIGMASK | POSIX_SPAWN_SETSIGDEF; + if (posix_spawnattr_setflags(&attr, flags) != 0) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: posix_spawnattr_setflags() failed: %s", cmdline); + posix_spawn_file_actions_destroy(&file_actions); + posix_spawnattr_destroy(&attr); + return false; + } + + spinlock_lock(&spawn_globals.spinlock); + DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(spawn_globals.instances, si, prev, next); + spinlock_unlock(&spawn_globals.spinlock); + + // unfortunately, on CYGWIN/MSYS posix_spawn() is not thread safe + // so, we run it 
one by one. + static SPINLOCK spinlock = NETDATA_SPINLOCK_INITIALIZER; + spinlock_lock(&spinlock); + + int fds[3] = { stdin_pipe[PIPE_READ], stdout_pipe[PIPE_WRITE], stderr_fd }; + os_close_all_non_std_open_fds_except(fds, 3, CLOSE_RANGE_CLOEXEC); + + errno_clear(); + if (posix_spawn(&si->child_pid, argv[0], &file_actions, &attr, (char * const *)argv, environ) != 0) { + spinlock_unlock(&spinlock); + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: posix_spawn() failed: %s", cmdline); + + spinlock_lock(&spawn_globals.spinlock); + DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(spawn_globals.instances, si, prev, next); + spinlock_unlock(&spawn_globals.spinlock); + + posix_spawnattr_destroy(&attr); + posix_spawn_file_actions_destroy(&file_actions); + + close(stdin_pipe[PIPE_READ]); + close(stdin_pipe[PIPE_WRITE]); + close(stdout_pipe[PIPE_READ]); + close(stdout_pipe[PIPE_WRITE]); + freez(si); + return NULL; + } + spinlock_unlock(&spinlock); + + // Destroy the posix_spawnattr_t and posix_spawn_file_actions_t structures + posix_spawnattr_destroy(&attr); + posix_spawn_file_actions_destroy(&file_actions); + + // Close the read end of the stdin pipe and the write end of the stdout pipe in the parent process + close(stdin_pipe[PIPE_READ]); + close(stdout_pipe[PIPE_WRITE]); + + si->write_fd = stdin_pipe[PIPE_WRITE]; + si->read_fd = stdout_pipe[PIPE_READ]; + si->cmdline = strdupz(cmdline); + + nd_log(NDLS_COLLECTORS, NDLP_INFO, + "SPAWN SERVER: process created with pid %d: %s", + si->child_pid, cmdline); + return si; +} + +int spawn_server_exec_kill(SPAWN_SERVER *server, SPAWN_INSTANCE *si) { + if (!si) return -1; + + if (kill(si->child_pid, SIGTERM)) + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN PARENT: kill() of pid %d failed: %s", + si->child_pid, si->cmdline); + + return spawn_server_exec_wait(server, si); +} + +int spawn_server_exec_wait(SPAWN_SERVER *server __maybe_unused, SPAWN_INSTANCE *si) { + if (!si) return -1; + + // Close all pipe descriptors to force the child to exit + if 
(si->read_fd != -1) close(si->read_fd); + if (si->write_fd != -1) close(si->write_fd); + + // Wait for the process to exit + int status = __atomic_load_n(&si->waitpid_status, __ATOMIC_RELAXED); + bool exited = __atomic_load_n(&si->exited, __ATOMIC_RELAXED); + if(!exited) { + if(waitpid(si->child_pid, &status, 0) != si->child_pid) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN PARENT: failed to wait for pid %d: %s", + si->child_pid, si->cmdline); + status = -1; + } + else { + nd_log(NDLS_COLLECTORS, NDLP_INFO, + "SPAWN PARENT: child with pid %d exited with status %d (waitpid): %s", + si->child_pid, status, si->cmdline); + } + } + else + nd_log(NDLS_COLLECTORS, NDLP_INFO, + "SPAWN PARENT: child with pid %d exited with status %d (sighandler): %s", + si->child_pid, status, si->cmdline); + + spinlock_lock(&spawn_globals.spinlock); + DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(spawn_globals.instances, si, prev, next); + spinlock_unlock(&spawn_globals.spinlock); + + freez((void *)si->cmdline); + freez(si); + return status; +} + +#endif diff --git a/src/libnetdata/spawn_server/spawn_server_windows.c b/src/libnetdata/spawn_server/spawn_server_windows.c new file mode 100644 index 00000000000000..29f5a58d71b625 --- /dev/null +++ b/src/libnetdata/spawn_server/spawn_server_windows.c @@ -0,0 +1,417 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "spawn_server_internals.h" + +#if defined(SPAWN_SERVER_VERSION_WINDOWS) + +int spawn_server_instance_read_fd(SPAWN_INSTANCE *si) { return si->read_fd; } +int spawn_server_instance_write_fd(SPAWN_INSTANCE *si) { return si->write_fd; } +void spawn_server_instance_read_fd_unset(SPAWN_INSTANCE *si) { si->read_fd = -1; } +void spawn_server_instance_write_fd_unset(SPAWN_INSTANCE *si) { si->write_fd = -1; } + +pid_t spawn_server_instance_pid(SPAWN_INSTANCE *si) { + if(si->child_pid != -1) + return si->child_pid; + + return (pid_t)si->dwProcessId; +} + +static void update_cygpath_env(void) { + static volatile bool done = false; + + 
if(done) return; + done = true; + + char win_path[MAX_PATH]; + + // Convert Cygwin root path to Windows path + cygwin_conv_path(CCP_POSIX_TO_WIN_A, "/", win_path, sizeof(win_path)); + + nd_setenv("NETDATA_CYGWIN_BASE_PATH", win_path, 1); + + nd_log(NDLS_COLLECTORS, NDLP_INFO, "Cygwin/MSYS2 base path set to '%s'", win_path); +} + +SPAWN_SERVER* spawn_server_create(SPAWN_SERVER_OPTIONS options __maybe_unused, const char *name, spawn_request_callback_t cb __maybe_unused, int argc __maybe_unused, const char **argv __maybe_unused) { + update_cygpath_env(); + + SPAWN_SERVER* server = callocz(1, sizeof(SPAWN_SERVER)); + if(name) + server->name = strdupz(name); + else + server->name = strdupz("unnamed"); + return server; +} + +void spawn_server_destroy(SPAWN_SERVER *server) { + if (server) { + freez((void *)server->name); + freez(server); + } +} + +static BUFFER *argv_to_windows(const char **argv) { + BUFFER *wb = buffer_create(0, NULL); + + // argv[0] is the path + char b[strlen(argv[0]) * 2 + 1024]; + cygwin_conv_path(CCP_POSIX_TO_WIN_A | CCP_ABSOLUTE, argv[0], b, sizeof(b)); + + for(size_t i = 0; argv[i] ;i++) { + const char *s = (i == 0) ? 
b : argv[i]; + size_t len = strlen(s); + buffer_need_bytes(wb, len * 2 + 1); + + bool needs_quotes = false; + for(const char *c = s; !needs_quotes && *c ; c++) { + switch(*c) { + case ' ': + case '\v': + case '\t': + case '\n': + case '"': + needs_quotes = true; + break; + + default: + break; + } + } + + if(buffer_strlen(wb)) { + if (needs_quotes) + buffer_strcat(wb, " \""); + else + buffer_putc(wb, ' '); + } + + for(const char *c = s; *c ; c++) { + switch(*c) { + case '"': + buffer_putc(wb, '\\'); + // fall through + + default: + buffer_putc(wb, *c); + break; + } + } + + if(needs_quotes) + buffer_strcat(wb, "\""); + } + + return wb; +} + +int set_fd_blocking(int fd) { + int flags = fcntl(fd, F_GETFL, 0); + if (flags == -1) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: fcntl(F_GETFL) failed"); + return -1; + } + + flags &= ~O_NONBLOCK; + if (fcntl(fd, F_SETFL, flags) == -1) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: fcntl(F_SETFL) failed"); + return -1; + } + + return 0; +} + +//static void print_environment_block(char *env_block) { +// if (env_block == NULL) { +// fprintf(stderr, "Environment block is NULL\n"); +// return; +// } +// +// char *env = env_block; +// while (*env) { +// fprintf(stderr, "ENVIRONMENT: %s\n", env); +// // Move to the next string in the block +// env += strlen(env) + 1; +// } +//} + +SPAWN_INSTANCE* spawn_server_exec(SPAWN_SERVER *server, int stderr_fd, int custom_fd __maybe_unused, const char **argv, const void *data __maybe_unused, size_t data_size __maybe_unused, SPAWN_INSTANCE_TYPE type) { + static SPINLOCK spinlock = NETDATA_SPINLOCK_INITIALIZER; + + if (type != SPAWN_INSTANCE_TYPE_EXEC) + return NULL; + + int pipe_stdin[2] = { -1, -1 }, pipe_stdout[2] = { -1, -1 }; + + errno_clear(); + + SPAWN_INSTANCE *instance = callocz(1, sizeof(*instance)); + instance->request_id = __atomic_add_fetch(&server->request_id, 1, __ATOMIC_RELAXED); + + CLEAN_BUFFER *wb = argv_to_windows(argv); + char *command = (char 
*)buffer_tostring(wb); + + if (pipe(pipe_stdin) == -1) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN PARENT: Cannot create stdin pipe() for request No %zu, command: %s", + instance->request_id, command); + goto cleanup; + } + + if (pipe(pipe_stdout) == -1) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN PARENT: Cannot create stdout pipe() for request No %zu, command: %s", + instance->request_id, command); + goto cleanup; + } + + // Ensure pipes are in blocking mode + if (set_fd_blocking(pipe_stdin[PIPE_READ]) == -1 || set_fd_blocking(pipe_stdin[PIPE_WRITE]) == -1 || + set_fd_blocking(pipe_stdout[PIPE_READ]) == -1 || set_fd_blocking(pipe_stdout[PIPE_WRITE]) == -1) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN PARENT: Failed to set blocking I/O on pipes for request No %zu, command: %s", + instance->request_id, command); + } + + // do not run multiple times this section + // to prevent handles leaking + spinlock_lock(&spinlock); + + // Convert POSIX file descriptors to Windows handles + HANDLE stdin_read_handle = (HANDLE)_get_osfhandle(pipe_stdin[PIPE_READ]); + HANDLE stdout_write_handle = (HANDLE)_get_osfhandle(pipe_stdout[PIPE_WRITE]); + HANDLE stderr_handle = (HANDLE)_get_osfhandle(stderr_fd); + + if (stdin_read_handle == INVALID_HANDLE_VALUE || stdout_write_handle == INVALID_HANDLE_VALUE || stderr_handle == INVALID_HANDLE_VALUE) { + spinlock_unlock(&spinlock); + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN PARENT: Invalid handle value(s) for request No %zu, command: %s", + instance->request_id, command); + goto cleanup; + } + + // Set handle inheritance + if (!SetHandleInformation(stdin_read_handle, HANDLE_FLAG_INHERIT, HANDLE_FLAG_INHERIT) || + !SetHandleInformation(stdout_write_handle, HANDLE_FLAG_INHERIT, HANDLE_FLAG_INHERIT) || + !SetHandleInformation(stderr_handle, HANDLE_FLAG_INHERIT, HANDLE_FLAG_INHERIT)) { + spinlock_unlock(&spinlock); + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN PARENT: Cannot set handle(s) inheritance for request No %zu, command: %s", + 
instance->request_id, command); + goto cleanup; + } + + // Set up the STARTUPINFO structure + STARTUPINFO si; + PROCESS_INFORMATION pi; + ZeroMemory(&si, sizeof(si)); + si.cb = sizeof(si); + si.dwFlags = STARTF_USESTDHANDLES; + si.hStdInput = stdin_read_handle; + si.hStdOutput = stdout_write_handle; + si.hStdError = stderr_handle; + + // Retrieve the current environment block + char* env_block = GetEnvironmentStrings(); +// print_environment_block(env_block); + + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN PARENT: Running request No %zu, command: '%s'", + instance->request_id, command); + + int fds[3] = { pipe_stdin[PIPE_READ], pipe_stdout[PIPE_WRITE], stderr_fd }; + os_close_all_non_std_open_fds_except(fds, 3, CLOSE_RANGE_CLOEXEC); + + // Spawn the process + errno_clear(); + if (!CreateProcess(NULL, command, NULL, NULL, TRUE, 0, env_block, NULL, &si, &pi)) { + spinlock_unlock(&spinlock); + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN PARENT: cannot CreateProcess() for request No %zu, command: %s", + instance->request_id, command); + goto cleanup; + } + + FreeEnvironmentStrings(env_block); + + // When we create a process with the CreateProcess function, it returns two handles: + // - one for the process (pi.hProcess) and + // - one for the primary thread of the new process (pi.hThread). + // Both of these handles need to be explicitly closed when they are no longer needed. 
+ CloseHandle(pi.hThread); + + // end of the critical section + spinlock_unlock(&spinlock); + + // Close unused pipe ends + close(pipe_stdin[PIPE_READ]); pipe_stdin[PIPE_READ] = -1; + close(pipe_stdout[PIPE_WRITE]); pipe_stdout[PIPE_WRITE] = -1; + + // Store process information in instance + instance->dwProcessId = pi.dwProcessId; + instance->child_pid = cygwin_winpid_to_pid((pid_t)pi.dwProcessId); + instance->process_handle = pi.hProcess; + + // Convert handles to POSIX file descriptors + instance->write_fd = pipe_stdin[PIPE_WRITE]; + instance->read_fd = pipe_stdout[PIPE_READ]; + + errno_clear(); + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN PARENT: created process for request No %zu, pid %d (winpid %d), command: %s", + instance->request_id, (int)instance->child_pid, (int)pi.dwProcessId, command); + + return instance; + + cleanup: + if (pipe_stdin[PIPE_READ] >= 0) close(pipe_stdin[PIPE_READ]); + if (pipe_stdin[PIPE_WRITE] >= 0) close(pipe_stdin[PIPE_WRITE]); + if (pipe_stdout[PIPE_READ] >= 0) close(pipe_stdout[PIPE_READ]); + if (pipe_stdout[PIPE_WRITE] >= 0) close(pipe_stdout[PIPE_WRITE]); + freez(instance); + return NULL; +} + +static char* GetErrorString(DWORD errorCode) { + DWORD lastError = GetLastError(); + + LPVOID lpMsgBuf; + DWORD bufLen = FormatMessage( + FORMAT_MESSAGE_ALLOCATE_BUFFER | + FORMAT_MESSAGE_FROM_SYSTEM | + FORMAT_MESSAGE_IGNORE_INSERTS, + NULL, + errorCode, + MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), + (LPTSTR) &lpMsgBuf, + 0, NULL ); + + SetLastError(lastError); + + if (bufLen) { + char* errorString = (char*)LocalAlloc(LMEM_FIXED, bufLen + 1); + if (errorString) { + strcpy(errorString, (char*)lpMsgBuf); + } + LocalFree(lpMsgBuf); + return errorString; + } + + return NULL; +} + +static void TerminateChildProcesses(SPAWN_INSTANCE *si) { + HANDLE hSnapshot = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0); + if (hSnapshot == INVALID_HANDLE_VALUE) + return; + + PROCESSENTRY32 pe; + pe.dwSize = sizeof(PROCESSENTRY32); + + if 
(Process32First(hSnapshot, &pe)) { + do { + if (pe.th32ParentProcessID == si->dwProcessId) { + HANDLE hChildProcess = OpenProcess(PROCESS_TERMINATE, FALSE, pe.th32ProcessID); + if (hChildProcess) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN PARENT: killing subprocess %u of request No %zu, pid %d (winpid %u)", + pe.th32ProcessID, si->request_id, (int)si->child_pid, si->dwProcessId); + + TerminateProcess(hChildProcess, STATUS_CONTROL_C_EXIT); + CloseHandle(hChildProcess); + } + } + } while (Process32Next(hSnapshot, &pe)); + } + + CloseHandle(hSnapshot); +} + +int map_status_code_to_signal(DWORD status_code) { + switch (status_code) { + case STATUS_ACCESS_VIOLATION: + return SIGSEGV; + case STATUS_ILLEGAL_INSTRUCTION: + return SIGILL; + case STATUS_FLOAT_DIVIDE_BY_ZERO: + case STATUS_INTEGER_DIVIDE_BY_ZERO: + case STATUS_ARRAY_BOUNDS_EXCEEDED: + case STATUS_FLOAT_OVERFLOW: + case STATUS_FLOAT_UNDERFLOW: + case STATUS_FLOAT_INVALID_OPERATION: + return SIGFPE; + case STATUS_BREAKPOINT: + case STATUS_SINGLE_STEP: + return SIGTRAP; + case STATUS_STACK_OVERFLOW: + case STATUS_INVALID_HANDLE: + case STATUS_INVALID_PARAMETER: + case STATUS_NO_MEMORY: + case STATUS_PRIVILEGED_INSTRUCTION: + case STATUS_DLL_NOT_FOUND: + case STATUS_DLL_INIT_FAILED: + case STATUS_ORDINAL_NOT_FOUND: + case STATUS_ENTRYPOINT_NOT_FOUND: + case STATUS_CONTROL_STACK_VIOLATION: + case STATUS_STACK_BUFFER_OVERRUN: + case STATUS_ASSERTION_FAILURE: + case STATUS_INVALID_CRUNTIME_PARAMETER: + case STATUS_HEAP_CORRUPTION: + return SIGABRT; + case STATUS_CONTROL_C_EXIT: + return SIGTERM; // we use this internally as such + case STATUS_FATAL_APP_EXIT: + return SIGTERM; + default: + return (status_code & 0xFF) << 8; + } +} + +int spawn_server_exec_kill(SPAWN_SERVER *server __maybe_unused, SPAWN_INSTANCE *si) { + if(si->child_pid != -1 && kill(si->child_pid, SIGTERM) != 0) + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN PARENT: child of request No %zu, pid %d (winpid %u), failed to be killed", + 
si->request_id, (int)si->child_pid, si->dwProcessId); + + // this gives some warnings at the spawn-tester, but it is generally better + // to have them, to avoid abnormal shutdown of the plugins + if(si->read_fd != -1) { close(si->read_fd); si->read_fd = -1; } + if(si->write_fd != -1) { close(si->write_fd); si->write_fd = -1; } + + errno_clear(); + if(TerminateProcess(si->process_handle, STATUS_CONTROL_C_EXIT) == 0) + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN PARENT: child of request No %zu, pid %d (winpid %u), failed to be terminated", + si->request_id, (int)si->child_pid, si->dwProcessId); + + errno_clear(); + TerminateChildProcesses(si); + + return spawn_server_exec_wait(server, si); +} + +int spawn_server_exec_wait(SPAWN_SERVER *server __maybe_unused, SPAWN_INSTANCE *si) { + if(si->read_fd != -1) { close(si->read_fd); si->read_fd = -1; } + if(si->write_fd != -1) { close(si->write_fd); si->write_fd = -1; } + + // wait for the process to end + WaitForSingleObject(si->process_handle, INFINITE); + + DWORD exit_code = -1; + GetExitCodeProcess(si->process_handle, &exit_code); + CloseHandle(si->process_handle); + + char *err = GetErrorString(exit_code); + + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN PARENT: child of request No %zu, pid %d (winpid %u), exited with code %u (0x%x): %s", + si->request_id, (int)si->child_pid, si->dwProcessId, + (unsigned)exit_code, (unsigned)exit_code, err ? 
err : "(no reason text)"); + + if(err) + LocalFree(err); + + freez(si); + return map_status_code_to_signal(exit_code); +} + +#endif diff --git a/src/libnetdata/string/string.h b/src/libnetdata/string/string.h index c44696be2561b7..3408f52591c15d 100644 --- a/src/libnetdata/string/string.h +++ b/src/libnetdata/string/string.h @@ -36,4 +36,11 @@ int string_unittest(size_t entries); void string_init(void); +static inline void cleanup_string_pp(STRING **stringpp) { + if(stringpp) + string_freez(*stringpp); +} + +#define CLEAN_STRING _cleanup_(cleanup_string_pp) STRING + #endif diff --git a/src/libnetdata/uuid/uuid.h b/src/libnetdata/uuid/uuid.h index cde457616e7a15..91d2ad56f5a925 100644 --- a/src/libnetdata/uuid/uuid.h +++ b/src/libnetdata/uuid/uuid.h @@ -37,6 +37,8 @@ ND_UUID UUID_generate_from_hash(const void *payload, size_t payload_len); #define UUIDeq(a, b) ((a).parts.hig64 == (b).parts.hig64 && (a).parts.low64 == (b).parts.low64) +#define UUIDiszero(a) (UUIDeq(a, UUID_ZERO)) + static inline ND_UUID uuid2UUID(const nd_uuid_t uu1) { // uu1 may not be aligned, so copy it to the output ND_UUID copy; diff --git a/src/registry/registry.c b/src/registry/registry.c index 803115231ec2f6..bf303a69ef800a 100644 --- a/src/registry/registry.c +++ b/src/registry/registry.c @@ -154,8 +154,8 @@ static inline int registry_person_url_callback_verify_machine_exists(REGISTRY_PE // that could make this safe, so try to be as atomic as possible. 
void registry_update_cloud_base_url() { - registry.cloud_base_url = appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "cloud base url", DEFAULT_CLOUD_BASE_URL); - setenv("NETDATA_REGISTRY_CLOUD_BASE_URL", registry.cloud_base_url, 1); + registry.cloud_base_url = cloud_config_url_get(); + nd_setenv("NETDATA_REGISTRY_CLOUD_BASE_URL", registry.cloud_base_url, 1); } // ---------------------------------------------------------------------------- @@ -164,21 +164,19 @@ void registry_update_cloud_base_url() { int registry_request_hello_json(RRDHOST *host, struct web_client *w, bool do_not_track) { registry_json_header(host, w, "hello", REGISTRY_STATUS_OK); - if(host->node_id) + if(!uuid_is_null(host->node_id)) buffer_json_member_add_uuid(w->response.data, "node_id", host->node_id); buffer_json_member_add_object(w->response.data, "agent"); { buffer_json_member_add_string(w->response.data, "machine_guid", localhost->machine_guid); - if(localhost->node_id) + if(!uuid_is_null(localhost->node_id)) buffer_json_member_add_uuid(w->response.data, "node_id", localhost->node_id); - char *claim_id = get_agent_claimid(); - if (claim_id) { - buffer_json_member_add_string(w->response.data, "claim_id", claim_id); - freez(claim_id); - } + CLAIM_ID claim_id = claim_id_get(); + if (claim_id_is_set(claim_id)) + buffer_json_member_add_string(w->response.data, "claim_id", claim_id.str); buffer_json_member_add_boolean(w->response.data, "bearer_protection", netdata_is_protected_by_bearer); } @@ -198,7 +196,7 @@ int registry_request_hello_json(RRDHOST *host, struct web_client *w, bool do_not buffer_json_add_array_item_object(w->response.data); buffer_json_member_add_string(w->response.data, "machine_guid", h->machine_guid); - if(h->node_id) + if(!uuid_is_null(h->node_id)) buffer_json_member_add_uuid(w->response.data, "node_id", h->node_id); buffer_json_member_add_string(w->response.data, "hostname", rrdhost_registry_hostname(h)); diff --git a/src/registry/registry_init.c 
b/src/registry/registry_init.c index c291c6f822573b..9093610eab537d 100644 --- a/src/registry/registry_init.c +++ b/src/registry/registry_init.c @@ -101,8 +101,8 @@ int registry_init(void) { registry.enable_cookies_samesite_secure = config_get_boolean(CONFIG_SECTION_REGISTRY, "enable cookies SameSite and Secure", 1); registry_update_cloud_base_url(); - setenv("NETDATA_REGISTRY_HOSTNAME", registry.hostname, 1); - setenv("NETDATA_REGISTRY_URL", registry.registry_to_announce, 1); + nd_setenv("NETDATA_REGISTRY_HOSTNAME", registry.hostname, 1); + nd_setenv("NETDATA_REGISTRY_URL", registry.registry_to_announce, 1); registry.max_url_length = (size_t)config_get_number(CONFIG_SECTION_REGISTRY, "max URL length", 1024); if(registry.max_url_length < 10) { diff --git a/src/registry/registry_internals.c b/src/registry/registry_internals.c index 54fad4254f24b6..5e83bdb79f2a07 100644 --- a/src/registry/registry_internals.c +++ b/src/registry/registry_internals.c @@ -315,7 +315,7 @@ const char *registry_get_this_machine_guid(void) { close(fd); } - setenv("NETDATA_REGISTRY_UNIQUE_ID", guid, 1); + nd_setenv("NETDATA_REGISTRY_UNIQUE_ID", guid, 1); return guid; } diff --git a/src/registry/registry_internals.h b/src/registry/registry_internals.h index c7f8f43dd15a0b..c2270eb83c3929 100644 --- a/src/registry/registry_internals.h +++ b/src/registry/registry_internals.h @@ -33,7 +33,7 @@ struct registry { char *registry_domain; char *hostname; char *registry_to_announce; - char *cloud_base_url; + const char *cloud_base_url; time_t persons_expiration; // seconds to expire idle persons int verify_cookies_redirects; int enable_cookies_samesite_secure; diff --git a/src/streaming/protocol/command-claimed_id.c b/src/streaming/protocol/command-claimed_id.c new file mode 100644 index 00000000000000..b4c815203b7289 --- /dev/null +++ b/src/streaming/protocol/command-claimed_id.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "commands.h" +#include 
"collectors/plugins.d/pluginsd_internals.h" + +PARSER_RC rrdpush_receiver_pluginsd_claimed_id(char **words, size_t num_words, PARSER *parser) { + const char *machine_guid_str = get_word(words, num_words, 1); + const char *claim_id_str = get_word(words, num_words, 2); + + if (!machine_guid_str || !claim_id_str) { + netdata_log_error("PLUGINSD: command CLAIMED_ID came malformed, machine_guid '%s', claim_id '%s'", + machine_guid_str ? machine_guid_str : "[unset]", + claim_id_str ? claim_id_str : "[unset]"); + return PARSER_RC_ERROR; + } + + RRDHOST *host = parser->user.host; + + nd_uuid_t machine_uuid; + if(uuid_parse(machine_guid_str, machine_uuid)) { + netdata_log_error("PLUGINSD: parameter machine guid to CLAIMED_ID command is not valid UUID. " + "Received: '%s'.", machine_guid_str); + return PARSER_RC_ERROR; + } + + nd_uuid_t claim_uuid; + if(strcmp(claim_id_str, "NULL") == 0) + uuid_clear(claim_uuid); + + else if(uuid_parse(claim_id_str, claim_uuid) != 0) { + netdata_log_error("PLUGINSD: parameter claim id to CLAIMED_ID command is not valid UUID. " + "Received: '%s'.", claim_id_str); + return PARSER_RC_ERROR; + } + + if(strcmp(machine_guid_str, host->machine_guid) != 0) { + netdata_log_error("PLUGINSD: received claim id for host '%s' but it came over the connection of '%s'", + machine_guid_str, host->machine_guid); + return PARSER_RC_OK; //the message is OK problem must be somewhere else + } + + if(host == localhost) { + netdata_log_error("PLUGINSD: CLAIMED_ID command cannot be used to set the claimed id of localhost. 
" + "Received: '%s'.", claim_id_str); + return PARSER_RC_OK; + } + + if(!uuid_is_null(claim_uuid)) { + uuid_copy(host->aclk.claim_id_of_origin.uuid, claim_uuid); + rrdpush_sender_send_claimed_id(host); + } + + return PARSER_RC_OK; +} + +void rrdpush_sender_send_claimed_id(RRDHOST *host) { + if(!stream_has_capability(host->sender, STREAM_CAP_CLAIM)) + return; + + if(unlikely(!rrdhost_can_send_definitions_to_parent(host))) + return; + + BUFFER *wb = sender_start(host->sender); + + char str[UUID_STR_LEN] = ""; + ND_UUID uuid = host->aclk.claim_id_of_origin; + if(!UUIDiszero(uuid)) + uuid_unparse_lower(uuid.uuid, str); + else + strncpyz(str, "NULL", sizeof(str) - 1); + + buffer_sprintf(wb, PLUGINSD_KEYWORD_CLAIMED_ID " '%s' '%s'\n", + host->machine_guid, str); + + sender_commit(host->sender, wb, STREAM_TRAFFIC_TYPE_METADATA); + + sender_thread_buffer_free(); +} diff --git a/src/streaming/protocol/command-nodeid.c b/src/streaming/protocol/command-nodeid.c new file mode 100644 index 00000000000000..bf003b6746d761 --- /dev/null +++ b/src/streaming/protocol/command-nodeid.c @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "commands.h" +#include "collectors/plugins.d/pluginsd_internals.h" + +// the child disconnected from the parent, and it has to clear the parent's claim id +void rrdpush_sender_clear_child_claim_id(RRDHOST *host) { + host->aclk.claim_id_of_parent = UUID_ZERO; +} + +// the parent sends to the child its claim id, node id and cloud url +void rrdpush_receiver_send_node_and_claim_id_to_child(RRDHOST *host) { + if(host == localhost || uuid_is_null(host->node_id)) return; + + spinlock_lock(&host->receiver_lock); + if(host->receiver && stream_has_capability(host->receiver, STREAM_CAP_NODE_ID)) { + char node_id_str[UUID_STR_LEN] = ""; + uuid_unparse_lower(host->node_id, node_id_str); + + CLAIM_ID claim_id = claim_id_get(); + + if((!claim_id_is_set(claim_id) || !aclk_online())) { + // the agent is not claimed or not connected, just use 
parent claim id + // to allow the connection flow. + // this may be zero and it is ok. + claim_id.uuid = host->aclk.claim_id_of_parent; + uuid_unparse_lower(claim_id.uuid.uuid, claim_id.str); + } + + char buf[4096]; + snprintfz(buf, sizeof(buf), + PLUGINSD_KEYWORD_NODE_ID " '%s' '%s' '%s'\n", + claim_id.str, node_id_str, cloud_config_url_get()); + + send_to_plugin(buf, __atomic_load_n(&host->receiver->parser, __ATOMIC_RELAXED)); + } + spinlock_unlock(&host->receiver_lock); +} + +// the sender of the child receives node id, claim id and cloud url from the receiver of the parent +void rrdpush_sender_get_node_and_claim_id_from_parent(struct sender_state *s) { + char *claim_id = get_word(s->line.words, s->line.num_words, 1); + char *node_id = get_word(s->line.words, s->line.num_words, 2); + char *url = get_word(s->line.words, s->line.num_words, 3); + + bool claimed = is_agent_claimed(); + + ND_UUID claim_uuid; + if (uuid_parse(claim_id ? claim_id : "", claim_uuid.uuid) != 0) { + nd_log(NDLS_DAEMON, NDLP_ERR, + "STREAM %s [send to %s] received invalid claim id '%s'", + rrdhost_hostname(s->host), s->connected_to, + claim_id ? claim_id : "(unset)"); + return; + } + + ND_UUID node_uuid; + if(uuid_parse(node_id ? node_id : "", node_uuid.uuid) != 0) { + nd_log(NDLS_DAEMON, NDLP_ERR, + "STREAM %s [send to %s] received an invalid node id '%s'", + rrdhost_hostname(s->host), s->connected_to, + node_id ? node_id : "(unset)"); + return; + } + + if (!UUIDiszero(s->host->aclk.claim_id_of_parent) && !UUIDeq(s->host->aclk.claim_id_of_parent, claim_uuid)) + nd_log(NDLS_DAEMON, NDLP_INFO, + "STREAM %s [send to %s] changed parent's claim id to %s", + rrdhost_hostname(s->host), s->connected_to, claim_id ? claim_id : "(unset)"); + + if(!uuid_is_null(s->host->node_id) && uuid_compare(s->host->node_id, node_uuid.uuid) != 0) { + if(claimed) { + nd_log(NDLS_DAEMON, NDLP_ERR, + "STREAM %s [send to %s] parent reports different node id '%s', but we are claimed. 
Ignoring it.", + rrdhost_hostname(s->host), s->connected_to, node_id ? node_id : "(unset)"); + return; + } + else + nd_log(NDLS_DAEMON, NDLP_WARNING, + "STREAM %s [send to %s] changed node id to %s", + rrdhost_hostname(s->host), s->connected_to, node_id ? node_id : "(unset)"); + } + + if(!url || !*url) { + nd_log(NDLS_DAEMON, NDLP_ERR, + "STREAM %s [send to %s] received an invalid cloud URL '%s'", + rrdhost_hostname(s->host), s->connected_to, + url ? url : "(unset)"); + return; + } + + s->host->aclk.claim_id_of_parent = claim_uuid; + + // There are some very strange corner cases here: + // + // - Agent is claimed but offline, and it receives node_id and cloud_url from a different Netdata Cloud. + // - Agent is configured to talk to an on-prem Netdata Cloud, it is offline, but the parent is connected + // to a different Netdata Cloud. + // + // The solution below, tries to get the agent online, using the latest information. + // So, if the agent is not claimed or not connected, we inherit whatever information sent from the parent, + // to allow the user to work with it. + + if(claimed && aclk_online()) + // we are directly claimed and connected, ignore node id and cloud url + return; + + if(uuid_is_null(s->host->node_id)) + uuid_copy(s->host->node_id, node_uuid.uuid); + + // we change the URL, to allow the agent dashboard to work with Netdata Cloud on-prem, if any. 
+ cloud_config_url_set(url); + + // send it down the line (to children) + rrdpush_receiver_send_node_and_claim_id_to_child(s->host); +} diff --git a/src/streaming/protocol/commands.c b/src/streaming/protocol/commands.c new file mode 100644 index 00000000000000..95a34529b6497b --- /dev/null +++ b/src/streaming/protocol/commands.c @@ -0,0 +1,3 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "commands.h" diff --git a/src/streaming/protocol/commands.h b/src/streaming/protocol/commands.h new file mode 100644 index 00000000000000..a75713755f2231 --- /dev/null +++ b/src/streaming/protocol/commands.h @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_STREAMING_PROTCOL_COMMANDS_H +#define NETDATA_STREAMING_PROTCOL_COMMANDS_H + +#include "../rrdpush.h" + +void rrdpush_sender_get_node_and_claim_id_from_parent(struct sender_state *s); +void rrdpush_receiver_send_node_and_claim_id_to_child(RRDHOST *host); +void rrdpush_sender_clear_child_claim_id(RRDHOST *host); + +void rrdpush_sender_send_claimed_id(RRDHOST *host); + +#endif //NETDATA_STREAMING_PROTCOL_COMMANDS_H diff --git a/src/streaming/receiver.c b/src/streaming/receiver.c index 50da031a71e804..ecb23af6310cb4 100644 --- a/src/streaming/receiver.c +++ b/src/streaming/receiver.c @@ -19,9 +19,7 @@ void receiver_state_free(struct receiver_state *rpt) { freez(rpt->program_name); freez(rpt->program_version); -#ifdef ENABLE_HTTPS netdata_ssl_close(&rpt->ssl); -#endif if(rpt->fd != -1) { internal_error(true, "closing socket..."); @@ -73,9 +71,7 @@ static inline int read_stream(struct receiver_state *r, char* buffer, size_t siz errno_clear(); switch(wait_on_socket_or_cancel_with_timeout( -#ifdef ENABLE_HTTPS &r->ssl, -#endif r->fd, 0, POLLIN, NULL)) { case 0: // data are waiting @@ -95,14 +91,10 @@ static inline int read_stream(struct receiver_state *r, char* buffer, size_t siz return -2; } -#ifdef ENABLE_HTTPS if (SSL_connection(&r->ssl)) bytes_read = netdata_ssl_read(&r->ssl, buffer, 
size); else bytes_read = read(r->fd, buffer, size); -#else - bytes_read = read(r->fd, buffer, size); -#endif } while(bytes_read < 0 && errno == EINTR && tries--); @@ -327,7 +319,7 @@ static size_t streaming_parser(struct receiver_state *rpt, struct plugind *cd, i .capabilities = rpt->capabilities, }; - parser = parser_init(&user, NULL, NULL, fd, PARSER_INPUT_SPLIT, ssl); + parser = parser_init(&user, fd, fd, PARSER_INPUT_SPLIT, ssl); } #ifdef ENABLE_H2O @@ -338,10 +330,6 @@ static size_t streaming_parser(struct receiver_state *rpt, struct plugind *cd, i rrd_collector_started(); - // this keeps the parser with its current value - // so, parser needs to be allocated before pushing it - CLEANUP_FUNCTION_REGISTER(pluginsd_process_thread_cleanup) parser_ptr = parser; - bool compressed_connection = rrdpush_decompression_initialize(rpt); buffered_reader_init(&rpt->reader); @@ -367,6 +355,9 @@ static size_t streaming_parser(struct receiver_state *rpt, struct plugind *cd, i }; ND_LOG_STACK_PUSH(lgs); + __atomic_store_n(&rpt->parser, parser, __ATOMIC_RELAXED); + rrdpush_receiver_send_node_and_claim_id_to_child(rpt->host); + while(!receiver_should_stop(rpt)) { if(!buffered_reader_next_line(&rpt->reader, buffer)) { @@ -391,6 +382,13 @@ static size_t streaming_parser(struct receiver_state *rpt, struct plugind *cd, i buffer->len = 0; buffer->buffer[0] = '\0'; } + + // make sure send_to_plugin() will not write any data to the socket + spinlock_lock(&parser->writer.spinlock); + parser->fd_output = -1; + parser->ssl_output = NULL; + spinlock_unlock(&parser->writer.spinlock); + result = parser->user.data_collections_count; return result; } @@ -409,7 +407,7 @@ static bool rrdhost_set_receiver(RRDHOST *host, struct receiver_state *rpt) { bool signal_rrdcontext = false; bool set_this = false; - netdata_mutex_lock(&host->receiver_lock); + spinlock_lock(&host->receiver_lock); if (!host->receiver) { rrdhost_flag_clear(host, RRDHOST_FLAG_ORPHAN); @@ -452,7 +450,7 @@ static bool 
rrdhost_set_receiver(RRDHOST *host, struct receiver_state *rpt) { set_this = true; } - netdata_mutex_unlock(&host->receiver_lock); + spinlock_unlock(&host->receiver_lock); if(signal_rrdcontext) rrdcontext_host_child_connected(host); @@ -464,13 +462,16 @@ static void rrdhost_clear_receiver(struct receiver_state *rpt) { RRDHOST *host = rpt->host; if(host) { bool signal_rrdcontext = false; - netdata_mutex_lock(&host->receiver_lock); + spinlock_lock(&host->receiver_lock); // Make sure that we detach this thread and don't kill a freshly arriving receiver if(host->receiver == rpt) { __atomic_sub_fetch(&localhost->connected_children_count, 1, __ATOMIC_RELAXED); rrdhost_flag_set(rpt->host, RRDHOST_FLAG_RRDPUSH_RECEIVER_DISCONNECTED); + pluginsd_process_cleanup(rpt->parser); + __atomic_store_n(&rpt->parser, NULL, __ATOMIC_RELAXED); + host->trigger_chart_obsoletion_check = 0; host->child_connect_time = 0; host->child_disconnected_time = now_realtime_sec(); @@ -490,7 +491,7 @@ static void rrdhost_clear_receiver(struct receiver_state *rpt) { rrdcalc_child_disconnected(host); } - netdata_mutex_unlock(&host->receiver_lock); + spinlock_unlock(&host->receiver_lock); if(signal_rrdcontext) rrdcontext_host_child_disconnected(host); @@ -502,7 +503,7 @@ static void rrdhost_clear_receiver(struct receiver_state *rpt) { bool stop_streaming_receiver(RRDHOST *host, STREAM_HANDSHAKE reason) { bool ret = false; - netdata_mutex_lock(&host->receiver_lock); + spinlock_lock(&host->receiver_lock); if(host->receiver) { if(!host->receiver->exit.shutdown) { @@ -516,12 +517,12 @@ bool stop_streaming_receiver(RRDHOST *host, STREAM_HANDSHAKE reason) { int count = 2000; while (host->receiver && count-- > 0) { - netdata_mutex_unlock(&host->receiver_lock); + spinlock_unlock(&host->receiver_lock); // let the lock for the receiver thread to exit sleep_usec(1 * USEC_PER_MS); - netdata_mutex_lock(&host->receiver_lock); + spinlock_lock(&host->receiver_lock); } if(host->receiver) @@ -533,16 +534,14 @@ bool 
stop_streaming_receiver(RRDHOST *host, STREAM_HANDSHAKE reason) { else ret = true; - netdata_mutex_unlock(&host->receiver_lock); + spinlock_unlock(&host->receiver_lock); return ret; } static void rrdpush_send_error_on_taken_over_connection(struct receiver_state *rpt, const char *msg) { (void) send_timeout( -#ifdef ENABLE_HTTPS &rpt->ssl, -#endif rpt->fd, (char *)msg, strlen(msg), @@ -732,11 +731,7 @@ static void rrdpush_receive(struct receiver_state *rpt) , rpt->host->rrd_history_entries , rrd_memory_mode_name(rpt->host->rrd_memory_mode) , (rpt->config.health_enabled == CONFIG_BOOLEAN_NO)?"disabled":((rpt->config.health_enabled == CONFIG_BOOLEAN_YES)?"enabled":"auto") -#ifdef ENABLE_HTTPS , (rpt->ssl.conn != NULL) ? " SSL," : "" -#else - , "" -#endif ); #endif // NETDATA_INTERNAL_CHECKS @@ -786,9 +781,7 @@ static void rrdpush_receive(struct receiver_state *rpt) } else { #endif ssize_t bytes_sent = send_timeout( -#ifdef ENABLE_HTTPS &rpt->ssl, -#endif rpt->fd, initial_response, strlen(initial_response), 0, 60); if(bytes_sent != (ssize_t)strlen(initial_response)) { @@ -830,12 +823,9 @@ static void rrdpush_receive(struct receiver_state *rpt) rpt, "connected and ready to receive data", RRDPUSH_STATUS_CONNECTED, NDLP_INFO); -#ifdef ENABLE_ACLK // in case we have cloud connection we inform cloud // new child connected - if (netdata_cloud_enabled) - aclk_host_state_update(rpt->host, 1, 1); -#endif + aclk_host_state_update(rpt->host, 1, 1); rrdhost_set_is_parent_label(); @@ -845,14 +835,10 @@ static void rrdpush_receive(struct receiver_state *rpt) // let it reconnect to parent immediately rrdpush_reset_destinations_postpone_time(rpt->host); - size_t count = streaming_parser(rpt, &cd, rpt->fd, -#ifdef ENABLE_HTTPS - (rpt->ssl.conn) ? &rpt->ssl : NULL -#else - NULL -#endif - ); + // receive data + size_t count = streaming_parser(rpt, &cd, rpt->fd, (rpt->ssl.conn) ? 
&rpt->ssl : NULL); + // the parser stopped receiver_set_exit_reason(rpt, STREAM_HANDSHAKE_DISCONNECT_PARSER_EXIT, false); { @@ -863,12 +849,9 @@ static void rrdpush_receive(struct receiver_state *rpt) RRDPUSH_STATUS_DISCONNECTED, NDLP_WARNING); } -#ifdef ENABLE_ACLK // in case we have cloud connection we inform cloud // a child disconnected - if (netdata_cloud_enabled) - aclk_host_state_update(rpt->host, 0, 1); -#endif + aclk_host_state_update(rpt->host, 0, 1); cleanup: ; @@ -903,11 +886,7 @@ static bool stream_receiver_log_transport(BUFFER *wb, void *ptr) { if(!rpt) return false; -#ifdef ENABLE_HTTPS buffer_strcat(wb, SSL_connection(&rpt->ssl) ? "https" : "http"); -#else - buffer_strcat(wb, "http"); -#endif return true; } diff --git a/src/streaming/replication.c b/src/streaming/replication.c index 1f5aeb34c09180..e354dee035447d 100644 --- a/src/streaming/replication.c +++ b/src/streaming/replication.c @@ -718,7 +718,7 @@ bool replication_response_execute_and_finalize(struct replication_query *q, size struct replication_request_details { struct { send_command callback; - void *data; + struct parser *parser; } caller; RRDHOST *host; @@ -826,7 +826,7 @@ static bool send_replay_chart_cmd(struct replication_request_details *r, const c rrdset_id(st), r->wanted.start_streaming ? 
"true" : "false", (unsigned long long)r->wanted.after, (unsigned long long)r->wanted.before); - ssize_t ret = r->caller.callback(buffer, r->caller.data); + ssize_t ret = r->caller.callback(buffer, r->caller.parser); if (ret < 0) { netdata_log_error("REPLAY ERROR: 'host:%s/chart:%s' failed to send replication request to child (error %zd)", rrdhost_hostname(r->host), rrdset_id(r->st), ret); @@ -836,14 +836,14 @@ static bool send_replay_chart_cmd(struct replication_request_details *r, const c return true; } -bool replicate_chart_request(send_command callback, void *callback_data, RRDHOST *host, RRDSET *st, +bool replicate_chart_request(send_command callback, struct parser *parser, RRDHOST *host, RRDSET *st, time_t child_first_entry, time_t child_last_entry, time_t child_wall_clock_time, time_t prev_first_entry_wanted, time_t prev_last_entry_wanted) { struct replication_request_details r = { .caller = { .callback = callback, - .data = callback_data, + .parser = parser, }, .host = host, diff --git a/src/streaming/replication.h b/src/streaming/replication.h index 507b7c32f75704..9448199fb96bfc 100644 --- a/src/streaming/replication.h +++ b/src/streaming/replication.h @@ -17,9 +17,9 @@ struct replication_query_statistics replication_get_query_statistics(void); bool replicate_chart_response(RRDHOST *rh, RRDSET *rs, bool start_streaming, time_t after, time_t before); -typedef ssize_t (*send_command)(const char *txt, void *data); +typedef ssize_t (*send_command)(const char *txt, struct parser *parser); -bool replicate_chart_request(send_command callback, void *callback_data, +bool replicate_chart_request(send_command callback, struct parser *parser, RRDHOST *rh, RRDSET *rs, time_t child_first_entry, time_t child_last_entry, time_t child_wall_clock_time, time_t response_first_start_time, time_t response_last_end_time); diff --git a/src/streaming/rrdpush.c b/src/streaming/rrdpush.c index 23a86e72070fd5..6b0cc9ebe11845 100644 --- a/src/streaming/rrdpush.c +++ 
b/src/streaming/rrdpush.c @@ -44,23 +44,21 @@ STREAM_CAPABILITIES globally_disabled_capabilities = STREAM_CAP_NONE; unsigned int default_rrdpush_compression_enabled = 1; char *default_rrdpush_destination = NULL; char *default_rrdpush_api_key = NULL; -char *default_rrdpush_send_charts_matching = NULL; +char *default_rrdpush_send_charts_matching = "*"; bool default_rrdpush_enable_replication = true; time_t default_rrdpush_seconds_to_replicate = 86400; time_t default_rrdpush_replication_step = 600; -#ifdef ENABLE_HTTPS char *netdata_ssl_ca_path = NULL; char *netdata_ssl_ca_file = NULL; -#endif static void load_stream_conf() { errno_clear(); - char *filename = strdupz_path_subpath(netdata_configured_user_config_dir, "stream.conf"); + char *filename = filename_from_path_entry_strdupz(netdata_configured_user_config_dir, "stream.conf"); if(!appconfig_load(&stream_config, filename, 0, NULL)) { nd_log_daemon(NDLP_NOTICE, "CONFIG: cannot load user config '%s'. Will try stock config.", filename); freez(filename); - filename = strdupz_path_subpath(netdata_configured_stock_config_dir, "stream.conf"); + filename = filename_from_path_entry_strdupz(netdata_configured_stock_config_dir, "stream.conf"); if(!appconfig_load(&stream_config, filename, 0, NULL)) nd_log_daemon(NDLP_NOTICE, "CONFIG: cannot load stock config '%s'. 
Running with internal defaults.", filename); } @@ -100,7 +98,7 @@ int rrdpush_init() { default_rrdpush_enabled = (unsigned int)appconfig_get_boolean(&stream_config, CONFIG_SECTION_STREAM, "enabled", default_rrdpush_enabled); default_rrdpush_destination = appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "destination", ""); default_rrdpush_api_key = appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "api key", ""); - default_rrdpush_send_charts_matching = appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "send charts matching", "*"); + default_rrdpush_send_charts_matching = appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "send charts matching", default_rrdpush_send_charts_matching); default_rrdpush_enable_replication = config_get_boolean(CONFIG_SECTION_DB, "enable replication", default_rrdpush_enable_replication); default_rrdpush_seconds_to_replicate = config_get_number(CONFIG_SECTION_DB, "seconds to replicate", default_rrdpush_seconds_to_replicate); @@ -132,7 +130,6 @@ int rrdpush_init() { default_rrdpush_enabled = 0; } -#ifdef ENABLE_HTTPS netdata_ssl_validate_certificate_sender = !appconfig_get_boolean(&stream_config, CONFIG_SECTION_STREAM, "ssl skip certificate verification", !netdata_ssl_validate_certificate); if(!netdata_ssl_validate_certificate_sender) @@ -140,7 +137,6 @@ int rrdpush_init() { netdata_ssl_ca_path = appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "CApath", NULL); netdata_ssl_ca_file = appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "CAfile", NULL); -#endif return default_rrdpush_enabled; } @@ -172,12 +168,31 @@ static inline bool should_send_chart_matching(RRDSET *st, RRDSET_FLAGS flags) { else rrdset_flag_set(st, RRDSET_FLAG_UPSTREAM_IGNORE); } - else if(simple_pattern_matches_string(host->rrdpush_send_charts_matching, st->id) || - simple_pattern_matches_string(host->rrdpush_send_charts_matching, st->name)) + else { + int negative = 0, positive = 0; + SIMPLE_PATTERN_RESULT r; - rrdset_flag_set(st, 
RRDSET_FLAG_UPSTREAM_SEND); - else - rrdset_flag_set(st, RRDSET_FLAG_UPSTREAM_IGNORE); + r = simple_pattern_matches_string_extract(host->rrdpush.send.charts_matching, st->context, NULL, 0); + if(r == SP_MATCHED_POSITIVE) positive++; + else if(r == SP_MATCHED_NEGATIVE) negative++; + + if(!negative) { + r = simple_pattern_matches_string_extract(host->rrdpush.send.charts_matching, st->name, NULL, 0); + if (r == SP_MATCHED_POSITIVE) positive++; + else if (r == SP_MATCHED_NEGATIVE) negative++; + } + + if(!negative) { + r = simple_pattern_matches_string_extract(host->rrdpush.send.charts_matching, st->id, NULL, 0); + if (r == SP_MATCHED_POSITIVE) positive++; + else if (r == SP_MATCHED_NEGATIVE) negative++; + } + + if(!negative && positive) + rrdset_flag_set(st, RRDSET_FLAG_UPSTREAM_SEND); + else + rrdset_flag_set(st, RRDSET_FLAG_UPSTREAM_IGNORE); + } // get the flags again, to know how to respond flags = rrdset_flag_check(st, RRDSET_FLAG_UPSTREAM_SEND|RRDSET_FLAG_UPSTREAM_IGNORE); @@ -578,24 +593,6 @@ void rrdpush_send_global_functions(RRDHOST *host) { sender_thread_buffer_free(); } -void rrdpush_send_claimed_id(RRDHOST *host) { - if(!stream_has_capability(host->sender, STREAM_CAP_CLAIM)) - return; - - if(unlikely(!rrdhost_can_send_definitions_to_parent(host))) - return; - - BUFFER *wb = sender_start(host->sender); - rrdhost_aclk_state_lock(host); - - buffer_sprintf(wb, "CLAIMED_ID %s %s\n", host->machine_guid, (host->aclk_state.claimed_id ? 
host->aclk_state.claimed_id : "NULL") ); - - rrdhost_aclk_state_unlock(host); - sender_commit(host->sender, wb, STREAM_TRAFFIC_TYPE_METADATA); - - sender_thread_buffer_free(); -} - int connect_to_one_of_destinations( RRDHOST *host, int default_port, @@ -677,7 +674,7 @@ bool destinations_init_add_one(char *entry, void *data) { } void rrdpush_destinations_init(RRDHOST *host) { - if(!host->rrdpush_send_destination) return; + if(!host->rrdpush.send.destination) return; rrdpush_destinations_free(host); @@ -687,7 +684,7 @@ void rrdpush_destinations_init(RRDHOST *host) { .count = 0, }; - foreach_entry_in_connection_string(host->rrdpush_send_destination, destinations_init_add_one, &t); + foreach_entry_in_connection_string(host->rrdpush.send.destination, destinations_init_add_one, &t); host->destinations = t.list; } @@ -777,12 +774,10 @@ int rrdpush_receiver_too_busy_now(struct web_client *w) { static void rrdpush_receiver_takeover_web_connection(struct web_client *w, struct receiver_state *rpt) { rpt->fd = w->ifd; -#ifdef ENABLE_HTTPS rpt->ssl.conn = w->ssl.conn; rpt->ssl.state = w->ssl.state; w->ssl = NETDATA_SSL_UNSET_CONNECTION; -#endif WEB_CLIENT_IS_DEAD(w); @@ -825,9 +820,7 @@ int rrdpush_receiver_thread_spawn(struct web_client *w, char *decoded_query_stri rpt->client_ip = strdupz(w->client_ip); rpt->client_port = strdupz(w->client_port); -#ifdef ENABLE_HTTPS rpt->ssl = NETDATA_SSL_UNSET_CONNECTION; -#endif rpt->config.update_every = default_rrd_update_every; @@ -1083,9 +1076,7 @@ int rrdpush_receiver_thread_spawn(struct web_client *w, char *decoded_query_stri snprintfz(initial_response, HTTP_HEADER_SIZE, "%s", START_STREAMING_ERROR_SAME_LOCALHOST); if(send_timeout( -#ifdef ENABLE_HTTPS &rpt->ssl, -#endif rpt->fd, initial_response, strlen(initial_response), 0, 60) != (ssize_t)strlen(initial_response)) { nd_log_daemon(NDLP_ERR, "STREAM '%s' [receive from [%s]:%s]: " @@ -1149,7 +1140,7 @@ int rrdpush_receiver_thread_spawn(struct web_client *w, char *decoded_query_stri 
host = NULL; if (host) { - netdata_mutex_lock(&host->receiver_lock); + spinlock_lock(&host->receiver_lock); if (host->receiver) { age = now_monotonic_sec() - host->receiver->last_msg_t; @@ -1158,7 +1149,7 @@ int rrdpush_receiver_thread_spawn(struct web_client *w, char *decoded_query_stri else receiver_stale = true; } - netdata_mutex_unlock(&host->receiver_lock); + spinlock_unlock(&host->receiver_lock); } rrd_rdunlock(); @@ -1294,6 +1285,7 @@ static struct { {STREAM_CAP_IEEE754, "IEEE754" }, {STREAM_CAP_DATA_WITH_ML, "ML" }, {STREAM_CAP_DYNCFG, "DYNCFG" }, + {STREAM_CAP_NODE_ID, "NODEID" }, {STREAM_CAP_SLOTS, "SLOTS" }, {STREAM_CAP_ZSTD, "ZSTD" }, {STREAM_CAP_GZIP, "GZIP" }, @@ -1352,12 +1344,12 @@ STREAM_CAPABILITIES stream_our_capabilities(RRDHOST *host, bool sender) { // we have DATA_WITH_ML capability // we should remove the DATA_WITH_ML capability if our database does not have anomaly info // this can happen under these conditions: 1. we don't run ML, and 2. we don't receive ML - netdata_mutex_lock(&host->receiver_lock); + spinlock_lock(&host->receiver_lock); if(!ml_host_running(host) && !stream_has_capability(host->receiver, STREAM_CAP_DATA_WITH_ML)) disabled_capabilities |= STREAM_CAP_DATA_WITH_ML; - netdata_mutex_unlock(&host->receiver_lock); + spinlock_unlock(&host->receiver_lock); if(host->sender) disabled_capabilities |= host->sender->disabled_capabilities; @@ -1378,6 +1370,7 @@ STREAM_CAPABILITIES stream_our_capabilities(RRDHOST *host, bool sender) { STREAM_CAP_PROGRESS | STREAM_CAP_COMPRESSIONS_AVAILABLE | STREAM_CAP_DYNCFG | + STREAM_CAP_NODE_ID | STREAM_CAP_IEEE754 | STREAM_CAP_DATA_WITH_ML | 0) & ~disabled_capabilities; diff --git a/src/streaming/rrdpush.h b/src/streaming/rrdpush.h index d55a07675e4683..e0d61dfcc99b91 100644 --- a/src/streaming/rrdpush.h +++ b/src/streaming/rrdpush.h @@ -54,6 +54,7 @@ typedef enum { STREAM_CAP_BROTLI = (1 << 21), // BROTLI compression supported STREAM_CAP_PROGRESS = (1 << 22), // Functions PROGRESS support 
STREAM_CAP_DYNCFG = (1 << 23), // support for DYNCFG + STREAM_CAP_NODE_ID = (1 << 24), // support for sending NODE_ID back to the child STREAM_CAP_INVALID = (1 << 30), // used as an invalid value for capabilities when this is set // this must be signed int, so don't use the last bit @@ -238,9 +239,7 @@ struct sender_state { FILE *stream_log_fp; #endif -#ifdef ENABLE_HTTPS NETDATA_SSL ssl; // structure used to encrypt the connection -#endif struct { bool shutdown; @@ -334,6 +333,8 @@ typedef struct stream_node_instance { } STREAM_NODE_INSTANCE; */ +struct parser; + struct receiver_state { RRDHOST *host; pid_t tid; @@ -382,9 +383,7 @@ struct receiver_state { STREAM_CAPABILITIES compression_priorities[COMPRESSION_ALGORITHM_MAX]; } config; -#ifdef ENABLE_HTTPS NETDATA_SSL ssl; -#endif time_t replication_first_time_t; @@ -396,6 +395,12 @@ struct receiver_state { } instances; */ + // The parser pointer is safe to read and use, only when having the host receiver lock. + // Without this lock, the data pointed by the pointer may vanish randomly. + // Also, since the receiver sets it when it starts, it should be read with + // an atomic read. 
+ struct parser *parser; + #ifdef ENABLE_H2O void *h2o_ctx; #endif @@ -455,7 +460,6 @@ void rrddim_push_metrics_v2(RRDSET_STREAM_BUFFER *rsb, RRDDIM *rd, usec_t point_ bool rrdset_push_chart_definition_now(RRDSET *st); void *rrdpush_sender_thread(void *ptr); void rrdpush_send_host_labels(RRDHOST *host); -void rrdpush_send_claimed_id(RRDHOST *host); void rrdpush_send_global_functions(RRDHOST *host); int rrdpush_receiver_thread_spawn(struct web_client *w, char *decoded_query_string, void *h2o_ctx); @@ -758,4 +762,6 @@ void rrdpush_parse_compression_order(struct receiver_state *rpt, const char *ord void rrdpush_select_receiver_compression_algorithm(struct receiver_state *rpt); void rrdpush_compression_deactivate(struct sender_state *s); +#include "protocol/commands.h" + #endif //NETDATA_RRDPUSH_H diff --git a/src/streaming/sender.c b/src/streaming/sender.c index a5fbe6044ef386..e55c0f80f91738 100644 --- a/src/streaming/sender.c +++ b/src/streaming/sender.c @@ -334,9 +334,7 @@ static void rrdpush_sender_after_connect(RRDHOST *host) { } static inline void rrdpush_sender_thread_close_socket(RRDHOST *host) { -#ifdef ENABLE_HTTPS netdata_ssl_close(&host->sender->ssl); -#endif if(host->sender->rrdpush_sender_socket != -1) { close(host->sender->rrdpush_sender_socket); @@ -349,6 +347,10 @@ static inline void rrdpush_sender_thread_close_socket(RRDHOST *host) { // do not flush the circular buffer here // this function is called sometimes with the mutex lock, sometimes without the lock rrdpush_sender_charts_and_replication_reset(host); + + // clear the parent's claim id + rrdpush_sender_clear_child_claim_id(host); + rrdpush_receiver_send_node_and_claim_id_to_child(host); } void rrdpush_encode_variable(stream_encoded_t *se, RRDHOST *host) { @@ -572,7 +574,6 @@ unsigned char alpn_proto_list[] = { #define CONN_UPGRADE_VAL "upgrade" static bool rrdpush_sender_connect_ssl(struct sender_state *s __maybe_unused) { -#ifdef ENABLE_HTTPS RRDHOST *host = s->host; bool ssl_required = 
host->destination && host->destination->ssl; @@ -627,11 +628,6 @@ static bool rrdpush_sender_connect_ssl(struct sender_state *s __maybe_unused) { netdata_log_error("SSL: failed to establish connection."); return false; - -#else - // SSL is not enabled - return true; -#endif } static int rrdpush_http_upgrade_prelude(RRDHOST *host, struct sender_state *s) { @@ -644,9 +640,7 @@ static int rrdpush_http_upgrade_prelude(RRDHOST *host, struct sender_state *s) { HTTP_HDR_END); ssize_t bytes = send_timeout( -#ifdef ENABLE_HTTPS &host->sender->ssl, -#endif s->rrdpush_sender_socket, http, strlen(http), @@ -654,9 +648,7 @@ static int rrdpush_http_upgrade_prelude(RRDHOST *host, struct sender_state *s) { 1000); bytes = recv_timeout( -#ifdef ENABLE_HTTPS &host->sender->ssl, -#endif s->rrdpush_sender_socket, http, HTTP_HEADER_SIZE, @@ -818,7 +810,7 @@ static bool rrdpush_sender_thread_connect_to_parent(RRDHOST *host, int default_p HTTP_1_1 HTTP_ENDL "User-Agent: %s/%s\r\n" "Accept: */*\r\n\r\n" - , host->rrdpush_send_api_key + , host->rrdpush.send.api_key , rrdhost_hostname(host) , rrdhost_registry_hostname(host) , host->machine_guid @@ -885,9 +877,7 @@ static bool rrdpush_sender_thread_connect_to_parent(RRDHOST *host, int default_p ssize_t len = (ssize_t)strlen(http); ssize_t bytes = send_timeout( -#ifdef ENABLE_HTTPS &host->sender->ssl, -#endif s->rrdpush_sender_socket, http, len, @@ -914,9 +904,7 @@ static bool rrdpush_sender_thread_connect_to_parent(RRDHOST *host, int default_p } bytes = recv_timeout( -#ifdef ENABLE_HTTPS &host->sender->ssl, -#endif s->rrdpush_sender_socket, http, HTTP_HEADER_SIZE, @@ -1037,14 +1025,10 @@ static ssize_t attempt_to_send(struct sender_state *s) { size_t outstanding = cbuffer_next_unsafe(s->buffer, &chunk); netdata_log_debug(D_STREAM, "STREAM: Sending data. 
Buffer r=%zu w=%zu s=%zu, next chunk=%zu", cb->read, cb->write, cb->size, outstanding); -#ifdef ENABLE_HTTPS if(SSL_connection(&s->ssl)) ret = netdata_ssl_write(&s->ssl, chunk, outstanding); else ret = send(s->rrdpush_sender_socket, chunk, outstanding, MSG_DONTWAIT); -#else - ret = send(s->rrdpush_sender_socket, chunk, outstanding, MSG_DONTWAIT); -#endif if (likely(ret > 0)) { cbuffer_remove_unsafe(s->buffer, ret); @@ -1072,14 +1056,10 @@ static ssize_t attempt_to_send(struct sender_state *s) { static ssize_t attempt_read(struct sender_state *s) { ssize_t ret; -#ifdef ENABLE_HTTPS if (SSL_connection(&s->ssl)) ret = netdata_ssl_read(&s->ssl, s->read_buffer + s->read_len, sizeof(s->read_buffer) - s->read_len - 1); else ret = recv(s->rrdpush_sender_socket, s->read_buffer + s->read_len, sizeof(s->read_buffer) - s->read_len - 1,MSG_DONTWAIT); -#else - ret = recv(s->rrdpush_sender_socket, s->read_buffer + s->read_len, sizeof(s->read_buffer) - s->read_len - 1,MSG_DONTWAIT); -#endif if (ret > 0) { s->read_len += ret; @@ -1089,13 +1069,9 @@ static ssize_t attempt_read(struct sender_state *s) { if (ret < 0 && (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR)) return ret; -#ifdef ENABLE_HTTPS if (SSL_connection(&s->ssl)) worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_SSL_ERROR); - else -#endif - - if (ret == 0 || errno == ECONNRESET) { + else if (ret == 0 || errno == ECONNRESET) { worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_PARENT_CLOSED); netdata_log_error("STREAM %s [send to %s]: connection closed by far end.", rrdhost_hostname(s->host), s->connected_to); } @@ -1187,7 +1163,7 @@ static void execute_commands_function(struct sender_state *s, const char *comman stream_execute_function_callback, tmp, stream_has_capability(s, STREAM_CAP_PROGRESS) ? stream_execute_function_progress_callback : NULL, stream_has_capability(s, STREAM_CAP_PROGRESS) ? 
tmp : NULL, - NULL, NULL, payload, source); + NULL, NULL, payload, source, true); if(code != HTTP_RESP_OK) { if (!buffer_strlen(wb)) @@ -1333,8 +1309,12 @@ void execute_commands(struct sender_state *s) { ); } } + else if(command && strcmp(command, PLUGINSD_KEYWORD_NODE_ID) == 0) { + rrdpush_sender_get_node_and_claim_id_from_parent(s); + } else { - netdata_log_error("STREAM %s [send to %s] received unknown command over connection: %s", rrdhost_hostname(s->host), s->connected_to, s->line.words[0]?s->line.words[0]:"(unset)"); + netdata_log_error("STREAM %s [send to %s] received unknown command over connection: %s", + rrdhost_hostname(s->host), s->connected_to, s->line.words[0]?s->line.words[0]:"(unset)"); } line_splitter_reset(&s->line); @@ -1516,7 +1496,6 @@ static void rrdpush_sender_thread_cleanup_callback(void *pptr) { } void rrdpush_initialize_ssl_ctx(RRDHOST *host __maybe_unused) { -#ifdef ENABLE_HTTPS static SPINLOCK sp = NETDATA_SPINLOCK_INITIALIZER; spinlock_lock(&sp); @@ -1538,7 +1517,6 @@ void rrdpush_initialize_ssl_ctx(RRDHOST *host __maybe_unused) { } spinlock_unlock(&sp); -#endif } static bool stream_sender_log_capabilities(BUFFER *wb, void *ptr) { @@ -1555,11 +1533,7 @@ static bool stream_sender_log_transport(BUFFER *wb, void *ptr) { if(!state) return false; -#ifdef ENABLE_HTTPS buffer_strcat(wb, SSL_connection(&state->ssl) ? 
"https" : "http"); -#else - buffer_strcat(wb, "http"); -#endif return true; } @@ -1627,9 +1601,9 @@ void *rrdpush_sender_thread(void *ptr) { worker_register_job_custom_metric(WORKER_SENDER_JOB_BYTES_COMPRESSION_RATIO, "cumulative compression savings ratio", "%", WORKER_METRIC_ABSOLUTE); worker_register_job_custom_metric(WORKER_SENDER_JOB_REPLAY_DICT_SIZE, "replication dict entries", "entries", WORKER_METRIC_ABSOLUTE); - if(!rrdhost_has_rrdpush_sender_enabled(s->host) || !s->host->rrdpush_send_destination || - !*s->host->rrdpush_send_destination || !s->host->rrdpush_send_api_key || - !*s->host->rrdpush_send_api_key) { + if(!rrdhost_has_rrdpush_sender_enabled(s->host) || !s->host->rrdpush.send.destination || + !*s->host->rrdpush.send.destination || !s->host->rrdpush.send.api_key || + !*s->host->rrdpush.send.api_key) { netdata_log_error("STREAM %s [send]: thread created (task id %d), but host has streaming disabled.", rrdhost_hostname(s->host), gettid_cached()); return NULL; @@ -1714,7 +1688,7 @@ void *rrdpush_sender_thread(void *ptr) { break; now_s = s->last_traffic_seen_t = now_monotonic_sec(); - rrdpush_send_claimed_id(s->host); + rrdpush_sender_send_claimed_id(s->host); rrdpush_send_host_labels(s->host); rrdpush_send_global_functions(s->host); s->replication.oldest_request_after_t = 0; diff --git a/src/streaming/stream.conf b/src/streaming/stream.conf index 475d5eac227bd2..0b9be526e8bc7e 100644 --- a/src/streaming/stream.conf +++ b/src/streaming/stream.conf @@ -62,32 +62,33 @@ #enable compression = yes # The timeout to connect and send metrics - timeout seconds = 60 + #timeout seconds = 60 # If the destination line above does not specify a port, use this - default port = 19999 + #default port = 19999 - # filter the charts to be streamed + # filter the charts and contexts to be streamed # netdata SIMPLE PATTERN: # - space separated list of patterns (use \ to include spaces in patterns) # - use * as wildcard, any number of times within each pattern # - prefix a 
pattern with ! for a negative match (ie not stream the charts it matches) # - the order of patterns is important (left to right) # To send all except a few, use: !this !that * (ie append a wildcard pattern) - send charts matching = * + # The pattern is matched against the context, the chart name and the chart id. + #send charts matching = * # The buffer to use for sending metrics. # 10MB is good for 60 seconds of data, so increase this if you expect latencies. # The buffer is flushed on reconnects (this will not prevent gaps at the charts). - buffer size bytes = 10485760 + #buffer size bytes = 10485760 # If the connection fails, or it disconnects, # retry after that many seconds. - reconnect delay seconds = 5 + #reconnect delay seconds = 5 # Sync the clock of the charts for that many iterations, when starting. # It is ignored when replication is enabled - initial clock resync iterations = 60 + #initial clock resync iterations = 60 # ----------------------------------------------------------------------------- # 2. ON PARENT NETDATA - THE ONE THAT WILL BE RECEIVING METRICS @@ -124,7 +125,7 @@ # will be pushing metrics using this API key. # The metrics are received via the API port, so the same IPs # should also be matched at netdata.conf [web].allow connections from - allow from = * + #allow from = * # The default history in entries, for all hosts using this API key. # You can also set it per host below. @@ -151,7 +152,7 @@ #health enabled by default = auto # postpone alarms for a short period after the sender is connected - default postpone alarms on connect seconds = 60 + #default postpone alarms on connect seconds = 60 # seconds of health log events to keep #default health log history = 432000 @@ -217,7 +218,7 @@ # The metrics are received via the API port, so the same IPs # should also be matched at netdata.conf [web].allow connections from # and at stream.conf [API_KEY].allow from - allow from = * + #allow from = * # The number of entries in the database. 
# This is ignored for db mode dbengine. @@ -230,7 +231,7 @@ #health enabled = auto # postpone alarms when the sender connects - postpone alarms on connect seconds = 60 + #postpone alarms on connect seconds = 60 # seconds of health log events to keep #health log history = 432000 diff --git a/src/web/api/badges/web_buffer_svg.h b/src/web/api/badges/web_buffer_svg.h deleted file mode 100644 index 71857811fe7e2d..00000000000000 --- a/src/web/api/badges/web_buffer_svg.h +++ /dev/null @@ -1,18 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_WEB_BUFFER_SVG_H -#define NETDATA_WEB_BUFFER_SVG_H 1 - -#include "libnetdata/libnetdata.h" -#include "web/server/web_client.h" - -void buffer_svg(BUFFER *wb, const char *label, - NETDATA_DOUBLE value, const char *units, const char *label_color, const char *value_color, int precision, int scale, uint32_t options, int fixed_width_lbl, int fixed_width_val, const char* text_color_lbl, const char* text_color_val); -char *format_value_and_unit(char *value_string, size_t value_string_len, - NETDATA_DOUBLE value, const char *units, int precision); - -int web_client_api_request_v1_badge(struct rrdhost *host, struct web_client *w, char *url); - -#include "web/api/web_api_v1.h" - -#endif /* NETDATA_WEB_BUFFER_SVG_H */ diff --git a/src/web/api/exporters/allmetrics.c b/src/web/api/exporters/allmetrics.c deleted file mode 100644 index 55179c0ae46d1d..00000000000000 --- a/src/web/api/exporters/allmetrics.c +++ /dev/null @@ -1,132 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "allmetrics.h" - -struct prometheus_output_options { - char *name; - PROMETHEUS_OUTPUT_OPTIONS flag; -} prometheus_output_flags_root[] = { - { "names", PROMETHEUS_OUTPUT_NAMES }, - { "timestamps", PROMETHEUS_OUTPUT_TIMESTAMPS }, - { "variables", PROMETHEUS_OUTPUT_VARIABLES }, - { "oldunits", PROMETHEUS_OUTPUT_OLDUNITS }, - { "hideunits", PROMETHEUS_OUTPUT_HIDEUNITS }, - // terminator - { NULL, PROMETHEUS_OUTPUT_NONE }, -}; - -inline 
int web_client_api_request_v1_allmetrics(RRDHOST *host, struct web_client *w, char *url) { - int format = ALLMETRICS_SHELL; - const char *filter = NULL; - const char *prometheus_server = w->client_ip; - - uint32_t prometheus_exporting_options; - if (prometheus_exporter_instance) - prometheus_exporting_options = prometheus_exporter_instance->config.options; - else - prometheus_exporting_options = global_exporting_options; - - PROMETHEUS_OUTPUT_OPTIONS prometheus_output_options = - PROMETHEUS_OUTPUT_TIMESTAMPS | - ((prometheus_exporting_options & EXPORTING_OPTION_SEND_NAMES) ? PROMETHEUS_OUTPUT_NAMES : 0); - - const char *prometheus_prefix; - if (prometheus_exporter_instance) - prometheus_prefix = prometheus_exporter_instance->config.prefix; - else - prometheus_prefix = global_exporting_prefix; - - while(url) { - char *value = strsep_skip_consecutive_separators(&url, "&"); - if (!value || !*value) continue; - - char *name = strsep_skip_consecutive_separators(&value, "="); - if(!name || !*name) continue; - if(!value || !*value) continue; - - if(!strcmp(name, "format")) { - if(!strcmp(value, ALLMETRICS_FORMAT_SHELL)) - format = ALLMETRICS_SHELL; - else if(!strcmp(value, ALLMETRICS_FORMAT_PROMETHEUS)) - format = ALLMETRICS_PROMETHEUS; - else if(!strcmp(value, ALLMETRICS_FORMAT_PROMETHEUS_ALL_HOSTS)) - format = ALLMETRICS_PROMETHEUS_ALL_HOSTS; - else if(!strcmp(value, ALLMETRICS_FORMAT_JSON)) - format = ALLMETRICS_JSON; - else - format = 0; - } - else if(!strcmp(name, "filter")) { - filter = value; - } - else if(!strcmp(name, "server")) { - prometheus_server = value; - } - else if(!strcmp(name, "prefix")) { - prometheus_prefix = value; - } - else if(!strcmp(name, "data") || !strcmp(name, "source") || !strcmp(name, "data source") || !strcmp(name, "data-source") || !strcmp(name, "data_source") || !strcmp(name, "datasource")) { - prometheus_exporting_options = exporting_parse_data_source(value, prometheus_exporting_options); - } - else { - int i; - for(i = 0; 
prometheus_output_flags_root[i].name ; i++) { - if(!strcmp(name, prometheus_output_flags_root[i].name)) { - if(!strcmp(value, "yes") || !strcmp(value, "1") || !strcmp(value, "true")) - prometheus_output_options |= prometheus_output_flags_root[i].flag; - else { - prometheus_output_options &= ~prometheus_output_flags_root[i].flag; - } - - break; - } - } - } - } - - buffer_flush(w->response.data); - buffer_no_cacheable(w->response.data); - - switch(format) { - case ALLMETRICS_JSON: - w->response.data->content_type = CT_APPLICATION_JSON; - rrd_stats_api_v1_charts_allmetrics_json(host, filter, w->response.data); - return HTTP_RESP_OK; - - case ALLMETRICS_SHELL: - w->response.data->content_type = CT_TEXT_PLAIN; - rrd_stats_api_v1_charts_allmetrics_shell(host, filter, w->response.data); - return HTTP_RESP_OK; - - case ALLMETRICS_PROMETHEUS: - w->response.data->content_type = CT_PROMETHEUS; - rrd_stats_api_v1_charts_allmetrics_prometheus_single_host( - host - , filter - , w->response.data - , prometheus_server - , prometheus_prefix - , prometheus_exporting_options - , prometheus_output_options - ); - return HTTP_RESP_OK; - - case ALLMETRICS_PROMETHEUS_ALL_HOSTS: - w->response.data->content_type = CT_PROMETHEUS; - rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts( - host - , filter - , w->response.data - , prometheus_server - , prometheus_prefix - , prometheus_exporting_options - , prometheus_output_options - ); - return HTTP_RESP_OK; - - default: - w->response.data->content_type = CT_TEXT_PLAIN; - buffer_strcat(w->response.data, "Which format? 
'" ALLMETRICS_FORMAT_SHELL "', '" ALLMETRICS_FORMAT_PROMETHEUS "', '" ALLMETRICS_FORMAT_PROMETHEUS_ALL_HOSTS "' and '" ALLMETRICS_FORMAT_JSON "' are currently supported."); - return HTTP_RESP_BAD_REQUEST; - } -} diff --git a/src/web/api/exporters/allmetrics.h b/src/web/api/exporters/allmetrics.h deleted file mode 100644 index 3afc42e284a8b5..00000000000000 --- a/src/web/api/exporters/allmetrics.h +++ /dev/null @@ -1,12 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_API_ALLMETRICS_H -#define NETDATA_API_ALLMETRICS_H - -#include "web/api/formatters/rrd2json.h" -#include "shell/allmetrics_shell.h" -#include "web/server/web_client.h" - -int web_client_api_request_v1_allmetrics(RRDHOST *host, struct web_client *w, char *url); - -#endif //NETDATA_API_ALLMETRICS_H diff --git a/src/web/api/exporters/shell/README.md b/src/web/api/exporters/shell/README.md index 86b774f1b29c5e..577b0c9ec52747 100644 --- a/src/web/api/exporters/shell/README.md +++ b/src/web/api/exporters/shell/README.md @@ -12,14 +12,14 @@ learn_rel_path: "Developers/Web/Api/Exporters" Shell scripts can now query Netdata: ```sh -eval "$(curl -s 'http://localhost:19999/api/v1/allmetrics')" +eval "$(curl -s 'http://localhost:19999/api/v3/allmetrics')" ``` after this command, all the Netdata metrics are exposed to shell. Check: ```sh # source the metrics -eval "$(curl -s 'http://localhost:19999/api/v1/allmetrics')" +eval "$(curl -s 'http://localhost:19999/api/v3/allmetrics')" # let's see if there are variables exposed by Netdata for system.cpu set | grep "^NETDATA_SYSTEM_CPU" @@ -50,7 +50,7 @@ echo ${NETDATA_ALARM_SYSTEM_SWAP_USED_SWAP_STATUS} CLEAR # is it fast? 
-time curl -s 'http://localhost:19999/api/v1/allmetrics' >/dev/null +time curl -s 'http://localhost:19999/api/v3/allmetrics' >/dev/null real 0m0,070s user 0m0,000s diff --git a/src/web/api/exporters/shell/allmetrics_shell.h b/src/web/api/exporters/shell/allmetrics_shell.h deleted file mode 100644 index d6598e08d796bc..00000000000000 --- a/src/web/api/exporters/shell/allmetrics_shell.h +++ /dev/null @@ -1,21 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_API_ALLMETRICS_SHELL_H -#define NETDATA_API_ALLMETRICS_SHELL_H - -#include "../allmetrics.h" - -#define ALLMETRICS_FORMAT_SHELL "shell" -#define ALLMETRICS_FORMAT_PROMETHEUS "prometheus" -#define ALLMETRICS_FORMAT_PROMETHEUS_ALL_HOSTS "prometheus_all_hosts" -#define ALLMETRICS_FORMAT_JSON "json" - -#define ALLMETRICS_SHELL 1 -#define ALLMETRICS_PROMETHEUS 2 -#define ALLMETRICS_JSON 3 -#define ALLMETRICS_PROMETHEUS_ALL_HOSTS 4 - -void rrd_stats_api_v1_charts_allmetrics_json(RRDHOST *host, const char *filter_string, BUFFER *wb); -void rrd_stats_api_v1_charts_allmetrics_shell(RRDHOST *host, const char *filter_string, BUFFER *wb); - -#endif //NETDATA_API_ALLMETRICS_SHELL_H diff --git a/src/web/api/formatters/rrd2json.c b/src/web/api/formatters/rrd2json.c index 81c9ad5c78d452..a80275487ab1bc 100644 --- a/src/web/api/formatters/rrd2json.c +++ b/src/web/api/formatters/rrd2json.c @@ -10,46 +10,6 @@ void rrd_stats_api_v1_chart(RRDSET *st, BUFFER *wb) buffer_json_finalize(wb); } -const char *rrdr_format_to_string(DATASOURCE_FORMAT format) { - switch(format) { - case DATASOURCE_JSON: - return DATASOURCE_FORMAT_JSON; - - case DATASOURCE_JSON2: - return DATASOURCE_FORMAT_JSON2; - - case DATASOURCE_DATATABLE_JSON: - return DATASOURCE_FORMAT_DATATABLE_JSON; - - case DATASOURCE_DATATABLE_JSONP: - return DATASOURCE_FORMAT_DATATABLE_JSONP; - - case DATASOURCE_JSONP: - return DATASOURCE_FORMAT_JSONP; - - case DATASOURCE_SSV: - return DATASOURCE_FORMAT_SSV; - - case DATASOURCE_CSV: - return 
DATASOURCE_FORMAT_CSV; - - case DATASOURCE_TSV: - return DATASOURCE_FORMAT_TSV; - - case DATASOURCE_HTML: - return DATASOURCE_FORMAT_HTML; - - case DATASOURCE_JS_ARRAY: - return DATASOURCE_FORMAT_JS_ARRAY; - - case DATASOURCE_SSV_COMMA: - return DATASOURCE_FORMAT_SSV_COMMA; - - default: - return "unknown"; - } -} - int rrdset2value_api_v1( RRDSET *st , BUFFER *wb diff --git a/src/web/api/formatters/rrd2json.h b/src/web/api/formatters/rrd2json.h index f0c0c39ba7d4b4..cf3492ff2813a1 100644 --- a/src/web/api/formatters/rrd2json.h +++ b/src/web/api/formatters/rrd2json.h @@ -3,26 +3,8 @@ #ifndef NETDATA_RRD2JSON_H #define NETDATA_RRD2JSON_H 1 -// type of JSON generations -typedef enum { - DATASOURCE_JSON = 0, - DATASOURCE_DATATABLE_JSON = 1, - DATASOURCE_DATATABLE_JSONP = 2, - DATASOURCE_SSV = 3, - DATASOURCE_CSV = 4, - DATASOURCE_JSONP = 5, - DATASOURCE_TSV = 6, - DATASOURCE_HTML = 7, - DATASOURCE_JS_ARRAY = 8, - DATASOURCE_SSV_COMMA = 9, - DATASOURCE_CSV_JSON_ARRAY = 10, - DATASOURCE_CSV_MARKDOWN = 11, - DATASOURCE_JSON2 = 12, -} DATASOURCE_FORMAT; +#include "web/api/web_api.h" -#include "web/api/web_api_v1.h" - -#include "web/api/exporters/allmetrics.h" #include "web/api/queries/rrdr.h" #include "web/api/formatters/csv/csv.h" @@ -38,22 +20,7 @@ typedef enum { #define HOSTNAME_MAX 1024 -#define DATASOURCE_FORMAT_JSON "json" -#define DATASOURCE_FORMAT_JSON2 "json2" -#define DATASOURCE_FORMAT_DATATABLE_JSON "datatable" -#define DATASOURCE_FORMAT_DATATABLE_JSONP "datasource" -#define DATASOURCE_FORMAT_JSONP "jsonp" -#define DATASOURCE_FORMAT_SSV "ssv" -#define DATASOURCE_FORMAT_CSV "csv" -#define DATASOURCE_FORMAT_TSV "tsv" -#define DATASOURCE_FORMAT_HTML "html" -#define DATASOURCE_FORMAT_JS_ARRAY "array" -#define DATASOURCE_FORMAT_SSV_COMMA "ssvcomma" -#define DATASOURCE_FORMAT_CSV_JSON_ARRAY "csvjsonarray" -#define DATASOURCE_FORMAT_CSV_MARKDOWN "markdown" - void rrd_stats_api_v1_chart(RRDSET *st, BUFFER *wb); -const char *rrdr_format_to_string(DATASOURCE_FORMAT 
format); int data_query_execute(ONEWAYALLOC *owa, BUFFER *wb, struct query_target *qt, time_t *latest_timestamp); diff --git a/src/web/api/functions/function-bearer_get_token.c b/src/web/api/functions/function-bearer_get_token.c new file mode 100644 index 00000000000000..c36b16a7b22781 --- /dev/null +++ b/src/web/api/functions/function-bearer_get_token.c @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "function-bearer_get_token.h" +#include "../v2/api_v2_calls.h" + +struct bearer_token_request { + nd_uuid_t claim_id; + nd_uuid_t machine_guid; + nd_uuid_t node_id; + HTTP_USER_ROLE user_role; + HTTP_ACCESS access; + nd_uuid_t cloud_account_id; + STRING *client_name; +}; + +static bool parse_json_payload(json_object *jobj, const char *path, void *data, BUFFER *error) { + struct bearer_token_request *rq = data; + JSONC_PARSE_TXT2UUID_OR_ERROR_AND_RETURN(jobj, path, "claim_id", rq->claim_id, error, true); + JSONC_PARSE_TXT2UUID_OR_ERROR_AND_RETURN(jobj, path, "machine_guid", rq->machine_guid, error, true); + JSONC_PARSE_TXT2UUID_OR_ERROR_AND_RETURN(jobj, path, "node_id", rq->node_id, error, true); + JSONC_PARSE_TXT2ENUM_OR_ERROR_AND_RETURN(jobj, path, "user_role", http_user_role2id, rq->user_role, error, true); + JSONC_PARSE_ARRAY_OF_TXT2BITMAP_OR_ERROR_AND_RETURN(jobj, path, "access", http_access2id_one, rq->access, error, true); + JSONC_PARSE_TXT2UUID_OR_ERROR_AND_RETURN(jobj, path, "cloud_account_id", rq->cloud_account_id, error, true); + JSONC_PARSE_TXT2STRING_OR_ERROR_AND_RETURN(jobj, path, "client_name", rq->client_name, error, true); + return true; +} + +int function_bearer_get_token(BUFFER *wb, const char *function __maybe_unused, BUFFER *payload, const char *source) { + if(!request_source_is_cloud(source)) + return rrd_call_function_error( + wb, "You cannot access this function from outside Netdata Cloud", HTTP_RESP_BAD_REQUEST); + + int code; + struct bearer_token_request rq = { 0 }; + CLEAN_JSON_OBJECT *jobj = 
json_parse_function_payload_or_error(wb, payload, &code, parse_json_payload, &rq); + if(!jobj || code != HTTP_RESP_OK) { + string_freez(rq.client_name); + return code; + } + + char claim_id[UUID_STR_LEN]; + uuid_unparse_lower(rq.claim_id, claim_id); + + char machine_guid[UUID_STR_LEN]; + uuid_unparse_lower(rq.machine_guid, machine_guid); + + char node_id[UUID_STR_LEN]; + uuid_unparse_lower(rq.node_id, node_id); + + int rc = bearer_get_token_json_response(wb, localhost, claim_id, machine_guid, node_id, + rq.user_role, rq.access, rq.cloud_account_id, + string2str(rq.client_name)); + + string_freez(rq.client_name); + return rc; +} + +int call_function_bearer_get_token(RRDHOST *host, struct web_client *w, const char *claim_id, const char *machine_guid, const char *node_id) { + CLEAN_BUFFER *payload = buffer_create(0, NULL); + buffer_json_initialize(payload, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY); + buffer_json_member_add_string(payload, "claim_id", claim_id); + buffer_json_member_add_string(payload, "machine_guid", machine_guid); + buffer_json_member_add_string(payload, "node_id", node_id); + buffer_json_member_add_string(payload, "user_role", http_id2user_role(w->user_role)); + http_access2buffer_json_array(payload, "access", w->access); + buffer_json_member_add_uuid(payload, "cloud_account_id", w->auth.cloud_account_id); + buffer_json_member_add_string(payload, "client_name", w->auth.client_name); + buffer_json_finalize(payload); + + CLEAN_BUFFER *source = buffer_create(0, NULL); + web_client_api_request_vX_source_to_buffer(w, source); + + char transaction_str[UUID_COMPACT_STR_LEN]; + uuid_unparse_lower_compact(w->transaction, transaction_str); + return rrd_function_run(host, w->response.data, 10, + w->access, RRDFUNCTIONS_BEARER_GET_TOKEN, true, + transaction_str, NULL, NULL, + NULL, NULL, + NULL, NULL, + payload, buffer_tostring(source), true); +} diff --git a/src/web/api/functions/function-bearer_get_token.h 
b/src/web/api/functions/function-bearer_get_token.h new file mode 100644 index 00000000000000..03481ebb83ad4f --- /dev/null +++ b/src/web/api/functions/function-bearer_get_token.h @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_FUNCTION_BEARER_GET_TOKEN_H +#define NETDATA_FUNCTION_BEARER_GET_TOKEN_H + +#include "daemon/common.h" + +int function_bearer_get_token(BUFFER *wb, const char *function, BUFFER *payload, const char *source); +int call_function_bearer_get_token(RRDHOST *host, struct web_client *w, const char *claim_id, const char *machine_guid, const char *node_id); + +#define RRDFUNCTIONS_BEARER_GET_TOKEN "bearer_get_token" +#define RRDFUNCTIONS_BEARER_GET_TOKEN_HELP "Get a bearer token for authenticated direct access to the agent" + +#endif //NETDATA_FUNCTION_BEARER_GET_TOKEN_H diff --git a/src/web/api/functions/function-progress.c b/src/web/api/functions/function-progress.c new file mode 100644 index 00000000000000..052a9020af2785 --- /dev/null +++ b/src/web/api/functions/function-progress.c @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "function-progress.h" + +int function_progress(BUFFER *wb, const char *function __maybe_unused, BUFFER *payload __maybe_unused, const char *source __maybe_unused) { + return progress_function_result(wb, rrdhost_hostname(localhost)); +} + diff --git a/src/web/api/functions/function-progress.h b/src/web/api/functions/function-progress.h new file mode 100644 index 00000000000000..7d2d10b9de3a57 --- /dev/null +++ b/src/web/api/functions/function-progress.h @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_FUNCTION_PROGRESS_H +#define NETDATA_FUNCTION_PROGRESS_H + +#include "daemon/common.h" + +int function_progress(BUFFER *wb, const char *function, BUFFER *payload, const char *source); + +#endif //NETDATA_FUNCTION_PROGRESS_H diff --git a/src/database/rrdfunctions-streaming.c b/src/web/api/functions/function-streaming.c similarity index 
99% rename from src/database/rrdfunctions-streaming.c rename to src/web/api/functions/function-streaming.c index baf3ebc388442c..11e97044175293 100644 --- a/src/database/rrdfunctions-streaming.c +++ b/src/web/api/functions/function-streaming.c @@ -1,8 +1,8 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "rrdfunctions-streaming.h" +#include "function-streaming.h" -int rrdhost_function_streaming(BUFFER *wb, const char *function __maybe_unused) { +int function_streaming(BUFFER *wb, const char *function __maybe_unused, BUFFER *payload __maybe_unused, const char *source __maybe_unused) { time_t now = now_realtime_sec(); diff --git a/src/web/api/functions/function-streaming.h b/src/web/api/functions/function-streaming.h new file mode 100644 index 00000000000000..06da6af9ffe3cc --- /dev/null +++ b/src/web/api/functions/function-streaming.h @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_FUNCTION_STREAMING_H +#define NETDATA_FUNCTION_STREAMING_H + +#include "daemon/common.h" + +#define RRDFUNCTIONS_STREAMING_HELP "Streaming status for parents and children." 
+ +int function_streaming(BUFFER *wb, const char *function, BUFFER *payload, const char *source); + +#endif //NETDATA_FUNCTION_STREAMING_H diff --git a/src/web/api/functions/functions.c b/src/web/api/functions/functions.c new file mode 100644 index 00000000000000..8bbc7ea51e5421 --- /dev/null +++ b/src/web/api/functions/functions.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "functions.h" + +void global_functions_add(void) { + // we register this only on localhost + // for the other nodes, the origin server should register it + rrd_function_add_inline( + localhost, + NULL, + "streaming", + 10, + RRDFUNCTIONS_PRIORITY_DEFAULT + 1, + RRDFUNCTIONS_STREAMING_HELP, + "top", + HTTP_ACCESS_SIGNED_ID | HTTP_ACCESS_SAME_SPACE | HTTP_ACCESS_SENSITIVE_DATA, + function_streaming); + + rrd_function_add_inline( + localhost, + NULL, + "netdata-api-calls", + 10, + RRDFUNCTIONS_PRIORITY_DEFAULT + 2, + RRDFUNCTIONS_PROGRESS_HELP, + "top", + HTTP_ACCESS_SIGNED_ID | HTTP_ACCESS_SAME_SPACE | HTTP_ACCESS_SENSITIVE_DATA, + function_progress); + + rrd_function_add_inline( + localhost, + NULL, + RRDFUNCTIONS_BEARER_GET_TOKEN, + 10, + RRDFUNCTIONS_PRIORITY_DEFAULT + 3, + RRDFUNCTIONS_BEARER_GET_TOKEN_HELP, + RRDFUNCTIONS_TAG_HIDDEN, + HTTP_ACCESS_SIGNED_ID | HTTP_ACCESS_SAME_SPACE | HTTP_ACCESS_SENSITIVE_DATA, + function_bearer_get_token); +} diff --git a/src/web/api/functions/functions.h b/src/web/api/functions/functions.h new file mode 100644 index 00000000000000..28c48354108afd --- /dev/null +++ b/src/web/api/functions/functions.h @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_FUNCTIONS_H +#define NETDATA_FUNCTIONS_H + +#include "daemon/common.h" + +#include "function-streaming.h" +#include "function-progress.h" +#include "function-bearer_get_token.h" + +void global_functions_add(void); + +#endif //NETDATA_FUNCTIONS_H diff --git a/src/web/api/http_auth.c b/src/web/api/http_auth.c index ec0520304fe978..9e45f5e166c694 100644 
--- a/src/web/api/http_auth.c +++ b/src/web/api/http_auth.c @@ -2,83 +2,341 @@ #include "http_auth.h" -#define BEARER_TOKEN_EXPIRATION 86400 +#define BEARER_TOKEN_EXPIRATION (86400 * 1) -bool netdata_is_protected_by_bearer = false; // this is controlled by cloud, at the point the agent logs in - this should also be saved to /var/lib/netdata +bool netdata_is_protected_by_bearer = false; static DICTIONARY *netdata_authorized_bearers = NULL; struct bearer_token { nd_uuid_t cloud_account_id; - char cloud_user_name[CLOUD_USER_NAME_LENGTH]; + char client_name[CLOUD_CLIENT_NAME_LENGTH]; HTTP_ACCESS access; HTTP_USER_ROLE user_role; time_t created_s; time_t expires_s; }; -bool web_client_bearer_token_auth(struct web_client *w, const char *v) { - if(!uuid_parse_flexi(v, w->auth.bearer_token)) { - char uuid_str[UUID_COMPACT_STR_LEN]; - uuid_unparse_lower_compact(w->auth.bearer_token, uuid_str); +static void bearer_tokens_path(char out[FILENAME_MAX]) { + filename_from_path_entry(out, netdata_configured_varlib_dir, "bearer_tokens", NULL); +} - struct bearer_token *z = dictionary_get(netdata_authorized_bearers, uuid_str); - if (z && z->expires_s > now_monotonic_sec()) { - strncpyz(w->auth.client_name, z->cloud_user_name, sizeof(w->auth.client_name) - 1); - uuid_copy(w->auth.cloud_account_id, z->cloud_account_id); - web_client_set_permissions(w, z->access, z->user_role, WEB_CLIENT_FLAG_AUTH_BEARER); - return true; - } - } - else - nd_log(NDLS_DAEMON, NDLP_NOTICE, "Invalid bearer token '%s' received.", v); +static void bearer_token_filename(char out[FILENAME_MAX], nd_uuid_t uuid) { + char uuid_str[UUID_STR_LEN]; + uuid_unparse_lower(uuid, uuid_str); + + char path[FILENAME_MAX]; + bearer_tokens_path(path); + filename_from_path_entry(out, path, uuid_str, NULL); +} - return false; +static inline bool bearer_tokens_ensure_path_exists(void) { + char path[FILENAME_MAX]; + bearer_tokens_path(path); + return filename_is_dir(path, true); } -static void bearer_token_cleanup(void) { +static 
void bearer_token_delete_from_disk(nd_uuid_t *token) { + char filename[FILENAME_MAX]; + bearer_token_filename(filename, *token); + if(unlink(filename) != 0) + nd_log(NDLS_DAEMON, NDLP_ERR, "Failed to unlink() file '%s'", filename); +} + +static void bearer_token_cleanup(bool force) { static time_t attempts = 0; - if(++attempts % 1000 != 0) + if(++attempts % 1000 != 0 && !force) return; - time_t now_s = now_monotonic_sec(); + time_t now_s = now_realtime_sec(); struct bearer_token *z; dfe_start_read(netdata_authorized_bearers, z) { - if(z->expires_s < now_s) + if(z->expires_s < now_s) { + nd_uuid_t uuid; + if(uuid_parse_flexi(z_dfe.name, uuid) == 0) + bearer_token_delete_from_disk(&uuid); + dictionary_del(netdata_authorized_bearers, z_dfe.name); + } } dfe_done(z); dictionary_garbage_collect(netdata_authorized_bearers); } -void bearer_tokens_init(void) { - netdata_authorized_bearers = dictionary_create_advanced( - DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, - NULL, sizeof(struct bearer_token)); +static uint64_t bearer_token_signature(nd_uuid_t token, struct bearer_token *bt) { + // we use a custom structure to make sure that changes in the other code will not affect the signature + + struct { + nd_uuid_t host_uuid; + nd_uuid_t token; + nd_uuid_t cloud_account_id; + char client_name[CLOUD_CLIENT_NAME_LENGTH]; + HTTP_ACCESS access; + HTTP_USER_ROLE user_role; + time_t created_s; + time_t expires_s; + } signature_payload = { + .access = bt->access, + .user_role = bt->user_role, + .created_s = bt->created_s, + .expires_s = bt->expires_s, + }; + uuid_copy(signature_payload.host_uuid, localhost->host_uuid); + uuid_copy(signature_payload.token, token); + uuid_copy(signature_payload.cloud_account_id, bt->cloud_account_id); + memset(signature_payload.client_name, 0, sizeof(signature_payload.client_name)); + strncpyz(signature_payload.client_name, bt->client_name, sizeof(signature_payload.client_name) - 1); + + return XXH3_64bits(&signature_payload, 
sizeof(signature_payload)); } -time_t bearer_create_token(nd_uuid_t *uuid, struct web_client *w) { +static bool bearer_token_save_to_file(nd_uuid_t token, struct bearer_token *bt) { + CLEAN_BUFFER *wb = buffer_create(0, NULL); + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY); + buffer_json_member_add_uint64(wb, "version", 1); + buffer_json_member_add_uuid(wb, "host_uuid", localhost->host_uuid); + buffer_json_member_add_uuid(wb, "token", token); + buffer_json_member_add_uuid(wb, "cloud_account_id", bt->cloud_account_id); + buffer_json_member_add_string(wb, "client_name", bt->client_name); + http_access2buffer_json_array(wb, "access", bt->access); + buffer_json_member_add_string(wb, "user_role", http_id2user_role(bt->user_role)); + buffer_json_member_add_uint64(wb, "created_s", bt->created_s); + buffer_json_member_add_uint64(wb, "expires_s", bt->expires_s); + buffer_json_member_add_uint64(wb, "signature", bearer_token_signature(token, bt)); + buffer_json_finalize(wb); + + char filename[FILENAME_MAX]; + bearer_token_filename(filename, token); + + FILE *fp = fopen(filename, "w"); + if(!fp) { + nd_log(NDLS_DAEMON, NDLP_ERR, "Cannot create file '%s'", filename); + return false; + } + + if(fwrite(buffer_tostring(wb), 1, buffer_strlen(wb), fp) != buffer_strlen(wb)) { + fclose(fp); + unlink(filename); + nd_log(NDLS_DAEMON, NDLP_ERR, "Cannot save file '%s'", filename); + return false; + } + + fclose(fp); + return true; +} + +static time_t bearer_create_token_internal(nd_uuid_t token, HTTP_USER_ROLE user_role, HTTP_ACCESS access, nd_uuid_t cloud_account_id, const char *client_name, time_t created_s, time_t expires_s, bool save) { char uuid_str[UUID_COMPACT_STR_LEN]; + uuid_unparse_lower_compact(token, uuid_str); + + struct bearer_token t = { 0 }, *bt; + const DICTIONARY_ITEM *item = dictionary_set_and_acquire_item(netdata_authorized_bearers, uuid_str, &t, sizeof(t)); + bt = dictionary_acquired_item_value(item); + + if(!bt->created_s) { + 
bt->created_s = created_s; + bt->expires_s = expires_s; + bt->user_role = user_role; + bt->access = access; + + uuid_copy(bt->cloud_account_id, cloud_account_id); + strncpyz(bt->client_name, client_name, sizeof(bt->client_name) - 1); + + if(save) + bearer_token_save_to_file(token, bt); + } + + time_t expiration = bt->expires_s; + + dictionary_acquired_item_release(netdata_authorized_bearers, item); + + return expiration; +} + +time_t bearer_create_token(nd_uuid_t *uuid, HTTP_USER_ROLE user_role, HTTP_ACCESS access, nd_uuid_t cloud_account_id, const char *client_name) { + time_t now_s = now_realtime_sec(); + time_t expires_s = 0; + + struct bearer_token *bt; + dfe_start_read(netdata_authorized_bearers, bt) { + if(bt->expires_s > now_s + 3600 * 2 && // expires in more than 2 hours + user_role == bt->user_role && // the user_role matches + access == bt->access && // the access matches + uuid_eq(cloud_account_id, bt->cloud_account_id) && // the cloud_account_id matches + strncmp(client_name, bt->client_name, sizeof(bt->client_name) - 1) == 0 && // the client_name matches + uuid_parse_flexi(bt_dfe.name, *uuid) == 0) // the token can be parsed + return bt->expires_s; /* dfe will cleanup automatically */ + } + dfe_done(bt); uuid_generate_random(*uuid); - uuid_unparse_lower_compact(*uuid, uuid_str); - - struct bearer_token t = { 0 }, *z; - z = dictionary_set(netdata_authorized_bearers, uuid_str, &t, sizeof(t)); - if(!z->created_s) { - z->created_s = now_monotonic_sec(); - z->expires_s = z->created_s + BEARER_TOKEN_EXPIRATION; - z->user_role = w->user_role; - z->access = w->access; - uuid_copy(z->cloud_account_id, w->auth.cloud_account_id); - strncpyz(z->cloud_user_name, w->auth.client_name, sizeof(z->cloud_account_id) - 1); + expires_s = bearer_create_token_internal( + *uuid, user_role, access, cloud_account_id, client_name, + now_s, now_s + BEARER_TOKEN_EXPIRATION, true); + + bearer_token_cleanup(false); + + return expires_s; +} + +static bool 
bearer_token_parse_json(nd_uuid_t token, struct json_object *jobj, BUFFER *error) { + int64_t version; + nd_uuid_t token_in_file, cloud_account_id, host_uuid; + CLEAN_STRING *client_name = NULL; + HTTP_USER_ROLE user_role = HTTP_USER_ROLE_NONE; + HTTP_ACCESS access = HTTP_ACCESS_NONE; + time_t created_s = 0, expires_s = 0; + uint64_t signature = 0; + + JSONC_PARSE_INT64_OR_ERROR_AND_RETURN(jobj, ".", "version", version, error, true); + JSONC_PARSE_TXT2UUID_OR_ERROR_AND_RETURN(jobj, ".", "host_uuid", host_uuid, error, true); + JSONC_PARSE_TXT2UUID_OR_ERROR_AND_RETURN(jobj, ".", "token", token_in_file, error, true); + JSONC_PARSE_TXT2UUID_OR_ERROR_AND_RETURN(jobj, ".", "cloud_account_id", cloud_account_id, error, true); + JSONC_PARSE_TXT2STRING_OR_ERROR_AND_RETURN(jobj, ".", "client_name", client_name, error, true); + JSONC_PARSE_ARRAY_OF_TXT2BITMAP_OR_ERROR_AND_RETURN(jobj, ".", "access", http_access2id_one, access, error, true); + JSONC_PARSE_TXT2ENUM_OR_ERROR_AND_RETURN(jobj, ".", "user_role", http_user_role2id, user_role, error, true); + JSONC_PARSE_UINT64_OR_ERROR_AND_RETURN(jobj, ".", "created_s", created_s, error, true); + JSONC_PARSE_UINT64_OR_ERROR_AND_RETURN(jobj, ".", "expires_s", expires_s, error, true); + JSONC_PARSE_UINT64_OR_ERROR_AND_RETURN(jobj, ".", "signature", signature, error, true); + + if(uuid_compare(token, token_in_file) != 0) { + buffer_flush(error); + buffer_strcat(error, "token in JSON file does not match the filename"); + return false; + } + + if(uuid_compare(host_uuid, localhost->host_uuid) != 0) { + buffer_flush(error); + buffer_strcat(error, "Host UUID in JSON file does not match our host UUID"); + return false; + } + + if(!created_s || !expires_s || created_s >= expires_s) { + buffer_flush(error); + buffer_strcat(error, "bearer token has invalid dates"); + return false; + } + + struct bearer_token bt = { + .access = access, + .user_role = user_role, + .created_s = created_s, + .expires_s = expires_s, + }; + 
uuid_copy(bt.cloud_account_id, cloud_account_id); + strncpyz(bt.client_name, string2str(client_name), sizeof(bt.client_name) - 1); + + if(signature != bearer_token_signature(token_in_file, &bt)) { + buffer_flush(error); + buffer_strcat(error, "bearer token has invalid signature"); + return false; + } + + bearer_create_token_internal(token, user_role, access, + cloud_account_id, string2str(client_name), + created_s, expires_s, false); + + return true; +} + +static bool bearer_token_load_token(nd_uuid_t token) { + char filename[FILENAME_MAX]; + bearer_token_filename(filename, token); + + CLEAN_BUFFER *wb = buffer_create(0, NULL); + if(!read_txt_file_to_buffer(filename, wb, 1 * 1024 * 1024)) + return false; + + CLEAN_JSON_OBJECT *jobj = json_tokener_parse(buffer_tostring(wb)); + if (jobj == NULL) { + nd_log(NDLS_DAEMON, NDLP_ERR, "Cannot parse bearer token file '%s'", filename); + return false; + } + + CLEAN_BUFFER *error = buffer_create(0, NULL); + bool rc = bearer_token_parse_json(token, jobj, error); + if(!rc) { + nd_log(NDLS_DAEMON, NDLP_ERR, "Failed to parse bearer token file '%s': %s", filename, buffer_tostring(error)); + unlink(filename); + return false; + } + + bearer_token_cleanup(true); + + return true; +} + +static void bearer_tokens_load_from_disk(void) { + bearer_tokens_ensure_path_exists(); + + char path[FILENAME_MAX]; + bearer_tokens_path(path); + + DIR *dir = opendir(path); + if(!dir) { + nd_log(NDLS_DAEMON, NDLP_ERR, "Cannot open directory '%s' to read saved bearer tokens", path); + return; } - bearer_token_cleanup(); + struct dirent *de; + while((de = readdir(dir))) { + if (strcmp(de->d_name, ".") == 0 || strcmp(de->d_name, "..") == 0) + continue; + + ND_UUID uuid = UUID_ZERO; + if(uuid_parse_flexi(de->d_name, uuid.uuid) != 0 || UUIDiszero(uuid)) + continue; + + char filename[FILENAME_MAX]; + filename_from_path_entry(filename, path, de->d_name, NULL); + + if(de->d_type == DT_REG || (de->d_type == DT_LNK && filename_is_file(filename))) + 
bearer_token_load_token(uuid.uuid); + } + + closedir(dir); +} + +bool web_client_bearer_token_auth(struct web_client *w, const char *v) { + bool rc = false; + + if(!uuid_parse_flexi(v, w->auth.bearer_token)) { + char uuid_str[UUID_COMPACT_STR_LEN]; + uuid_unparse_lower_compact(w->auth.bearer_token, uuid_str); + + const DICTIONARY_ITEM *item = dictionary_get_and_acquire_item(netdata_authorized_bearers, uuid_str); + if(!item && bearer_token_load_token(w->auth.bearer_token)) + item = dictionary_get_and_acquire_item(netdata_authorized_bearers, uuid_str); + + if(item) { + struct bearer_token *bt = dictionary_acquired_item_value(item); + if (bt->expires_s > now_realtime_sec()) { + strncpyz(w->auth.client_name, bt->client_name, sizeof(w->auth.client_name) - 1); + uuid_copy(w->auth.cloud_account_id, bt->cloud_account_id); + web_client_set_permissions(w, bt->access, bt->user_role, WEB_CLIENT_FLAG_AUTH_BEARER); + rc = true; + } + + dictionary_acquired_item_release(netdata_authorized_bearers, item); + } + } + else + nd_log(NDLS_DAEMON, NDLP_NOTICE, "Invalid bearer token '%s' received.", v); + + return rc; +} + +void bearer_tokens_init(void) { + netdata_is_protected_by_bearer = + config_get_boolean(CONFIG_SECTION_WEB, "bearer token protection", netdata_is_protected_by_bearer); + + netdata_authorized_bearers = dictionary_create_advanced( + DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, + NULL, sizeof(struct bearer_token)); - return now_realtime_sec() + BEARER_TOKEN_EXPIRATION; + bearer_tokens_load_from_disk(); } bool extract_bearer_token_from_request(struct web_client *w, char *dst, size_t dst_len) { diff --git a/src/web/api/http_auth.h b/src/web/api/http_auth.h index f339a44cf0d26e..0b01fdb1eba246 100644 --- a/src/web/api/http_auth.h +++ b/src/web/api/http_auth.h @@ -11,7 +11,7 @@ extern bool netdata_is_protected_by_bearer; bool extract_bearer_token_from_request(struct web_client *w, char *dst, size_t dst_len); -time_t bearer_create_token(nd_uuid_t *uuid, struct 
web_client *w); +time_t bearer_create_token(nd_uuid_t *uuid, HTTP_USER_ROLE user_role, HTTP_ACCESS access, nd_uuid_t cloud_account_id, const char *client_name); bool web_client_bearer_token_auth(struct web_client *w, const char *v); static inline bool http_access_user_has_enough_access_level_for_endpoint(HTTP_ACCESS user, HTTP_ACCESS endpoint) { diff --git a/src/web/api/ilove/ilove.h b/src/web/api/ilove/ilove.h deleted file mode 100644 index 010c19c6b86ddc..00000000000000 --- a/src/web/api/ilove/ilove.h +++ /dev/null @@ -1,13 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_WEB_API_ILOVE_H -#define NETDATA_WEB_API_ILOVE_H 1 - -#include "libnetdata/libnetdata.h" -#include "web/server/web_client.h" - -int web_client_api_request_v2_ilove(RRDHOST *host, struct web_client *w, char *url); - -#include "web/api/web_api_v1.h" - -#endif /* NETDATA_WEB_API_ILOVE_H */ diff --git a/src/web/api/maps/contexts_alert_statuses.c b/src/web/api/maps/contexts_alert_statuses.c new file mode 100644 index 00000000000000..d3565c9e8cd64c --- /dev/null +++ b/src/web/api/maps/contexts_alert_statuses.c @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "contexts_alert_statuses.h" + +static struct { + const char *name; + uint32_t hash; + CONTEXTS_ALERT_STATUS value; +} contexts_alert_status[] = { + {"uninitialized" , 0 , CONTEXT_ALERT_UNINITIALIZED} + , {"undefined" , 0 , CONTEXT_ALERT_UNDEFINED} + , {"clear" , 0 , CONTEXT_ALERT_CLEAR} + , {"raised" , 0 , CONTEXT_ALERT_RAISED} + , {"active" , 0 , CONTEXT_ALERT_RAISED} + , {"warning" , 0 , CONTEXT_ALERT_WARNING} + , {"critical" , 0 , CONTEXT_ALERT_CRITICAL} + , {NULL , 0 , 0} +}; + +CONTEXTS_ALERT_STATUS contexts_alert_status_str_to_id(char *o) { + CONTEXTS_ALERT_STATUS ret = 0; + char *tok; + + while(o && *o && (tok = strsep_skip_consecutive_separators(&o, ", |"))) { + if(!*tok) continue; + + uint32_t hash = simple_hash(tok); + int i; + for(i = 0; contexts_alert_status[i].name ; i++) { + if 
(unlikely(hash == contexts_alert_status[i].hash && !strcmp(tok, contexts_alert_status[i].name))) { + ret |= contexts_alert_status[i].value; + break; + } + } + } + + return ret; +} + +void contexts_alerts_status_to_buffer_json_array(BUFFER *wb, const char *key, + CONTEXTS_ALERT_STATUS options) { + buffer_json_member_add_array(wb, key); + + CONTEXTS_ALERT_STATUS used = 0; // to prevent adding duplicates + for(int i = 0; contexts_alert_status[i].name ; i++) { + if (unlikely((contexts_alert_status[i].value & options) && !(contexts_alert_status[i].value & used))) { + const char *name = contexts_alert_status[i].name; + used |= contexts_alert_status[i].value; + + buffer_json_add_array_item_string(wb, name); + } + } + + buffer_json_array_close(wb); +} + +void contexts_alert_statuses_init(void) { + for(size_t i = 0; contexts_alert_status[i].name ; i++) + contexts_alert_status[i].hash = simple_hash(contexts_alert_status[i].name); +} diff --git a/src/web/api/maps/contexts_alert_statuses.h b/src/web/api/maps/contexts_alert_statuses.h new file mode 100644 index 00000000000000..1c38cb97656aab --- /dev/null +++ b/src/web/api/maps/contexts_alert_statuses.h @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_CONTEXTS_ALERT_STATUSES_H +#define NETDATA_CONTEXTS_ALERT_STATUSES_H + +#include "libnetdata/libnetdata.h" + +typedef enum contexts_alert_status { + CONTEXT_ALERT_UNINITIALIZED = (1 << 6), // include UNINITIALIZED alerts + CONTEXT_ALERT_UNDEFINED = (1 << 7), // include UNDEFINED alerts + CONTEXT_ALERT_CLEAR = (1 << 8), // include CLEAR alerts + CONTEXT_ALERT_RAISED = (1 << 9), // include WARNING & CRITICAL alerts + CONTEXT_ALERT_WARNING = (1 << 10), // include WARNING alerts + CONTEXT_ALERT_CRITICAL = (1 << 11), // include CRITICAL alerts +} CONTEXTS_ALERT_STATUS; + +#define CONTEXTS_ALERT_STATUSES (CONTEXT_ALERT_UNINITIALIZED | CONTEXT_ALERT_UNDEFINED | CONTEXT_ALERT_CLEAR | \ + CONTEXT_ALERT_RAISED | CONTEXT_ALERT_WARNING | 
CONTEXT_ALERT_CRITICAL) + +CONTEXTS_ALERT_STATUS contexts_alert_status_str_to_id(char *o); +void contexts_alerts_status_to_buffer_json_array(BUFFER *wb, const char *key, + CONTEXTS_ALERT_STATUS options); + +void contexts_alert_statuses_init(void); + +#endif //NETDATA_CONTEXTS_ALERT_STATUSES_H diff --git a/src/web/api/maps/contexts_options.c b/src/web/api/maps/contexts_options.c new file mode 100644 index 00000000000000..22e50e8d74f9c4 --- /dev/null +++ b/src/web/api/maps/contexts_options.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "contexts_options.h" + +static struct { + const char *name; + uint32_t hash; + CONTEXTS_OPTIONS value; +} contexts_options[] = { + {"minify" , 0 , CONTEXTS_OPTION_MINIFY} + , {"debug" , 0 , CONTEXTS_OPTION_DEBUG} + , {"config" , 0 , CONTEXTS_OPTION_ALERTS_WITH_CONFIGURATIONS} + , {"instances" , 0 , CONTEXTS_OPTION_ALERTS_WITH_INSTANCES} + , {"values" , 0 , CONTEXTS_OPTION_ALERTS_WITH_VALUES} + , {"summary" , 0 , CONTEXTS_OPTION_ALERTS_WITH_SUMMARY} + , {NULL , 0 , 0} +}; + +CONTEXTS_OPTIONS contexts_options_str_to_id(char *o) { + CONTEXTS_OPTIONS ret = 0; + char *tok; + + while(o && *o && (tok = strsep_skip_consecutive_separators(&o, ", |"))) { + if(!*tok) continue; + + uint32_t hash = simple_hash(tok); + int i; + for(i = 0; contexts_options[i].name ; i++) { + if (unlikely(hash == contexts_options[i].hash && !strcmp(tok, contexts_options[i].name))) { + ret |= contexts_options[i].value; + break; + } + } + } + + return ret; +} + +void contexts_options_to_buffer_json_array(BUFFER *wb, const char *key, CONTEXTS_OPTIONS options) { + buffer_json_member_add_array(wb, key); + + CONTEXTS_OPTIONS used = 0; // to prevent adding duplicates + for(int i = 0; contexts_options[i].name ; i++) { + if (unlikely((contexts_options[i].value & options) && !(contexts_options[i].value & used))) { + const char *name = contexts_options[i].name; + used |= contexts_options[i].value; + + buffer_json_add_array_item_string(wb, name); + } 
+ } + + buffer_json_array_close(wb); +} + +void contexts_options_init(void) { + for(size_t i = 0; contexts_options[i].name ; i++) + contexts_options[i].hash = simple_hash(contexts_options[i].name); +} diff --git a/src/web/api/maps/contexts_options.h b/src/web/api/maps/contexts_options.h new file mode 100644 index 00000000000000..a21bd76cab994c --- /dev/null +++ b/src/web/api/maps/contexts_options.h @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_CONTEXTS_OPTIONS_H +#define NETDATA_CONTEXTS_OPTIONS_H + +#include "libnetdata/libnetdata.h" + +typedef enum contexts_options { + CONTEXTS_OPTION_MINIFY = (1 << 0), // remove JSON spaces and newlines from JSON output + CONTEXTS_OPTION_DEBUG = (1 << 1), // show the request + CONTEXTS_OPTION_ALERTS_WITH_CONFIGURATIONS = (1 << 2), // include alert configurations (used by /api/v2/alert_transitions) + CONTEXTS_OPTION_ALERTS_WITH_INSTANCES = (1 << 3), // include alert instances (used by /api/v2/alerts) + CONTEXTS_OPTION_ALERTS_WITH_VALUES = (1 << 4), // include alert latest values (used by /api/v2/alerts) + CONTEXTS_OPTION_ALERTS_WITH_SUMMARY = (1 << 5), // include alerts summary counters (used by /api/v2/alerts) +} CONTEXTS_OPTIONS; + +CONTEXTS_OPTIONS contexts_options_str_to_id(char *o); +void contexts_options_to_buffer_json_array(BUFFER *wb, const char *key, CONTEXTS_OPTIONS options); + +void contexts_options_init(void); + +#endif //NETDATA_CONTEXTS_OPTIONS_H diff --git a/src/web/api/maps/datasource_formats.c b/src/web/api/maps/datasource_formats.c new file mode 100644 index 00000000000000..33e1e74579229d --- /dev/null +++ b/src/web/api/maps/datasource_formats.c @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "datasource_formats.h" + +static struct { + const char *name; + uint32_t hash; + DATASOURCE_FORMAT value; +} google_data_formats[] = { + // this is not an error - when Google requests json, it expects javascript + // 
https://developers.google.com/chart/interactive/docs/dev/implementing_data_source#responseformat + {"json", 0, DATASOURCE_DATATABLE_JSONP} + , {"html", 0, DATASOURCE_HTML} + , {"csv", 0, DATASOURCE_CSV} + , {"tsv-excel", 0, DATASOURCE_TSV} + + // terminator + , {NULL, 0, 0} +}; + +inline DATASOURCE_FORMAT google_data_format_str_to_id(char *name) { + uint32_t hash = simple_hash(name); + int i; + + for(i = 0; google_data_formats[i].name ; i++) { + if (unlikely(hash == google_data_formats[i].hash && !strcmp(name, google_data_formats[i].name))) { + return google_data_formats[i].value; + } + } + + return DATASOURCE_JSON; +} + +// -------------------------------------------------------------------------------------------------------------------- + +static struct { + const char *name; + uint32_t hash; + DATASOURCE_FORMAT value; +} datasource_formats[] = { + { "datatable" , 0 , DATASOURCE_DATATABLE_JSON} + , {"datasource" , 0 , DATASOURCE_DATATABLE_JSONP} + , {"json" , 0 , DATASOURCE_JSON} + , {"json2" , 0 , DATASOURCE_JSON2} + , {"jsonp" , 0 , DATASOURCE_JSONP} + , {"ssv" , 0 , DATASOURCE_SSV} + , {"csv" , 0 , DATASOURCE_CSV} + , {"tsv" , 0 , DATASOURCE_TSV} + , {"tsv-excel" , 0 , DATASOURCE_TSV} + , {"html" , 0 , DATASOURCE_HTML} + , {"array" , 0 , DATASOURCE_JS_ARRAY} + , {"ssvcomma" , 0 , DATASOURCE_SSV_COMMA} + , {"csvjsonarray" , 0 , DATASOURCE_CSV_JSON_ARRAY} + , {"markdown" , 0 , DATASOURCE_CSV_MARKDOWN} + + // terminator + , {NULL, 0, 0} +}; + +DATASOURCE_FORMAT datasource_format_str_to_id(char *name) { + uint32_t hash = simple_hash(name); + int i; + + for(i = 0; datasource_formats[i].name ; i++) { + if (unlikely(hash == datasource_formats[i].hash && !strcmp(name, datasource_formats[i].name))) { + return datasource_formats[i].value; + } + } + + return DATASOURCE_JSON; +} + +const char *rrdr_format_to_string(DATASOURCE_FORMAT format) { + for(size_t i = 0; datasource_formats[i].name ;i++) + if(unlikely(datasource_formats[i].value == format)) + return 
datasource_formats[i].name; + + return "unknown"; +} + +// -------------------------------------------------------------------------------------------------------------------- + +void datasource_formats_init(void) { + for(size_t i = 0; datasource_formats[i].name ; i++) + datasource_formats[i].hash = simple_hash(datasource_formats[i].name); + + for(size_t i = 0; google_data_formats[i].name ; i++) + google_data_formats[i].hash = simple_hash(google_data_formats[i].name); +} diff --git a/src/web/api/maps/datasource_formats.h b/src/web/api/maps/datasource_formats.h new file mode 100644 index 00000000000000..50d8a82b410cbd --- /dev/null +++ b/src/web/api/maps/datasource_formats.h @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_DATASOURCE_FORMATS_H +#define NETDATA_DATASOURCE_FORMATS_H + +#include "libnetdata/libnetdata.h" + +// type of JSON generations +typedef enum { + DATASOURCE_JSON = 0, + DATASOURCE_DATATABLE_JSON, + DATASOURCE_DATATABLE_JSONP, + DATASOURCE_SSV, + DATASOURCE_CSV, + DATASOURCE_JSONP, + DATASOURCE_TSV, + DATASOURCE_HTML, + DATASOURCE_JS_ARRAY, + DATASOURCE_SSV_COMMA, + DATASOURCE_CSV_JSON_ARRAY, + DATASOURCE_CSV_MARKDOWN, + DATASOURCE_JSON2, +} DATASOURCE_FORMAT; + +DATASOURCE_FORMAT datasource_format_str_to_id(char *name); +const char *rrdr_format_to_string(DATASOURCE_FORMAT format); + +DATASOURCE_FORMAT google_data_format_str_to_id(char *name); + +void datasource_formats_init(void); + +#endif //NETDATA_DATASOURCE_FORMATS_H diff --git a/src/web/api/maps/maps.h b/src/web/api/maps/maps.h new file mode 100644 index 00000000000000..25d210235bb332 --- /dev/null +++ b/src/web/api/maps/maps.h @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_MAPS_H +#define NETDATA_MAPS_H + +#include "libnetdata/libnetdata.h" +#include "datasource_formats.h" +#include "contexts_options.h" +#include "rrdr_options.h" +#include "contexts_alert_statuses.h" + +#endif //NETDATA_MAPS_H diff --git 
a/src/web/api/maps/rrdr_options.c b/src/web/api/maps/rrdr_options.c new file mode 100644 index 00000000000000..41161d8023be72 --- /dev/null +++ b/src/web/api/maps/rrdr_options.c @@ -0,0 +1,139 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "rrdr_options.h" + +static struct { + const char *name; + uint32_t hash; + RRDR_OPTIONS value; +} rrdr_options[] = { + { "nonzero" , 0 , RRDR_OPTION_NONZERO} + , {"flip" , 0 , RRDR_OPTION_REVERSED} + , {"reversed" , 0 , RRDR_OPTION_REVERSED} + , {"reverse" , 0 , RRDR_OPTION_REVERSED} + , {"jsonwrap" , 0 , RRDR_OPTION_JSON_WRAP} + , {"min2max" , 0 , RRDR_OPTION_DIMS_MIN2MAX} // rrdr2value() only + , {"average" , 0 , RRDR_OPTION_DIMS_AVERAGE} // rrdr2value() only + , {"min" , 0 , RRDR_OPTION_DIMS_MIN} // rrdr2value() only + , {"max" , 0 , RRDR_OPTION_DIMS_MAX} // rrdr2value() only + , {"ms" , 0 , RRDR_OPTION_MILLISECONDS} + , {"milliseconds" , 0 , RRDR_OPTION_MILLISECONDS} + , {"absolute" , 0 , RRDR_OPTION_ABSOLUTE} + , {"abs" , 0 , RRDR_OPTION_ABSOLUTE} + , {"absolute_sum" , 0 , RRDR_OPTION_ABSOLUTE} + , {"absolute-sum" , 0 , RRDR_OPTION_ABSOLUTE} + , {"display_absolute" , 0 , RRDR_OPTION_DISPLAY_ABS} + , {"display-absolute" , 0 , RRDR_OPTION_DISPLAY_ABS} + , {"seconds" , 0 , RRDR_OPTION_SECONDS} + , {"null2zero" , 0 , RRDR_OPTION_NULL2ZERO} + , {"objectrows" , 0 , RRDR_OPTION_OBJECTSROWS} + , {"google_json" , 0 , RRDR_OPTION_GOOGLE_JSON} + , {"google-json" , 0 , RRDR_OPTION_GOOGLE_JSON} + , {"percentage" , 0 , RRDR_OPTION_PERCENTAGE} + , {"unaligned" , 0 , RRDR_OPTION_NOT_ALIGNED} + , {"match_ids" , 0 , RRDR_OPTION_MATCH_IDS} + , {"match-ids" , 0 , RRDR_OPTION_MATCH_IDS} + , {"match_names" , 0 , RRDR_OPTION_MATCH_NAMES} + , {"match-names" , 0 , RRDR_OPTION_MATCH_NAMES} + , {"anomaly-bit" , 0 , RRDR_OPTION_ANOMALY_BIT} + , {"selected-tier" , 0 , RRDR_OPTION_SELECTED_TIER} + , {"raw" , 0 , RRDR_OPTION_RETURN_RAW} + , {"jw-anomaly-rates" , 0 , RRDR_OPTION_RETURN_JWAR} + , {"natural-points" , 0 , 
RRDR_OPTION_NATURAL_POINTS} + , {"virtual-points" , 0 , RRDR_OPTION_VIRTUAL_POINTS} + , {"all-dimensions" , 0 , RRDR_OPTION_ALL_DIMENSIONS} + , {"details" , 0 , RRDR_OPTION_SHOW_DETAILS} + , {"debug" , 0 , RRDR_OPTION_DEBUG} + , {"plan" , 0 , RRDR_OPTION_DEBUG} + , {"minify" , 0 , RRDR_OPTION_MINIFY} + , {"group-by-labels" , 0 , RRDR_OPTION_GROUP_BY_LABELS} + , {"label-quotes" , 0 , RRDR_OPTION_LABEL_QUOTES} + , {NULL , 0 , 0} +}; + +RRDR_OPTIONS rrdr_options_parse_one(const char *o) { + RRDR_OPTIONS ret = 0; + + if(!o || !*o) return ret; + + uint32_t hash = simple_hash(o); + int i; + for(i = 0; rrdr_options[i].name ; i++) { + if (unlikely(hash == rrdr_options[i].hash && !strcmp(o, rrdr_options[i].name))) { + ret |= rrdr_options[i].value; + break; + } + } + + return ret; +} + +RRDR_OPTIONS rrdr_options_parse(char *o) { + RRDR_OPTIONS ret = 0; + char *tok; + + while(o && *o && (tok = strsep_skip_consecutive_separators(&o, ", |"))) { + if(!*tok) continue; + ret |= rrdr_options_parse_one(tok); + } + + return ret; +} + +void rrdr_options_to_buffer_json_array(BUFFER *wb, const char *key, RRDR_OPTIONS options) { + buffer_json_member_add_array(wb, key); + + RRDR_OPTIONS used = 0; // to prevent adding duplicates + for(int i = 0; rrdr_options[i].name ; i++) { + if (unlikely((rrdr_options[i].value & options) && !(rrdr_options[i].value & used))) { + const char *name = rrdr_options[i].name; + used |= rrdr_options[i].value; + + buffer_json_add_array_item_string(wb, name); + } + } + + buffer_json_array_close(wb); +} + +void rrdr_options_to_buffer(BUFFER *wb, RRDR_OPTIONS options) { + RRDR_OPTIONS used = 0; // to prevent adding duplicates + size_t added = 0; + for(int i = 0; rrdr_options[i].name ; i++) { + if (unlikely((rrdr_options[i].value & options) && !(rrdr_options[i].value & used))) { + const char *name = rrdr_options[i].name; + used |= rrdr_options[i].value; + + if(added++) buffer_strcat(wb, " "); + buffer_strcat(wb, name); + } + } +} + +void 
web_client_api_request_data_vX_options_to_string(char *buf, size_t size, RRDR_OPTIONS options) { + char *write = buf; + char *end = &buf[size - 1]; + + RRDR_OPTIONS used = 0; // to prevent adding duplicates + int added = 0; + for(int i = 0; rrdr_options[i].name ; i++) { + if (unlikely((rrdr_options[i].value & options) && !(rrdr_options[i].value & used))) { + const char *name = rrdr_options[i].name; + used |= rrdr_options[i].value; + + if(added && write < end) + *write++ = ','; + + while(*name && write < end) + *write++ = *name++; + + added++; + } + } + *write = *end = '\0'; +} + +void rrdr_options_init(void) { + for(size_t i = 0; rrdr_options[i].name ; i++) + rrdr_options[i].hash = simple_hash(rrdr_options[i].name); +} diff --git a/src/web/api/maps/rrdr_options.h b/src/web/api/maps/rrdr_options.h new file mode 100644 index 00000000000000..4b6697dba29ee0 --- /dev/null +++ b/src/web/api/maps/rrdr_options.h @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_RRDR_OPTIONS_H +#define NETDATA_RRDR_OPTIONS_H + +#include "libnetdata/libnetdata.h" + +typedef enum rrdr_options { + RRDR_OPTION_NONZERO = (1 << 0), // don't output dimensions with just zero values + RRDR_OPTION_REVERSED = (1 << 1), // output the rows in reverse order (oldest to newest) + RRDR_OPTION_ABSOLUTE = (1 << 2), // values positive, for DATASOURCE_SSV before summing + RRDR_OPTION_DIMS_MIN2MAX = (1 << 3), // when adding dimensions, use max - min, instead of sum + RRDR_OPTION_DIMS_AVERAGE = (1 << 4), // when adding dimensions, use average, instead of sum + RRDR_OPTION_DIMS_MIN = (1 << 5), // when adding dimensions, use minimum, instead of sum + RRDR_OPTION_DIMS_MAX = (1 << 6), // when adding dimensions, use maximum, instead of sum + RRDR_OPTION_SECONDS = (1 << 7), // output seconds, instead of dates + RRDR_OPTION_MILLISECONDS = (1 << 8), // output milliseconds, instead of dates + RRDR_OPTION_NULL2ZERO = (1 << 9), // do not show nulls, convert them to zeros + 
RRDR_OPTION_OBJECTSROWS = (1 << 10), // each row of values should be an object, not an array + RRDR_OPTION_GOOGLE_JSON = (1 << 11), // comply with google JSON/JSONP specs + RRDR_OPTION_JSON_WRAP = (1 << 12), // wrap the response in a JSON header with info about the result + RRDR_OPTION_LABEL_QUOTES = (1 << 13), // in CSV output, wrap header labels in double quotes + RRDR_OPTION_PERCENTAGE = (1 << 14), // give values as percentage of total + RRDR_OPTION_NOT_ALIGNED = (1 << 15), // do not align charts for persistent timeframes + RRDR_OPTION_DISPLAY_ABS = (1 << 16), // for badges, display the absolute value, but calculate colors with sign + RRDR_OPTION_MATCH_IDS = (1 << 17), // when filtering dimensions, match only IDs + RRDR_OPTION_MATCH_NAMES = (1 << 18), // when filtering dimensions, match only names + RRDR_OPTION_NATURAL_POINTS = (1 << 19), // return the natural points of the database + RRDR_OPTION_VIRTUAL_POINTS = (1 << 20), // return virtual points + RRDR_OPTION_ANOMALY_BIT = (1 << 21), // Return the anomaly bit stored in each collected_number + RRDR_OPTION_RETURN_RAW = (1 << 22), // Return raw data for aggregating across multiple nodes + RRDR_OPTION_RETURN_JWAR = (1 << 23), // Return anomaly rates in jsonwrap + RRDR_OPTION_SELECTED_TIER = (1 << 24), // Use the selected tier for the query + RRDR_OPTION_ALL_DIMENSIONS = (1 << 25), // Return the full dimensions list + RRDR_OPTION_SHOW_DETAILS = (1 << 26), // v2 returns detailed object tree + RRDR_OPTION_DEBUG = (1 << 27), // v2 returns request description + RRDR_OPTION_MINIFY = (1 << 28), // remove JSON spaces and newlines from JSON output + RRDR_OPTION_GROUP_BY_LABELS = (1 << 29), // v2 returns flattened labels per dimension of the chart + + // internal ones - not to be exposed to the API + RRDR_OPTION_INTERNAL_AR = (1 << 31), // internal use only, to let the formatters know we want to render the anomaly rate +} RRDR_OPTIONS; + +void rrdr_options_to_buffer(BUFFER *wb, RRDR_OPTIONS options); +void 
rrdr_options_to_buffer_json_array(BUFFER *wb, const char *key, RRDR_OPTIONS options); +void web_client_api_request_data_vX_options_to_string(char *buf, size_t size, RRDR_OPTIONS options); +void rrdr_options_init(void); + +RRDR_OPTIONS rrdr_options_parse(char *o); +RRDR_OPTIONS rrdr_options_parse_one(const char *o); + +#endif //NETDATA_RRDR_OPTIONS_H diff --git a/src/web/api/queries/rrdr.h b/src/web/api/queries/rrdr.h index d36d3f5b3bd5dd..860a375c9ff1b6 100644 --- a/src/web/api/queries/rrdr.h +++ b/src/web/api/queries/rrdr.h @@ -17,62 +17,6 @@ typedef enum tier_query_fetch { TIER_QUERY_FETCH_AVERAGE } TIER_QUERY_FETCH; -typedef enum rrdr_options { - RRDR_OPTION_NONZERO = (1 << 0), // don't output dimensions with just zero values - RRDR_OPTION_REVERSED = (1 << 1), // output the rows in reverse order (oldest to newest) - RRDR_OPTION_ABSOLUTE = (1 << 2), // values positive, for DATASOURCE_SSV before summing - RRDR_OPTION_DIMS_MIN2MAX = (1 << 3), // when adding dimensions, use max - min, instead of sum - RRDR_OPTION_DIMS_AVERAGE = (1 << 4), // when adding dimensions, use average, instead of sum - RRDR_OPTION_DIMS_MIN = (1 << 5), // when adding dimensions, use minimum, instead of sum - RRDR_OPTION_DIMS_MAX = (1 << 6), // when adding dimensions, use maximum, instead of sum - RRDR_OPTION_SECONDS = (1 << 7), // output seconds, instead of dates - RRDR_OPTION_MILLISECONDS = (1 << 8), // output milliseconds, instead of dates - RRDR_OPTION_NULL2ZERO = (1 << 9), // do not show nulls, convert them to zeros - RRDR_OPTION_OBJECTSROWS = (1 << 10), // each row of values should be an object, not an array - RRDR_OPTION_GOOGLE_JSON = (1 << 11), // comply with google JSON/JSONP specs - RRDR_OPTION_JSON_WRAP = (1 << 12), // wrap the response in a JSON header with info about the result - RRDR_OPTION_LABEL_QUOTES = (1 << 13), // in CSV output, wrap header labels in double quotes - RRDR_OPTION_PERCENTAGE = (1 << 14), // give values as percentage of total - RRDR_OPTION_NOT_ALIGNED = (1 << 
15), // do not align charts for persistent timeframes - RRDR_OPTION_DISPLAY_ABS = (1 << 16), // for badges, display the absolute value, but calculate colors with sign - RRDR_OPTION_MATCH_IDS = (1 << 17), // when filtering dimensions, match only IDs - RRDR_OPTION_MATCH_NAMES = (1 << 18), // when filtering dimensions, match only names - RRDR_OPTION_NATURAL_POINTS = (1 << 19), // return the natural points of the database - RRDR_OPTION_VIRTUAL_POINTS = (1 << 20), // return virtual points - RRDR_OPTION_ANOMALY_BIT = (1 << 21), // Return the anomaly bit stored in each collected_number - RRDR_OPTION_RETURN_RAW = (1 << 22), // Return raw data for aggregating across multiple nodes - RRDR_OPTION_RETURN_JWAR = (1 << 23), // Return anomaly rates in jsonwrap - RRDR_OPTION_SELECTED_TIER = (1 << 24), // Use the selected tier for the query - RRDR_OPTION_ALL_DIMENSIONS = (1 << 25), // Return the full dimensions list - RRDR_OPTION_SHOW_DETAILS = (1 << 26), // v2 returns detailed object tree - RRDR_OPTION_DEBUG = (1 << 27), // v2 returns request description - RRDR_OPTION_MINIFY = (1 << 28), // remove JSON spaces and newlines from JSON output - RRDR_OPTION_GROUP_BY_LABELS = (1 << 29), // v2 returns flattened labels per dimension of the chart - - // internal ones - not to be exposed to the API - RRDR_OPTION_INTERNAL_AR = (1 << 31), // internal use only, to let the formatters know we want to render the anomaly rate -} RRDR_OPTIONS; - -typedef enum context_v2_options { - CONTEXT_V2_OPTION_MINIFY = (1 << 0), // remove JSON spaces and newlines from JSON output - CONTEXT_V2_OPTION_DEBUG = (1 << 1), // show the request - CONTEXT_V2_OPTION_ALERTS_WITH_CONFIGURATIONS = (1 << 2), // include alert configurations (used by /api/v2/alert_transitions) - CONTEXT_V2_OPTION_ALERTS_WITH_INSTANCES = (1 << 3), // include alert instances (used by /api/v2/alerts) - CONTEXT_V2_OPTION_ALERTS_WITH_VALUES = (1 << 4), // include alert latest values (used by /api/v2/alerts) - CONTEXT_V2_OPTION_ALERTS_WITH_SUMMARY 
= (1 << 5), // include alerts summary counters (used by /api/v2/alerts) -} CONTEXTS_V2_OPTIONS; - -typedef enum context_v2_alert_status { - CONTEXT_V2_ALERT_UNINITIALIZED = (1 << 5), // include UNINITIALIZED alerts - CONTEXT_V2_ALERT_UNDEFINED = (1 << 6), // include UNDEFINED alerts - CONTEXT_V2_ALERT_CLEAR = (1 << 7), // include CLEAR alerts - CONTEXT_V2_ALERT_RAISED = (1 << 8), // include WARNING & CRITICAL alerts - CONTEXT_V2_ALERT_WARNING = (1 << 9), // include WARNING alerts - CONTEXT_V2_ALERT_CRITICAL = (1 << 10), // include CRITICAL alerts -} CONTEXTS_V2_ALERT_STATUS; - -#define CONTEXTS_V2_ALERT_STATUSES (CONTEXT_V2_ALERT_UNINITIALIZED|CONTEXT_V2_ALERT_UNDEFINED|CONTEXT_V2_ALERT_CLEAR|CONTEXT_V2_ALERT_RAISED|CONTEXT_V2_ALERT_WARNING|CONTEXT_V2_ALERT_CRITICAL) - typedef enum __attribute__ ((__packed__)) rrdr_value_flag { // IMPORTANT: diff --git a/src/web/api/v1/api_v1_aclk.c b/src/web/api/v1/api_v1_aclk.c new file mode 100644 index 00000000000000..b9878db2fb41bf --- /dev/null +++ b/src/web/api/v1/api_v1_aclk.c @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v1_calls.h" + +int api_v1_aclk(RRDHOST *host, struct web_client *w, char *url) { + UNUSED(url); + UNUSED(host); + if (!netdata_ready) return HTTP_RESP_SERVICE_UNAVAILABLE; + + BUFFER *wb = w->response.data; + buffer_flush(wb); + char *str = aclk_state_json(); + buffer_strcat(wb, str); + freez(str); + + wb->content_type = CT_APPLICATION_JSON; + buffer_no_cacheable(wb); + return HTTP_RESP_OK; +} + diff --git a/src/web/api/v1/api_v1_alarms.c b/src/web/api/v1/api_v1_alarms.c new file mode 100644 index 00000000000000..4f3af74b5ca0a2 --- /dev/null +++ b/src/web/api/v1/api_v1_alarms.c @@ -0,0 +1,153 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v1_calls.h" + +static int web_client_api_request_v1_alarms_select(char *url) { + int all = 0; + while(url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if (!value || !*value) continue; + + 
if(!strcmp(value, "all") || !strcmp(value, "all=true")) all = 1; + else if(!strcmp(value, "active") || !strcmp(value, "active=true")) all = 0; + } + + return all; +} + +int api_v1_alarms(RRDHOST *host, struct web_client *w, char *url) { + int all = web_client_api_request_v1_alarms_select(url); + + buffer_flush(w->response.data); + w->response.data->content_type = CT_APPLICATION_JSON; + health_alarms2json(host, w->response.data, all); + buffer_no_cacheable(w->response.data); + return HTTP_RESP_OK; +} + +int api_v1_alarms_values(RRDHOST *host, struct web_client *w, char *url) { + int all = web_client_api_request_v1_alarms_select(url); + + buffer_flush(w->response.data); + w->response.data->content_type = CT_APPLICATION_JSON; + health_alarms_values2json(host, w->response.data, all); + buffer_no_cacheable(w->response.data); + return HTTP_RESP_OK; +} + +int api_v1_alarm_count(RRDHOST *host, struct web_client *w, char *url) { + RRDCALC_STATUS status = RRDCALC_STATUS_RAISED; + BUFFER *contexts = NULL; + + buffer_flush(w->response.data); + buffer_sprintf(w->response.data, "["); + + while(url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if(!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if(!name || !*name) continue; + if(!value || !*value) continue; + + netdata_log_debug(D_WEB_CLIENT, "%llu: API v1 alarm_count query param '%s' with value '%s'", w->id, name, value); + + char* p = value; + if(!strcmp(name, "status")) { + while ((*p = toupper(*p))) p++; + if (!strcmp("CRITICAL", value)) status = RRDCALC_STATUS_CRITICAL; + else if (!strcmp("WARNING", value)) status = RRDCALC_STATUS_WARNING; + else if (!strcmp("UNINITIALIZED", value)) status = RRDCALC_STATUS_UNINITIALIZED; + else if (!strcmp("UNDEFINED", value)) status = RRDCALC_STATUS_UNDEFINED; + else if (!strcmp("REMOVED", value)) status = RRDCALC_STATUS_REMOVED; + else if (!strcmp("CLEAR", value)) status = RRDCALC_STATUS_CLEAR; + } + else 
if(!strcmp(name, "context") || !strcmp(name, "ctx")) { + if(!contexts) contexts = buffer_create(255, &netdata_buffers_statistics.buffers_api); + buffer_strcat(contexts, "|"); + buffer_strcat(contexts, value); + } + } + + health_aggregate_alarms(host, w->response.data, contexts, status); + + buffer_sprintf(w->response.data, "]\n"); + w->response.data->content_type = CT_APPLICATION_JSON; + buffer_no_cacheable(w->response.data); + + buffer_free(contexts); + return 200; +} + +int api_v1_alarm_log(RRDHOST *host, struct web_client *w, char *url) { + time_t after = 0; + char *chart = NULL; + + while(url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if (!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if(!name || !*name) continue; + if(!value || !*value) continue; + + if (!strcmp(name, "after")) after = (time_t) strtoul(value, NULL, 0); + else if (!strcmp(name, "chart")) chart = value; + } + + buffer_flush(w->response.data); + w->response.data->content_type = CT_APPLICATION_JSON; + sql_health_alarm_log2json(host, w->response.data, after, chart); + return HTTP_RESP_OK; +} + +int api_v1_variable(RRDHOST *host, struct web_client *w, char *url) { + int ret = HTTP_RESP_BAD_REQUEST; + char *chart = NULL; + char *variable = NULL; + + buffer_flush(w->response.data); + + while(url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if(!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if(!name || !*name) continue; + if(!value || !*value) continue; + + // name and value are now the parameters + // they are not null and not empty + + if(!strcmp(name, "chart")) chart = value; + else if(!strcmp(name, "variable")) variable = value; + } + + if(!chart || !*chart || !variable || !*variable) { + buffer_sprintf(w->response.data, "A chart= and a variable= are required."); + goto cleanup; + } + + RRDSET *st = rrdset_find(host, chart); + if(!st) st = 
rrdset_find_byname(host, chart); + if(!st) { + buffer_strcat(w->response.data, "Chart is not found: "); + buffer_strcat_htmlescape(w->response.data, chart); + ret = HTTP_RESP_NOT_FOUND; + goto cleanup; + } + + w->response.data->content_type = CT_APPLICATION_JSON; + st->last_accessed_time_s = now_realtime_sec(); + alert_variable_lookup_trace(host, st, variable, w->response.data); + + return HTTP_RESP_OK; + +cleanup: + return ret; +} + +int api_v1_alarm_variables(RRDHOST *host, struct web_client *w, char *url) { + return api_v1_single_chart_helper(host, w, url, health_api_v1_chart_variables2json); +} + diff --git a/src/web/api/exporters/shell/allmetrics_shell.c b/src/web/api/v1/api_v1_allmetrics.c similarity index 52% rename from src/web/api/exporters/shell/allmetrics_shell.c rename to src/web/api/v1/api_v1_allmetrics.c index c8248c14862ca3..593475efde74ff 100644 --- a/src/web/api/exporters/shell/allmetrics_shell.c +++ b/src/web/api/v1/api_v1_allmetrics.c @@ -1,6 +1,29 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "allmetrics_shell.h" +#include "api_v1_calls.h" + +#define ALLMETRICS_FORMAT_SHELL "shell" +#define ALLMETRICS_FORMAT_PROMETHEUS "prometheus" +#define ALLMETRICS_FORMAT_PROMETHEUS_ALL_HOSTS "prometheus_all_hosts" +#define ALLMETRICS_FORMAT_JSON "json" + +#define ALLMETRICS_SHELL 1 +#define ALLMETRICS_PROMETHEUS 2 +#define ALLMETRICS_JSON 3 +#define ALLMETRICS_PROMETHEUS_ALL_HOSTS 4 + +struct prometheus_output_options { + char *name; + PROMETHEUS_OUTPUT_OPTIONS flag; +} prometheus_output_flags_root[] = { + { "names", PROMETHEUS_OUTPUT_NAMES }, + { "timestamps", PROMETHEUS_OUTPUT_TIMESTAMPS }, + { "variables", PROMETHEUS_OUTPUT_VARIABLES }, + { "oldunits", PROMETHEUS_OUTPUT_OLDUNITS }, + { "hideunits", PROMETHEUS_OUTPUT_HIDEUNITS }, + // terminator + { NULL, PROMETHEUS_OUTPUT_NONE }, +}; // ---------------------------------------------------------------------------- // BASH @@ -168,3 +191,118 @@ void rrd_stats_api_v1_charts_allmetrics_json(RRDHOST 
*host, const char *filter_s simple_pattern_free(filter); } +int api_v1_allmetrics(RRDHOST *host, struct web_client *w, char *url) { + int format = ALLMETRICS_SHELL; + const char *filter = NULL; + const char *prometheus_server = w->client_ip; + + uint32_t prometheus_exporting_options; + if (prometheus_exporter_instance) + prometheus_exporting_options = prometheus_exporter_instance->config.options; + else + prometheus_exporting_options = global_exporting_options; + + PROMETHEUS_OUTPUT_OPTIONS prometheus_output_options = + PROMETHEUS_OUTPUT_TIMESTAMPS | + ((prometheus_exporting_options & EXPORTING_OPTION_SEND_NAMES) ? PROMETHEUS_OUTPUT_NAMES : 0); + + const char *prometheus_prefix; + if (prometheus_exporter_instance) + prometheus_prefix = prometheus_exporter_instance->config.prefix; + else + prometheus_prefix = global_exporting_prefix; + + while(url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if (!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if(!name || !*name) continue; + if(!value || !*value) continue; + + if(!strcmp(name, "format")) { + if(!strcmp(value, ALLMETRICS_FORMAT_SHELL)) + format = ALLMETRICS_SHELL; + else if(!strcmp(value, ALLMETRICS_FORMAT_PROMETHEUS)) + format = ALLMETRICS_PROMETHEUS; + else if(!strcmp(value, ALLMETRICS_FORMAT_PROMETHEUS_ALL_HOSTS)) + format = ALLMETRICS_PROMETHEUS_ALL_HOSTS; + else if(!strcmp(value, ALLMETRICS_FORMAT_JSON)) + format = ALLMETRICS_JSON; + else + format = 0; + } + else if(!strcmp(name, "filter")) { + filter = value; + } + else if(!strcmp(name, "server")) { + prometheus_server = value; + } + else if(!strcmp(name, "prefix")) { + prometheus_prefix = value; + } + else if(!strcmp(name, "data") || !strcmp(name, "source") || !strcmp(name, "data source") || !strcmp(name, "data-source") || !strcmp(name, "data_source") || !strcmp(name, "datasource")) { + prometheus_exporting_options = exporting_parse_data_source(value, prometheus_exporting_options); + } + 
else { + int i; + for(i = 0; prometheus_output_flags_root[i].name ; i++) { + if(!strcmp(name, prometheus_output_flags_root[i].name)) { + if(!strcmp(value, "yes") || !strcmp(value, "1") || !strcmp(value, "true")) + prometheus_output_options |= prometheus_output_flags_root[i].flag; + else { + prometheus_output_options &= ~prometheus_output_flags_root[i].flag; + } + + break; + } + } + } + } + + buffer_flush(w->response.data); + buffer_no_cacheable(w->response.data); + + switch(format) { + case ALLMETRICS_JSON: + w->response.data->content_type = CT_APPLICATION_JSON; + rrd_stats_api_v1_charts_allmetrics_json(host, filter, w->response.data); + return HTTP_RESP_OK; + + case ALLMETRICS_SHELL: + w->response.data->content_type = CT_TEXT_PLAIN; + rrd_stats_api_v1_charts_allmetrics_shell(host, filter, w->response.data); + return HTTP_RESP_OK; + + case ALLMETRICS_PROMETHEUS: + w->response.data->content_type = CT_PROMETHEUS; + rrd_stats_api_v1_charts_allmetrics_prometheus_single_host( + host + , filter + , w->response.data + , prometheus_server + , prometheus_prefix + , prometheus_exporting_options + , prometheus_output_options + ); + return HTTP_RESP_OK; + + case ALLMETRICS_PROMETHEUS_ALL_HOSTS: + w->response.data->content_type = CT_PROMETHEUS; + rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts( + host + , filter + , w->response.data + , prometheus_server + , prometheus_prefix + , prometheus_exporting_options + , prometheus_output_options + ); + return HTTP_RESP_OK; + + default: + w->response.data->content_type = CT_TEXT_PLAIN; + buffer_strcat(w->response.data, "Which format? 
'" ALLMETRICS_FORMAT_SHELL "', '" ALLMETRICS_FORMAT_PROMETHEUS "', '" ALLMETRICS_FORMAT_PROMETHEUS_ALL_HOSTS "' and '" ALLMETRICS_FORMAT_JSON "' are currently supported."); + return HTTP_RESP_BAD_REQUEST; + } +} diff --git a/src/web/api/badges/README.md b/src/web/api/v1/api_v1_badge/README.md similarity index 100% rename from src/web/api/badges/README.md rename to src/web/api/v1/api_v1_badge/README.md diff --git a/src/web/api/badges/web_buffer_svg.c b/src/web/api/v1/api_v1_badge/web_buffer_svg.c similarity index 99% rename from src/web/api/badges/web_buffer_svg.c rename to src/web/api/v1/api_v1_badge/web_buffer_svg.c index 747c46d5eeba40..642261fd3581a8 100644 --- a/src/web/api/badges/web_buffer_svg.c +++ b/src/web/api/v1/api_v1_badge/web_buffer_svg.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "web_buffer_svg.h" +#include "libnetdata/libnetdata.h" +#include "../../../server/web_client.h" #define BADGE_HORIZONTAL_PADDING 4 #define VERDANA_KERNING 0.2 @@ -360,7 +361,7 @@ static struct units_formatter { { NULL, 0, UNITS_FORMAT_NONE } }; -inline char *format_value_and_unit(char *value_string, size_t value_string_len, +char *format_value_and_unit(char *value_string, size_t value_string_len, NETDATA_DOUBLE value, const char *units, int precision) { static int max = -1; int i; @@ -734,7 +735,7 @@ static const char *parse_color_argument(const char *arg, const char *def) return color_map(arg, def); } -void buffer_svg(BUFFER *wb, const char *label, +static void buffer_svg(BUFFER *wb, const char *label, NETDATA_DOUBLE value, const char *units, const char *label_color, const char *value_color, int precision, int scale, uint32_t options, int fixed_width_lbl, int fixed_width_val, const char* text_color_lbl, const char* text_color_val) { char value_color_buffer[COLOR_STRING_SIZE + 1] , value_string[VALUE_STRING_SIZE + 1] @@ -864,7 +865,7 @@ void buffer_svg(BUFFER *wb, const char *label, #define BADGE_URL_ARG_LBL_COLOR "text_color_lbl" #define 
BADGE_URL_ARG_VAL_COLOR "text_color_val" -int web_client_api_request_v1_badge(RRDHOST *host, struct web_client *w, char *url) { +int api_v1_badge(RRDHOST *host, struct web_client *w, char *url) { int ret = HTTP_RESP_BAD_REQUEST; buffer_flush(w->response.data); diff --git a/src/web/api/v1/api_v1_calls.h b/src/web/api/v1/api_v1_calls.h new file mode 100644 index 00000000000000..36a0605cb1b071 --- /dev/null +++ b/src/web/api/v1/api_v1_calls.h @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_API_V1_CALLS_H +#define NETDATA_API_V1_CALLS_H + +#include "../web_api_v1.h" + +int api_v1_info(RRDHOST *host, struct web_client *w, char *url); + +int api_v1_config(RRDHOST *host, struct web_client *w, char *url); + +int api_v1_registry(RRDHOST *host, struct web_client *w, char *url); + +int api_v1_manage(RRDHOST *host, struct web_client *w, char *url); + +int api_v1_data(RRDHOST *host, struct web_client *w, char *url); +int api_v1_chart(RRDHOST *host, struct web_client *w, char *url); +int api_v1_charts(RRDHOST *host, struct web_client *w, char *url); + +int api_v1_context(RRDHOST *host, struct web_client *w, char *url); +int api_v1_contexts(RRDHOST *host, struct web_client *w, char *url); + +int api_v1_alarms(RRDHOST *host, struct web_client *w, char *url); +int api_v1_alarms_values(RRDHOST *host, struct web_client *w, char *url); +int api_v1_alarm_count(RRDHOST *host, struct web_client *w, char *url); +int api_v1_alarm_log(RRDHOST *host, struct web_client *w, char *url); +int api_v1_variable(RRDHOST *host, struct web_client *w, char *url); +int api_v1_alarm_variables(RRDHOST *host, struct web_client *w, char *url); + +int api_v1_dbengine_stats(RRDHOST *host, struct web_client *w, char *url); +int api_v1_ml_info(RRDHOST *host, struct web_client *w, char *url); +int api_v1_aclk(RRDHOST *host, struct web_client *w, char *url); + +int api_v1_functions(RRDHOST *host, struct web_client *w, char *url); +int api_v1_function(RRDHOST *host, struct 
web_client *w, char *url); + +int api_v1_metric_correlations(RRDHOST *host, struct web_client *w, char *url); +int api_v1_weights(RRDHOST *host, struct web_client *w, char *url); + +int api_v1_badge(RRDHOST *host, struct web_client *w, char *url); +int api_v1_allmetrics(RRDHOST *host, struct web_client *w, char *url); + +// common library calls +int api_v1_single_chart_helper(RRDHOST *host, struct web_client *w, char *url, void callback(RRDSET *st, BUFFER *buf)); +void api_v1_management_init(void); + +#endif //NETDATA_API_V1_CALLS_H diff --git a/src/web/api/v1/api_v1_charts.c b/src/web/api/v1/api_v1_charts.c new file mode 100644 index 00000000000000..afc67af68499a1 --- /dev/null +++ b/src/web/api/v1/api_v1_charts.c @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v1_calls.h" + +int api_v1_single_chart_helper(RRDHOST *host, struct web_client *w, char *url, void callback(RRDSET *st, BUFFER *buf)) { + int ret = HTTP_RESP_BAD_REQUEST; + char *chart = NULL; + + buffer_flush(w->response.data); + + while(url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if(!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if(!name || !*name) continue; + if(!value || !*value) continue; + + // name and value are now the parameters + // they are not null and not empty + + if(!strcmp(name, "chart")) chart = value; + //else { + /// buffer_sprintf(w->response.data, "Unknown parameter '%s' in request.", name); + // goto cleanup; + //} + } + + if(!chart || !*chart) { + buffer_sprintf(w->response.data, "No chart id is given at the request."); + goto cleanup; + } + + RRDSET *st = rrdset_find(host, chart); + if(!st) st = rrdset_find_byname(host, chart); + if(!st) { + buffer_strcat(w->response.data, "Chart is not found: "); + buffer_strcat_htmlescape(w->response.data, chart); + ret = HTTP_RESP_NOT_FOUND; + goto cleanup; + } + + w->response.data->content_type = CT_APPLICATION_JSON; + 
st->last_accessed_time_s = now_realtime_sec(); + callback(st, w->response.data); + return HTTP_RESP_OK; + +cleanup: + return ret; +} + +int api_v1_charts(RRDHOST *host, struct web_client *w, char *url) { + (void)url; + + buffer_flush(w->response.data); + w->response.data->content_type = CT_APPLICATION_JSON; + charts2json(host, w->response.data); + return HTTP_RESP_OK; +} + +int api_v1_chart(RRDHOST *host, struct web_client *w, char *url) { + return api_v1_single_chart_helper(host, w, url, rrd_stats_api_v1_chart); +} + diff --git a/src/web/api/v1/api_v1_config.c b/src/web/api/v1/api_v1_config.c new file mode 100644 index 00000000000000..9e71998d980f27 --- /dev/null +++ b/src/web/api/v1/api_v1_config.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "web/api/v2/api_v2_calls.h" + +int api_v1_config(RRDHOST *host, struct web_client *w, char *url __maybe_unused) { + char *action = "tree"; + char *path = "/"; + char *id = NULL; + char *add_name = NULL; + int timeout = 120; + + while(url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if(!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if(!name || !*name) continue; + if(!value || !*value) continue; + + // name and value are now the parameters + // they are not null and not empty + + if(!strcmp(name, "action")) + action = value; + else if(!strcmp(name, "path")) + path = value; + else if(!strcmp(name, "id")) + id = value; + else if(!strcmp(name, "name")) + add_name = value; + else if(!strcmp(name, "timeout")) { + timeout = (int)strtol(value, NULL, 10); + if(timeout < 10) + timeout = 10; + } + } + + char transaction[UUID_COMPACT_STR_LEN]; + uuid_unparse_lower_compact(w->transaction, transaction); + + size_t len = strlen(action) + (id ? strlen(id) : 0) + strlen(path) + (add_name ? 
strlen(add_name) : 0) + 100; + + char cmd[len]; + if(strcmp(action, "tree") == 0) + snprintfz(cmd, sizeof(cmd), PLUGINSD_FUNCTION_CONFIG " tree '%s' '%s'", path, id?id:""); + else { + DYNCFG_CMDS c = dyncfg_cmds2id(action); + if(!id || !*id || !dyncfg_is_valid_id(id)) { + rrd_call_function_error(w->response.data, "invalid id given", HTTP_RESP_BAD_REQUEST); + return HTTP_RESP_BAD_REQUEST; + } + + if(c == DYNCFG_CMD_NONE) { + rrd_call_function_error(w->response.data, "invalid action given", HTTP_RESP_BAD_REQUEST); + return HTTP_RESP_BAD_REQUEST; + } + + if(c == DYNCFG_CMD_ADD || c == DYNCFG_CMD_USERCONFIG || c == DYNCFG_CMD_TEST) { + if(c == DYNCFG_CMD_TEST && (!add_name || !*add_name)) { + // backwards compatibility for TEST without a name + char *colon = strrchr(id, ':'); + if(colon) { + *colon = '\0'; + add_name = ++colon; + } + else + add_name = "test"; + } + + if(!add_name || !*add_name || !dyncfg_is_valid_id(add_name)) { + rrd_call_function_error(w->response.data, "invalid name given", HTTP_RESP_BAD_REQUEST); + return HTTP_RESP_BAD_REQUEST; + } + snprintfz(cmd, sizeof(cmd), PLUGINSD_FUNCTION_CONFIG " %s %s %s", id, dyncfg_id2cmd_one(c), add_name); + } + else + snprintfz(cmd, sizeof(cmd), PLUGINSD_FUNCTION_CONFIG " %s %s", id, dyncfg_id2cmd_one(c)); + } + + CLEAN_BUFFER *source = buffer_create(100, NULL); + web_client_api_request_vX_source_to_buffer(w, source); + + buffer_flush(w->response.data); + int code = rrd_function_run(host, w->response.data, timeout, w->access, cmd, + true, transaction, + NULL, NULL, + web_client_progress_functions_update, w, + web_client_interrupt_callback, w, + w->payload, buffer_tostring(source), false); + + return code; +} diff --git a/src/web/api/v1/api_v1_context.c b/src/web/api/v1/api_v1_context.c new file mode 100644 index 00000000000000..5b7baf80c028e3 --- /dev/null +++ b/src/web/api/v1/api_v1_context.c @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v1_calls.h" + +int api_v1_context(RRDHOST 
*host, struct web_client *w, char *url) { + char *context = NULL; + RRDCONTEXT_TO_JSON_OPTIONS options = RRDCONTEXT_OPTION_NONE; + time_t after = 0, before = 0; + const char *chart_label_key = NULL, *chart_labels_filter = NULL; + BUFFER *dimensions = NULL; + + buffer_flush(w->response.data); + + while(url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if(!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if(!name || !*name) continue; + if(!value || !*value) continue; + + // name and value are now the parameters + // they are not null and not empty + + if(!strcmp(name, "context") || !strcmp(name, "ctx")) context = value; + else if(!strcmp(name, "after")) after = str2l(value); + else if(!strcmp(name, "before")) before = str2l(value); + else if(!strcmp(name, "options")) options = rrdcontext_to_json_parse_options(value); + else if(!strcmp(name, "chart_label_key")) chart_label_key = value; + else if(!strcmp(name, "chart_labels_filter")) chart_labels_filter = value; + else if(!strcmp(name, "dimension") || !strcmp(name, "dim") || !strcmp(name, "dimensions") || !strcmp(name, "dims")) { + if(!dimensions) dimensions = buffer_create(100, &netdata_buffers_statistics.buffers_api); + buffer_strcat(dimensions, "|"); + buffer_strcat(dimensions, value); + } + } + + if(!context || !*context) { + buffer_sprintf(w->response.data, "No context is given at the request."); + return HTTP_RESP_BAD_REQUEST; + } + + SIMPLE_PATTERN *chart_label_key_pattern = NULL; + SIMPLE_PATTERN *chart_labels_filter_pattern = NULL; + SIMPLE_PATTERN *chart_dimensions_pattern = NULL; + + if(chart_label_key) + chart_label_key_pattern = simple_pattern_create(chart_label_key, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT, true); + + if(chart_labels_filter) + chart_labels_filter_pattern = simple_pattern_create(chart_labels_filter, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT, + true); + + if(dimensions) { + chart_dimensions_pattern = 
simple_pattern_create(buffer_tostring(dimensions), ",|\t\r\n\f\v", + SIMPLE_PATTERN_EXACT, true); + buffer_free(dimensions); + } + + w->response.data->content_type = CT_APPLICATION_JSON; + int ret = rrdcontext_to_json(host, w->response.data, after, before, options, context, chart_label_key_pattern, chart_labels_filter_pattern, chart_dimensions_pattern); + + simple_pattern_free(chart_label_key_pattern); + simple_pattern_free(chart_labels_filter_pattern); + simple_pattern_free(chart_dimensions_pattern); + + return ret; +} diff --git a/src/web/api/v1/api_v1_contexts.c b/src/web/api/v1/api_v1_contexts.c new file mode 100644 index 00000000000000..90d376d47306b2 --- /dev/null +++ b/src/web/api/v1/api_v1_contexts.c @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v1_calls.h" + +int api_v1_contexts(RRDHOST *host, struct web_client *w, char *url) { + RRDCONTEXT_TO_JSON_OPTIONS options = RRDCONTEXT_OPTION_NONE; + time_t after = 0, before = 0; + const char *chart_label_key = NULL, *chart_labels_filter = NULL; + BUFFER *dimensions = NULL; + + buffer_flush(w->response.data); + + while(url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if(!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if(!name || !*name) continue; + if(!value || !*value) continue; + + // name and value are now the parameters + // they are not null and not empty + + if(!strcmp(name, "after")) after = str2l(value); + else if(!strcmp(name, "before")) before = str2l(value); + else if(!strcmp(name, "options")) options = rrdcontext_to_json_parse_options(value); + else if(!strcmp(name, "chart_label_key")) chart_label_key = value; + else if(!strcmp(name, "chart_labels_filter")) chart_labels_filter = value; + else if(!strcmp(name, "dimension") || !strcmp(name, "dim") || !strcmp(name, "dimensions") || !strcmp(name, "dims")) { + if(!dimensions) dimensions = buffer_create(100, &netdata_buffers_statistics.buffers_api); + 
buffer_strcat(dimensions, "|"); + buffer_strcat(dimensions, value); + } + } + + SIMPLE_PATTERN *chart_label_key_pattern = NULL; + SIMPLE_PATTERN *chart_labels_filter_pattern = NULL; + SIMPLE_PATTERN *chart_dimensions_pattern = NULL; + + if(chart_label_key) + chart_label_key_pattern = simple_pattern_create(chart_label_key, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT, true); + + if(chart_labels_filter) + chart_labels_filter_pattern = simple_pattern_create(chart_labels_filter, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT, + true); + + if(dimensions) { + chart_dimensions_pattern = simple_pattern_create(buffer_tostring(dimensions), ",|\t\r\n\f\v", + SIMPLE_PATTERN_EXACT, true); + buffer_free(dimensions); + } + + w->response.data->content_type = CT_APPLICATION_JSON; + int ret = rrdcontexts_to_json(host, w->response.data, after, before, options, chart_label_key_pattern, chart_labels_filter_pattern, chart_dimensions_pattern); + + simple_pattern_free(chart_label_key_pattern); + simple_pattern_free(chart_labels_filter_pattern); + simple_pattern_free(chart_dimensions_pattern); + + return ret; +} diff --git a/src/web/api/v1/api_v1_data.c b/src/web/api/v1/api_v1_data.c new file mode 100644 index 00000000000000..30328ed3e5aae5 --- /dev/null +++ b/src/web/api/v1/api_v1_data.c @@ -0,0 +1,246 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v1_calls.h" + +int api_v1_data(RRDHOST *host, struct web_client *w, char *url) { + netdata_log_debug(D_WEB_CLIENT, "%llu: API v1 data with URL '%s'", w->id, url); + + int ret = HTTP_RESP_BAD_REQUEST; + BUFFER *dimensions = NULL; + + buffer_flush(w->response.data); + + char *google_version = "0.6", + *google_reqId = "0", + *google_sig = "0", + *google_out = "json", + *responseHandler = NULL, + *outFileName = NULL; + + time_t last_timestamp_in_data = 0, google_timestamp = 0; + + char *chart = NULL; + char *before_str = NULL; + char *after_str = NULL; + char *group_time_str = NULL; + char *points_str = NULL; + char *timeout_str = NULL; + char 
*context = NULL; + char *chart_label_key = NULL; + char *chart_labels_filter = NULL; + char *group_options = NULL; + size_t tier = 0; + RRDR_TIME_GROUPING group = RRDR_GROUPING_AVERAGE; + DATASOURCE_FORMAT format = DATASOURCE_JSON; + RRDR_OPTIONS options = 0; + + while(url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if(!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if(!name || !*name) continue; + if(!value || !*value) continue; + + netdata_log_debug(D_WEB_CLIENT, "%llu: API v1 data query param '%s' with value '%s'", w->id, name, value); + + // name and value are now the parameters + // they are not null and not empty + + if(!strcmp(name, "context")) context = value; + else if(!strcmp(name, "chart_label_key")) chart_label_key = value; + else if(!strcmp(name, "chart_labels_filter")) chart_labels_filter = value; + else if(!strcmp(name, "chart")) chart = value; + else if(!strcmp(name, "dimension") || !strcmp(name, "dim") || !strcmp(name, "dimensions") || !strcmp(name, "dims")) { + if(!dimensions) dimensions = buffer_create(100, &netdata_buffers_statistics.buffers_api); + buffer_strcat(dimensions, "|"); + buffer_strcat(dimensions, value); + } + else if(!strcmp(name, "show_dimensions")) options |= RRDR_OPTION_ALL_DIMENSIONS; + else if(!strcmp(name, "after")) after_str = value; + else if(!strcmp(name, "before")) before_str = value; + else if(!strcmp(name, "points")) points_str = value; + else if(!strcmp(name, "timeout")) timeout_str = value; + else if(!strcmp(name, "gtime")) group_time_str = value; + else if(!strcmp(name, "group_options")) group_options = value; + else if(!strcmp(name, "group")) { + group = time_grouping_parse(value, RRDR_GROUPING_AVERAGE); + } + else if(!strcmp(name, "format")) { + format = datasource_format_str_to_id(value); + } + else if(!strcmp(name, "options")) { + options |= rrdr_options_parse(value); + } + else if(!strcmp(name, "callback")) { + responseHandler = value; + } + 
else if(!strcmp(name, "filename")) { + outFileName = value; + } + else if(!strcmp(name, "tqx")) { + // parse Google Visualization API options + // https://developers.google.com/chart/interactive/docs/dev/implementing_data_source + char *tqx_name, *tqx_value; + + while(value) { + tqx_value = strsep_skip_consecutive_separators(&value, ";"); + if(!tqx_value || !*tqx_value) continue; + + tqx_name = strsep_skip_consecutive_separators(&tqx_value, ":"); + if(!tqx_name || !*tqx_name) continue; + if(!tqx_value || !*tqx_value) continue; + + if(!strcmp(tqx_name, "version")) + google_version = tqx_value; + else if(!strcmp(tqx_name, "reqId")) + google_reqId = tqx_value; + else if(!strcmp(tqx_name, "sig")) { + google_sig = tqx_value; + google_timestamp = strtoul(google_sig, NULL, 0); + } + else if(!strcmp(tqx_name, "out")) { + google_out = tqx_value; + format = google_data_format_str_to_id(google_out); + } + else if(!strcmp(tqx_name, "responseHandler")) + responseHandler = tqx_value; + else if(!strcmp(tqx_name, "outFileName")) + outFileName = tqx_value; + } + } + else if(!strcmp(name, "tier")) { + tier = str2ul(value); + if(tier < storage_tiers) + options |= RRDR_OPTION_SELECTED_TIER; + else + tier = 0; + } + } + + // validate the google parameters given + fix_google_param(google_out); + fix_google_param(google_sig); + fix_google_param(google_reqId); + fix_google_param(google_version); + fix_google_param(responseHandler); + fix_google_param(outFileName); + + RRDSET *st = NULL; + ONEWAYALLOC *owa = onewayalloc_create(0); + QUERY_TARGET *qt = NULL; + + if(!is_valid_sp(chart) && !is_valid_sp(context)) { + buffer_sprintf(w->response.data, "No chart or context is given."); + goto cleanup; + } + + if(chart && !context) { + // check if this is a specific chart + st = rrdset_find(host, chart); + if (!st) st = rrdset_find_byname(host, chart); + } + + long long before = (before_str && *before_str)?str2l(before_str):0; + long long after = (after_str && *after_str) ?str2l(after_str):-600; + 
int points = (points_str && *points_str)?str2i(points_str):0; + int timeout = (timeout_str && *timeout_str)?str2i(timeout_str): 0; + long group_time = (group_time_str && *group_time_str)?str2l(group_time_str):0; + + QUERY_TARGET_REQUEST qtr = { + .version = 1, + .after = after, + .before = before, + .host = host, + .st = st, + .nodes = NULL, + .contexts = context, + .instances = chart, + .dimensions = (dimensions)?buffer_tostring(dimensions):NULL, + .timeout_ms = timeout, + .points = points, + .format = format, + .options = options, + .time_group_method = group, + .time_group_options = group_options, + .resampling_time = group_time, + .tier = tier, + .chart_label_key = chart_label_key, + .labels = chart_labels_filter, + .query_source = QUERY_SOURCE_API_DATA, + .priority = STORAGE_PRIORITY_NORMAL, + .interrupt_callback = web_client_interrupt_callback, + .interrupt_callback_data = w, + .transaction = &w->transaction, + }; + qt = query_target_create(&qtr); + + if(!qt || !qt->query.used) { + buffer_sprintf(w->response.data, "No metrics where matched to query."); + ret = HTTP_RESP_NOT_FOUND; + goto cleanup; + } + + web_client_timeout_checkpoint_set(w, timeout); + if(web_client_timeout_checkpoint_and_check(w, NULL)) { + ret = w->response.code; + goto cleanup; + } + + if(outFileName && *outFileName) { + buffer_sprintf(w->response.header, "Content-Disposition: attachment; filename=\"%s\"\r\n", outFileName); + netdata_log_debug(D_WEB_CLIENT, "%llu: generating outfilename header: '%s'", w->id, outFileName); + } + + if(format == DATASOURCE_DATATABLE_JSONP) { + if(responseHandler == NULL) + responseHandler = "google.visualization.Query.setResponse"; + + netdata_log_debug(D_WEB_CLIENT_ACCESS, "%llu: GOOGLE JSON/JSONP: version = '%s', reqId = '%s', sig = '%s', out = '%s', responseHandler = '%s', outFileName = '%s'", + w->id, google_version, google_reqId, google_sig, google_out, responseHandler, outFileName + ); + + buffer_sprintf( + w->response.data, + 
"%s({version:'%s',reqId:'%s',status:'ok',sig:'%"PRId64"',table:", + responseHandler, + google_version, + google_reqId, + (int64_t)(st ? st->last_updated.tv_sec : 0)); + } + else if(format == DATASOURCE_JSONP) { + if(responseHandler == NULL) + responseHandler = "callback"; + + buffer_strcat(w->response.data, responseHandler); + buffer_strcat(w->response.data, "("); + } + + ret = data_query_execute(owa, w->response.data, qt, &last_timestamp_in_data); + + if(format == DATASOURCE_DATATABLE_JSONP) { + if(google_timestamp < last_timestamp_in_data) + buffer_strcat(w->response.data, "});"); + + else { + // the client already has the latest data + buffer_flush(w->response.data); + buffer_sprintf(w->response.data, + "%s({version:'%s',reqId:'%s',status:'error',errors:[{reason:'not_modified',message:'Data not modified'}]});", + responseHandler, google_version, google_reqId); + } + } + else if(format == DATASOURCE_JSONP) + buffer_strcat(w->response.data, ");"); + + if(qt->internal.relative) + buffer_no_cacheable(w->response.data); + else + buffer_cacheable(w->response.data); + +cleanup: + query_target_release(qt); + onewayalloc_destroy(owa); + buffer_free(dimensions); + return ret; +} diff --git a/src/web/api/v1/api_v1_dbengine.c b/src/web/api/v1/api_v1_dbengine.c new file mode 100644 index 00000000000000..89855f88ad22ca --- /dev/null +++ b/src/web/api/v1/api_v1_dbengine.c @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v1_calls.h" + +#ifndef ENABLE_DBENGINE +int api_v1_dbengine_stats(RRDHOST *host __maybe_unused, struct web_client *w __maybe_unused, char *url __maybe_unused) { + return HTTP_RESP_NOT_FOUND; +} +#else +static void web_client_api_v1_dbengine_stats_for_tier(BUFFER *wb, size_t tier) { + RRDENG_SIZE_STATS stats = rrdeng_size_statistics(multidb_ctx[tier]); + + buffer_sprintf(wb, + "\n\t\t\"default_granularity_secs\":%zu" + ",\n\t\t\"sizeof_datafile\":%zu" + ",\n\t\t\"sizeof_page_in_cache\":%zu" + 
",\n\t\t\"sizeof_point_data\":%zu" + ",\n\t\t\"sizeof_page_data\":%zu" + ",\n\t\t\"pages_per_extent\":%zu" + ",\n\t\t\"datafiles\":%zu" + ",\n\t\t\"extents\":%zu" + ",\n\t\t\"extents_pages\":%zu" + ",\n\t\t\"points\":%zu" + ",\n\t\t\"metrics\":%zu" + ",\n\t\t\"metrics_pages\":%zu" + ",\n\t\t\"extents_compressed_bytes\":%zu" + ",\n\t\t\"pages_uncompressed_bytes\":%zu" + ",\n\t\t\"pages_duration_secs\":%lld" + ",\n\t\t\"single_point_pages\":%zu" + ",\n\t\t\"first_t\":%ld" + ",\n\t\t\"last_t\":%ld" + ",\n\t\t\"database_retention_secs\":%lld" + ",\n\t\t\"average_compression_savings\":%0.2f" + ",\n\t\t\"average_point_duration_secs\":%0.2f" + ",\n\t\t\"average_metric_retention_secs\":%0.2f" + ",\n\t\t\"ephemeral_metrics_per_day_percent\":%0.2f" + ",\n\t\t\"average_page_size_bytes\":%0.2f" + ",\n\t\t\"estimated_concurrently_collected_metrics\":%zu" + ",\n\t\t\"currently_collected_metrics\":%zu" + ",\n\t\t\"disk_space\":%zu" + ",\n\t\t\"max_disk_space\":%zu" + , stats.default_granularity_secs + , stats.sizeof_datafile + , stats.sizeof_page_in_cache + , stats.sizeof_point_data + , stats.sizeof_page_data + , stats.pages_per_extent + , stats.datafiles + , stats.extents + , stats.extents_pages + , stats.points + , stats.metrics + , stats.metrics_pages + , stats.extents_compressed_bytes + , stats.pages_uncompressed_bytes + , (long long)stats.pages_duration_secs + , stats.single_point_pages + , stats.first_time_s + , stats.last_time_s + , (long long)stats.database_retention_secs + , stats.average_compression_savings + , stats.average_point_duration_secs + , stats.average_metric_retention_secs + , stats.ephemeral_metrics_per_day_percent + , stats.average_page_size_bytes + , stats.estimated_concurrently_collected_metrics + , stats.currently_collected_metrics + , stats.disk_space + , stats.max_disk_space + ); +} + +int api_v1_dbengine_stats(RRDHOST *host __maybe_unused, struct web_client *w, char *url __maybe_unused) { + if (!netdata_ready) + return HTTP_RESP_SERVICE_UNAVAILABLE; + 
+ BUFFER *wb = w->response.data; + buffer_flush(wb); + + if(!dbengine_enabled) { + buffer_strcat(wb, "dbengine is not enabled"); + return HTTP_RESP_NOT_FOUND; + } + + wb->content_type = CT_APPLICATION_JSON; + buffer_no_cacheable(wb); + buffer_strcat(wb, "{"); + for(size_t tier = 0; tier < storage_tiers ;tier++) { + buffer_sprintf(wb, "%s\n\t\"tier%zu\": {", tier?",":"", tier); + web_client_api_v1_dbengine_stats_for_tier(wb, tier); + buffer_strcat(wb, "\n\t}"); + } + buffer_strcat(wb, "\n}"); + + return HTTP_RESP_OK; +} +#endif diff --git a/src/web/api/v1/api_v1_function.c b/src/web/api/v1/api_v1_function.c new file mode 100644 index 00000000000000..495f7494b839e4 --- /dev/null +++ b/src/web/api/v1/api_v1_function.c @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v1_calls.h" + +int api_v1_function(RRDHOST *host, struct web_client *w, char *url) { + if (!netdata_ready) + return HTTP_RESP_SERVICE_UNAVAILABLE; + + int timeout = 0; + const char *function = NULL; + + while (url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if (!value || !*value) + continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if (!name || !*name) + continue; + + if (!strcmp(name, "function")) + function = value; + + else if (!strcmp(name, "timeout")) + timeout = (int) strtoul(value, NULL, 0); + } + + BUFFER *wb = w->response.data; + buffer_flush(wb); + wb->content_type = CT_APPLICATION_JSON; + buffer_no_cacheable(wb); + + char transaction[UUID_COMPACT_STR_LEN]; + uuid_unparse_lower_compact(w->transaction, transaction); + + CLEAN_BUFFER *source = buffer_create(100, NULL); + web_client_api_request_vX_source_to_buffer(w, source); + + return rrd_function_run(host, wb, timeout, w->access, function, true, transaction, + NULL, NULL, + web_client_progress_functions_update, w, + web_client_interrupt_callback, w, NULL, + buffer_tostring(source), false); +} diff --git a/src/web/api/v1/api_v1_functions.c 
b/src/web/api/v1/api_v1_functions.c new file mode 100644 index 00000000000000..bc1c7df8e0bb90 --- /dev/null +++ b/src/web/api/v1/api_v1_functions.c @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v1_calls.h" + +int api_v1_functions(RRDHOST *host, struct web_client *w, char *url __maybe_unused) { + if (!netdata_ready) + return HTTP_RESP_SERVICE_UNAVAILABLE; + + BUFFER *wb = w->response.data; + buffer_flush(wb); + wb->content_type = CT_APPLICATION_JSON; + buffer_no_cacheable(wb); + + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); + host_functions2json(host, wb); + buffer_json_finalize(wb); + + return HTTP_RESP_OK; +} diff --git a/src/web/api/v1/api_v1_info.c b/src/web/api/v1/api_v1_info.c new file mode 100644 index 00000000000000..b14d51190432c6 --- /dev/null +++ b/src/web/api/v1/api_v1_info.c @@ -0,0 +1,207 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v1_calls.h" + +static void host_collectors(RRDHOST *host, BUFFER *wb) { + buffer_json_member_add_array(wb, "collectors"); + + DICTIONARY *dict = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_DONT_OVERWRITE_VALUE); + RRDSET *st; + char name[500]; + + time_t now = now_realtime_sec(); + + rrdset_foreach_read(st, host) { + if (!rrdset_is_available_for_viewers(st)) + continue; + + sprintf(name, "%s:%s", rrdset_plugin_name(st), rrdset_module_name(st)); + + bool old = 0; + bool *set = dictionary_set(dict, name, &old, sizeof(bool)); + if(!*set) { + *set = true; + st->last_accessed_time_s = now; + buffer_json_add_array_item_object(wb); + buffer_json_member_add_string(wb, "plugin", rrdset_plugin_name(st)); + buffer_json_member_add_string(wb, "module", rrdset_module_name(st)); + buffer_json_object_close(wb); + } + } + rrdset_foreach_done(st); + dictionary_destroy(dict); + + buffer_json_array_close(wb); +} + +static inline void web_client_api_request_v1_info_mirrored_hosts_status(BUFFER *wb, RRDHOST *host) { + 
buffer_json_add_array_item_object(wb); + + buffer_json_member_add_string(wb, "hostname", rrdhost_hostname(host)); + buffer_json_member_add_uint64(wb, "hops", host->system_info ? host->system_info->hops : (host == localhost) ? 0 : 1); + buffer_json_member_add_boolean(wb, "reachable", (host == localhost || !rrdhost_flag_check(host, RRDHOST_FLAG_ORPHAN))); + + buffer_json_member_add_string(wb, "guid", host->machine_guid); + buffer_json_member_add_uuid(wb, "node_id", host->node_id); + CLAIM_ID claim_id = rrdhost_claim_id_get(host); + buffer_json_member_add_string(wb, "claim_id", claim_id_is_set(claim_id) ? claim_id.str : NULL); + + buffer_json_object_close(wb); +} + +static inline void web_client_api_request_v1_info_mirrored_hosts(BUFFER *wb) { + RRDHOST *host; + + rrd_rdlock(); + + buffer_json_member_add_array(wb, "mirrored_hosts"); + rrdhost_foreach_read(host) + buffer_json_add_array_item_string(wb, rrdhost_hostname(host)); + buffer_json_array_close(wb); + + buffer_json_member_add_array(wb, "mirrored_hosts_status"); + rrdhost_foreach_read(host) { + if ((host == localhost || !rrdhost_flag_check(host, RRDHOST_FLAG_ORPHAN))) { + web_client_api_request_v1_info_mirrored_hosts_status(wb, host); + } + } + rrdhost_foreach_read(host) { + if ((host != localhost && rrdhost_flag_check(host, RRDHOST_FLAG_ORPHAN))) { + web_client_api_request_v1_info_mirrored_hosts_status(wb, host); + } + } + buffer_json_array_close(wb); + + rrd_rdunlock(); +} + +static void web_client_api_request_v1_info_summary_alarm_statuses(RRDHOST *host, BUFFER *wb, const char *key) { + buffer_json_member_add_object(wb, key); + + size_t normal = 0, warning = 0, critical = 0; + RRDCALC *rc; + foreach_rrdcalc_in_rrdhost_read(host, rc) { + if(unlikely(!rc->rrdset || !rc->rrdset->last_collected_time.tv_sec)) + continue; + + switch(rc->status) { + case RRDCALC_STATUS_WARNING: + warning++; + break; + case RRDCALC_STATUS_CRITICAL: + critical++; + break; + default: + normal++; + } + } + 
foreach_rrdcalc_in_rrdhost_done(rc); + + buffer_json_member_add_uint64(wb, "normal", normal); + buffer_json_member_add_uint64(wb, "warning", warning); + buffer_json_member_add_uint64(wb, "critical", critical); + + buffer_json_object_close(wb); +} + +static int web_client_api_request_v1_info_fill_buffer(RRDHOST *host, BUFFER *wb) { + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); + + buffer_json_member_add_string(wb, "version", rrdhost_program_version(host)); + buffer_json_member_add_string(wb, "uid", host->machine_guid); + + buffer_json_member_add_uint64(wb, "hosts-available", rrdhost_hosts_available()); + web_client_api_request_v1_info_mirrored_hosts(wb); + + web_client_api_request_v1_info_summary_alarm_statuses(host, wb, "alarms"); + + buffer_json_member_add_string_or_empty(wb, "os_name", host->system_info->host_os_name); + buffer_json_member_add_string_or_empty(wb, "os_id", host->system_info->host_os_id); + buffer_json_member_add_string_or_empty(wb, "os_id_like", host->system_info->host_os_id_like); + buffer_json_member_add_string_or_empty(wb, "os_version", host->system_info->host_os_version); + buffer_json_member_add_string_or_empty(wb, "os_version_id", host->system_info->host_os_version_id); + buffer_json_member_add_string_or_empty(wb, "os_detection", host->system_info->host_os_detection); + buffer_json_member_add_string_or_empty(wb, "cores_total", host->system_info->host_cores); + buffer_json_member_add_string_or_empty(wb, "total_disk_space", host->system_info->host_disk_space); + buffer_json_member_add_string_or_empty(wb, "cpu_freq", host->system_info->host_cpu_freq); + buffer_json_member_add_string_or_empty(wb, "ram_total", host->system_info->host_ram_total); + + buffer_json_member_add_string_or_omit(wb, "container_os_name", host->system_info->container_os_name); + buffer_json_member_add_string_or_omit(wb, "container_os_id", host->system_info->container_os_id); + buffer_json_member_add_string_or_omit(wb, 
"container_os_id_like", host->system_info->container_os_id_like); + buffer_json_member_add_string_or_omit(wb, "container_os_version", host->system_info->container_os_version); + buffer_json_member_add_string_or_omit(wb, "container_os_version_id", host->system_info->container_os_version_id); + buffer_json_member_add_string_or_omit(wb, "container_os_detection", host->system_info->container_os_detection); + buffer_json_member_add_string_or_omit(wb, "is_k8s_node", host->system_info->is_k8s_node); + + buffer_json_member_add_string_or_empty(wb, "kernel_name", host->system_info->kernel_name); + buffer_json_member_add_string_or_empty(wb, "kernel_version", host->system_info->kernel_version); + buffer_json_member_add_string_or_empty(wb, "architecture", host->system_info->architecture); + buffer_json_member_add_string_or_empty(wb, "virtualization", host->system_info->virtualization); + buffer_json_member_add_string_or_empty(wb, "virt_detection", host->system_info->virt_detection); + buffer_json_member_add_string_or_empty(wb, "container", host->system_info->container); + buffer_json_member_add_string_or_empty(wb, "container_detection", host->system_info->container_detection); + + buffer_json_member_add_string_or_omit(wb, "cloud_provider_type", host->system_info->cloud_provider_type); + buffer_json_member_add_string_or_omit(wb, "cloud_instance_type", host->system_info->cloud_instance_type); + buffer_json_member_add_string_or_omit(wb, "cloud_instance_region", host->system_info->cloud_instance_region); + + host_labels2json(host, wb, "host_labels"); + host_functions2json(host, wb); + host_collectors(host, wb); + + buffer_json_member_add_boolean(wb, "cloud-enabled", true); + buffer_json_member_add_boolean(wb, "cloud-available", true); + buffer_json_member_add_boolean(wb, "agent-claimed", is_agent_claimed()); + buffer_json_member_add_boolean(wb, "aclk-available", aclk_online()); + + buffer_json_member_add_string(wb, "memory-mode", rrd_memory_mode_name(host->rrd_memory_mode)); 
+#ifdef ENABLE_DBENGINE + buffer_json_member_add_uint64(wb, "multidb-disk-quota", default_multidb_disk_quota_mb); + buffer_json_member_add_uint64(wb, "page-cache-size", default_rrdeng_page_cache_mb); +#endif // ENABLE_DBENGINE + buffer_json_member_add_boolean(wb, "web-enabled", web_server_mode != WEB_SERVER_MODE_NONE); + buffer_json_member_add_boolean(wb, "stream-enabled", default_rrdpush_enabled); + + buffer_json_member_add_boolean(wb, "stream-compression", + host->sender && host->sender->compressor.initialized); + + buffer_json_member_add_boolean(wb, "https-enabled", true); + + buffer_json_member_add_quoted_string(wb, "buildinfo", analytics_data.netdata_buildinfo); + buffer_json_member_add_quoted_string(wb, "release-channel", analytics_data.netdata_config_release_channel); + buffer_json_member_add_quoted_string(wb, "notification-methods", analytics_data.netdata_notification_methods); + + buffer_json_member_add_boolean(wb, "exporting-enabled", analytics_data.exporting_enabled); + buffer_json_member_add_quoted_string(wb, "exporting-connectors", analytics_data.netdata_exporting_connectors); + + buffer_json_member_add_uint64(wb, "allmetrics-prometheus-used", analytics_data.prometheus_hits); + buffer_json_member_add_uint64(wb, "allmetrics-shell-used", analytics_data.shell_hits); + buffer_json_member_add_uint64(wb, "allmetrics-json-used", analytics_data.json_hits); + buffer_json_member_add_uint64(wb, "dashboard-used", analytics_data.dashboard_hits); + + buffer_json_member_add_uint64(wb, "charts-count", analytics_data.charts_count); + buffer_json_member_add_uint64(wb, "metrics-count", analytics_data.metrics_count); + +#if defined(ENABLE_ML) + buffer_json_member_add_object(wb, "ml-info"); + ml_host_get_info(host, wb); + buffer_json_object_close(wb); +#endif + + buffer_json_finalize(wb); + return 0; +} + +int api_v1_info(RRDHOST *host, struct web_client *w, char *url) { + (void)url; + if (!netdata_ready) return HTTP_RESP_SERVICE_UNAVAILABLE; + BUFFER *wb = 
w->response.data; + buffer_flush(wb); + wb->content_type = CT_APPLICATION_JSON; + + web_client_api_request_v1_info_fill_buffer(host, wb); + + buffer_no_cacheable(wb); + return HTTP_RESP_OK; +} diff --git a/src/web/api/v1/api_v1_manage.c b/src/web/api/v1/api_v1_manage.c new file mode 100644 index 00000000000000..46a12d8bdaf863 --- /dev/null +++ b/src/web/api/v1/api_v1_manage.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v1_calls.h" + +char *api_secret; + +static char *get_mgmt_api_key(void) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s/netdata.api.key", netdata_configured_varlib_dir); + char *api_key_filename=config_get(CONFIG_SECTION_REGISTRY, "netdata management api key file", filename); + static char guid[GUID_LEN + 1] = ""; + + if(likely(guid[0])) + return guid; + + // read it from disk + int fd = open(api_key_filename, O_RDONLY | O_CLOEXEC); + if(fd != -1) { + char buf[GUID_LEN + 1]; + if(read(fd, buf, GUID_LEN) != GUID_LEN) + netdata_log_error("Failed to read management API key from '%s'", api_key_filename); + else { + buf[GUID_LEN] = '\0'; + if(regenerate_guid(buf, guid) == -1) { + netdata_log_error("Failed to validate management API key '%s' from '%s'.", + buf, api_key_filename); + + guid[0] = '\0'; + } + } + close(fd); + } + + // generate a new one? + if(!guid[0]) { + nd_uuid_t uuid; + + uuid_generate_time(uuid); + uuid_unparse_lower(uuid, guid); + guid[GUID_LEN] = '\0'; + + // save it + fd = open(api_key_filename, O_WRONLY|O_CREAT|O_TRUNC | O_CLOEXEC, 444); + if(fd == -1) { + netdata_log_error("Cannot create unique management API key file '%s'. Please adjust config parameter 'netdata management api key file' to a proper path and file.", api_key_filename); + goto temp_key; + } + + if(write(fd, guid, GUID_LEN) != GUID_LEN) { + netdata_log_error("Cannot write the unique management API key file '%s'. 
Please adjust config parameter 'netdata management api key file' to a proper path and file with enough space left.", api_key_filename); + close(fd); + goto temp_key; + } + + close(fd); + } + + return guid; + +temp_key: + netdata_log_info("You can still continue to use the alarm management API using the authorization token %s during this Netdata session only.", guid); + return guid; +} + +void api_v1_management_init(void) { + api_secret = get_mgmt_api_key(); +} + +#define HLT_MGM "manage/health" +int api_v1_manage(RRDHOST *host, struct web_client *w, char *url) { + const char *haystack = buffer_tostring(w->url_path_decoded); + char *needle; + + buffer_flush(w->response.data); + + if ((needle = strstr(haystack, HLT_MGM)) == NULL) { + buffer_strcat(w->response.data, "Invalid management request. Currently only 'health' is supported."); + return HTTP_RESP_NOT_FOUND; + } + needle += strlen(HLT_MGM); + if (*needle != '\0') { + buffer_strcat(w->response.data, "Invalid management request. Currently only 'health' is supported."); + return HTTP_RESP_NOT_FOUND; + } + return web_client_api_request_v1_mgmt_health(host, w, url); +} diff --git a/src/web/api/v1/api_v1_ml_info.c b/src/web/api/v1/api_v1_ml_info.c new file mode 100644 index 00000000000000..8da1e4eae174f9 --- /dev/null +++ b/src/web/api/v1/api_v1_ml_info.c @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v1_calls.h" + +int api_v1_ml_info(RRDHOST *host, struct web_client *w, char *url) { + (void) url; +#if defined(ENABLE_ML) + + if (!netdata_ready) + return HTTP_RESP_SERVICE_UNAVAILABLE; + + BUFFER *wb = w->response.data; + buffer_flush(wb); + wb->content_type = CT_APPLICATION_JSON; + + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); + ml_host_get_detection_info(host, wb); + buffer_json_finalize(wb); + + buffer_no_cacheable(wb); + + return HTTP_RESP_OK; +#else + return HTTP_RESP_SERVICE_UNAVAILABLE; +#endif // ENABLE_ML +} + diff --git 
a/src/web/api/v1/api_v1_registry.c b/src/web/api/v1/api_v1_registry.c new file mode 100644 index 00000000000000..fa4ce4ca4b5f4e --- /dev/null +++ b/src/web/api/v1/api_v1_registry.c @@ -0,0 +1,198 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v1_calls.h" + +// Pings a netdata server: +// /api/v1/registry?action=hello +// +// Access to a netdata registry: +// /api/v1/registry?action=access&machine=${machine_guid}&name=${hostname}&url=${url} +// +// Delete from a netdata registry: +// /api/v1/registry?action=delete&machine=${machine_guid}&name=${hostname}&url=${url}&delete_url=${delete_url} +// +// Search for the URLs of a machine: +// /api/v1/registry?action=search&for=${machine_guid} +// +// Impersonate: +// /api/v1/registry?action=switch&machine=${machine_guid}&name=${hostname}&url=${url}&to=${new_person_guid} +int api_v1_registry(RRDHOST *host, struct web_client *w, char *url) { + static uint32_t hash_action = 0, hash_access = 0, hash_hello = 0, hash_delete = 0, hash_search = 0, + hash_switch = 0, hash_machine = 0, hash_url = 0, hash_name = 0, hash_delete_url = 0, hash_for = 0, + hash_to = 0 /*, hash_redirects = 0 */; + + if(unlikely(!hash_action)) { + hash_action = simple_hash("action"); + hash_access = simple_hash("access"); + hash_hello = simple_hash("hello"); + hash_delete = simple_hash("delete"); + hash_search = simple_hash("search"); + hash_switch = simple_hash("switch"); + hash_machine = simple_hash("machine"); + hash_url = simple_hash("url"); + hash_name = simple_hash("name"); + hash_delete_url = simple_hash("delete_url"); + hash_for = simple_hash("for"); + hash_to = simple_hash("to"); + /* + hash_redirects = simple_hash("redirects"); +*/ + } + + netdata_log_debug(D_WEB_CLIENT, "%llu: API v1 registry with URL '%s'", w->id, url); + + // TODO + // The browser may send multiple cookies with our id + + char person_guid[UUID_STR_LEN] = ""; + char *cookie = strstr(w->response.data->buffer, NETDATA_REGISTRY_COOKIE_NAME "="); + if(cookie) + 
strncpyz(person_guid, &cookie[sizeof(NETDATA_REGISTRY_COOKIE_NAME)], UUID_STR_LEN - 1); + else if(!extract_bearer_token_from_request(w, person_guid, sizeof(person_guid))) + person_guid[0] = '\0'; + + char action = '\0'; + char *machine_guid = NULL, + *machine_url = NULL, + *url_name = NULL, + *search_machine_guid = NULL, + *delete_url = NULL, + *to_person_guid = NULL; + /* + int redirects = 0; +*/ + + // Don't cache registry responses + buffer_no_cacheable(w->response.data); + + while(url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if (!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if (!name || !*name) continue; + if (!value || !*value) continue; + + netdata_log_debug(D_WEB_CLIENT, "%llu: API v1 registry query param '%s' with value '%s'", w->id, name, value); + + uint32_t hash = simple_hash(name); + + if(hash == hash_action && !strcmp(name, "action")) { + uint32_t vhash = simple_hash(value); + + if(vhash == hash_access && !strcmp(value, "access")) action = 'A'; + else if(vhash == hash_hello && !strcmp(value, "hello")) action = 'H'; + else if(vhash == hash_delete && !strcmp(value, "delete")) action = 'D'; + else if(vhash == hash_search && !strcmp(value, "search")) action = 'S'; + else if(vhash == hash_switch && !strcmp(value, "switch")) action = 'W'; +#ifdef NETDATA_INTERNAL_CHECKS + else netdata_log_error("unknown registry action '%s'", value); +#endif /* NETDATA_INTERNAL_CHECKS */ + } + /* + else if(hash == hash_redirects && !strcmp(name, "redirects")) + redirects = atoi(value); +*/ + else if(hash == hash_machine && !strcmp(name, "machine")) + machine_guid = value; + + else if(hash == hash_url && !strcmp(name, "url")) + machine_url = value; + + else if(action == 'A') { + if(hash == hash_name && !strcmp(name, "name")) + url_name = value; + } + else if(action == 'D') { + if(hash == hash_delete_url && !strcmp(name, "delete_url")) + delete_url = value; + } + else if(action == 'S') { + if(hash == 
hash_for && !strcmp(name, "for")) + search_machine_guid = value; + } + else if(action == 'W') { + if(hash == hash_to && !strcmp(name, "to")) + to_person_guid = value; + } +#ifdef NETDATA_INTERNAL_CHECKS + else netdata_log_error("unused registry URL parameter '%s' with value '%s'", name, value); +#endif /* NETDATA_INTERNAL_CHECKS */ + } + + bool do_not_track = respect_web_browser_do_not_track_policy && web_client_has_donottrack(w); + + if(unlikely(action == 'H')) { + // HELLO request, dashboard ACL + analytics_log_dashboard(); + if(unlikely(!http_can_access_dashboard(w))) + return web_client_permission_denied_acl(w); + } + else { + // everything else, registry ACL + if(unlikely(!http_can_access_registry(w))) + return web_client_permission_denied_acl(w); + + if(unlikely(do_not_track)) { + buffer_flush(w->response.data); + buffer_sprintf(w->response.data, "Your web browser is sending 'DNT: 1' (Do Not Track). The registry requires persistent cookies on your browser to work."); + return HTTP_RESP_BAD_REQUEST; + } + } + + buffer_no_cacheable(w->response.data); + + switch(action) { + case 'A': + if(unlikely(!machine_guid || !machine_url || !url_name)) { + netdata_log_error("Invalid registry request - access requires these parameters: machine ('%s'), url ('%s'), name ('%s')", machine_guid ? machine_guid : "UNSET", machine_url ? machine_url : "UNSET", url_name ? 
url_name : "UNSET"); + buffer_flush(w->response.data); + buffer_strcat(w->response.data, "Invalid registry Access request."); + return HTTP_RESP_BAD_REQUEST; + } + + web_client_enable_tracking_required(w); + return registry_request_access_json(host, w, person_guid, machine_guid, machine_url, url_name, now_realtime_sec()); + + case 'D': + if(unlikely(!machine_guid || !machine_url || !delete_url)) { + netdata_log_error("Invalid registry request - delete requires these parameters: machine ('%s'), url ('%s'), delete_url ('%s')", machine_guid?machine_guid:"UNSET", machine_url?machine_url:"UNSET", delete_url?delete_url:"UNSET"); + buffer_flush(w->response.data); + buffer_strcat(w->response.data, "Invalid registry Delete request."); + return HTTP_RESP_BAD_REQUEST; + } + + web_client_enable_tracking_required(w); + return registry_request_delete_json(host, w, person_guid, machine_guid, machine_url, delete_url, now_realtime_sec()); + + case 'S': + if(unlikely(!search_machine_guid)) { + netdata_log_error("Invalid registry request - search requires these parameters: for ('%s')", search_machine_guid?search_machine_guid:"UNSET"); + buffer_flush(w->response.data); + buffer_strcat(w->response.data, "Invalid registry Search request."); + return HTTP_RESP_BAD_REQUEST; + } + + web_client_enable_tracking_required(w); + return registry_request_search_json(host, w, person_guid, search_machine_guid); + + case 'W': + if(unlikely(!machine_guid || !machine_url || !to_person_guid)) { + netdata_log_error("Invalid registry request - switching identity requires these parameters: machine ('%s'), url ('%s'), to ('%s')", machine_guid?machine_guid:"UNSET", machine_url?machine_url:"UNSET", to_person_guid?to_person_guid:"UNSET"); + buffer_flush(w->response.data); + buffer_strcat(w->response.data, "Invalid registry Switch request."); + return HTTP_RESP_BAD_REQUEST; + } + + web_client_enable_tracking_required(w); + return registry_request_switch_json(host, w, person_guid, machine_guid, machine_url, 
to_person_guid, now_realtime_sec()); + + case 'H': + return registry_request_hello_json(host, w, do_not_track); + + default: + buffer_flush(w->response.data); + buffer_strcat(w->response.data, "Invalid registry request - you need to set an action: hello, access, delete, search"); + return HTTP_RESP_BAD_REQUEST; + } +} diff --git a/src/web/api/v1/api_v1_weights.c b/src/web/api/v1/api_v1_weights.c new file mode 100644 index 00000000000000..0d8e5f37157237 --- /dev/null +++ b/src/web/api/v1/api_v1_weights.c @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v1_calls.h" + +int api_v1_metric_correlations(RRDHOST *host, struct web_client *w, char *url) { + return web_client_api_request_weights(host, w, url, default_metric_correlations_method, WEIGHTS_FORMAT_CHARTS, 1); +} + +int api_v1_weights(RRDHOST *host, struct web_client *w, char *url) { + return web_client_api_request_weights(host, w, url, WEIGHTS_METHOD_ANOMALY_RATE, WEIGHTS_FORMAT_CONTEXTS, 1); +} diff --git a/src/web/api/v2/api_v2_alert_config.c b/src/web/api/v2/api_v2_alert_config.c new file mode 100644 index 00000000000000..b4f5344e2e89ad --- /dev/null +++ b/src/web/api/v2/api_v2_alert_config.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_calls.h" + +int api_v2_alert_config(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { + const char *config = NULL; + + while(url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if(!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if(!name || !*name) continue; + if(!value || !*value) continue; + + // name and value are now the parameters + // they are not null and not empty + + if(!strcmp(name, "config")) + config = value; + } + + buffer_flush(w->response.data); + + if(!config) { + w->response.data->content_type = CT_TEXT_PLAIN; + buffer_strcat(w->response.data, "A config hash ID is required. 
Add ?config=UUID query param"); + return HTTP_RESP_BAD_REQUEST; + } + + return contexts_v2_alert_config_to_json(w, config); +} diff --git a/src/web/api/v2/api_v2_alert_transitions.c b/src/web/api/v2/api_v2_alert_transitions.c new file mode 100644 index 00000000000000..e84b80184c6946 --- /dev/null +++ b/src/web/api/v2/api_v2_alert_transitions.c @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_calls.h" + +int api_v2_alert_transitions(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { + return api_v2_contexts_internal(host, w, url, CONTEXTS_V2_ALERT_TRANSITIONS | CONTEXTS_V2_NODES); +} diff --git a/src/web/api/v2/api_v2_alerts.c b/src/web/api/v2/api_v2_alerts.c new file mode 100644 index 00000000000000..c5d1922e211d68 --- /dev/null +++ b/src/web/api/v2/api_v2_alerts.c @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_calls.h" + +int api_v2_alerts(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { + return api_v2_contexts_internal(host, w, url, CONTEXTS_V2_ALERTS | CONTEXTS_V2_NODES); +} diff --git a/src/web/api/v2/api_v2_bearer.c b/src/web/api/v2/api_v2_bearer.c new file mode 100644 index 00000000000000..26cb3c93523eaf --- /dev/null +++ b/src/web/api/v2/api_v2_bearer.c @@ -0,0 +1,139 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_calls.h" + +static bool verify_host_uuids(RRDHOST *host, const char *machine_guid, const char *node_id) { + if(!machine_guid || !node_id) + return false; + + if(strcmp(machine_guid, host->machine_guid) != 0) + return false; + + if(uuid_is_null(host->node_id)) + return false; + + char buf[UUID_STR_LEN]; + uuid_unparse_lower(host->node_id, buf); + + return strcmp(node_id, buf) == 0; +} + +int api_v2_bearer_protection(RRDHOST *host __maybe_unused, struct web_client *w __maybe_unused, char *url) { + char *machine_guid = NULL; + char *claim_id = NULL; + char *node_id = NULL; + bool protection = netdata_is_protected_by_bearer; + + 
while (url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if (!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if (!name || !*name) continue; + if (!value || !*value) continue; + + if(!strcmp(name, "bearer_protection")) { + if(!strcmp(value, "on") || !strcmp(value, "true") || !strcmp(value, "yes")) + protection = true; + else + protection = false; + } + else if(!strcmp(name, "machine_guid")) + machine_guid = value; + else if(!strcmp(name, "claim_id")) + claim_id = value; + else if(!strcmp(name, "node_id")) + node_id = value; + } + + if(!claim_id_matches(claim_id)) { + buffer_reset(w->response.data); + buffer_strcat(w->response.data, "The request is for a different claimed agent"); + return HTTP_RESP_BAD_REQUEST; + } + + if(!verify_host_uuids(localhost, machine_guid, node_id)) { + buffer_reset(w->response.data); + buffer_strcat(w->response.data, "The request is missing or not matching local UUIDs"); + return HTTP_RESP_BAD_REQUEST; + } + + netdata_is_protected_by_bearer = protection; + + BUFFER *wb = w->response.data; + buffer_reset(wb); + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); + buffer_json_member_add_boolean(wb, "bearer_protection", netdata_is_protected_by_bearer); + buffer_json_finalize(wb); + + return HTTP_RESP_OK; +} + +int bearer_get_token_json_response(BUFFER *wb, RRDHOST *host, const char *claim_id, const char *machine_guid, const char *node_id, HTTP_USER_ROLE user_role, HTTP_ACCESS access, nd_uuid_t cloud_account_id, const char *client_name) { + if(!claim_id_matches_any(claim_id)) + return rrd_call_function_error(wb, "The request is for a different claimed agent", HTTP_RESP_BAD_REQUEST); + + if(!verify_host_uuids(host, machine_guid, node_id)) + return rrd_call_function_error(wb, "The request is missing or not matching local UUIDs", HTTP_RESP_BAD_REQUEST); + + nd_uuid_t uuid; + time_t expires_s = bearer_create_token(&uuid, user_role, access, 
cloud_account_id, client_name); + + buffer_reset(wb); + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY); + buffer_json_member_add_int64(wb, "status", HTTP_RESP_OK); + buffer_json_member_add_string(wb, "mg", host->machine_guid); + buffer_json_member_add_boolean(wb, "bearer_protection", netdata_is_protected_by_bearer); + buffer_json_member_add_uuid(wb, "token", uuid); + buffer_json_member_add_time_t(wb, "expiration", expires_s); + buffer_json_finalize(wb); + return HTTP_RESP_OK; +} + +int api_v2_bearer_get_token(RRDHOST *host, struct web_client *w, char *url) { + char *machine_guid = NULL; + char *claim_id = NULL; + char *node_id = NULL; + + while(url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if (!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if (!name || !*name) continue; + if (!value || !*value) continue; + + if(!strcmp(name, "machine_guid")) + machine_guid = value; + else if(!strcmp(name, "claim_id")) + claim_id = value; + else if(!strcmp(name, "node_id")) + node_id = value; + } + + if(!claim_id_matches(claim_id)) { + buffer_reset(w->response.data); + buffer_strcat(w->response.data, "The request is for a different claimed agent"); + return HTTP_RESP_BAD_REQUEST; + } + + if(!verify_host_uuids(host, machine_guid, node_id)) { + buffer_reset(w->response.data); + buffer_strcat(w->response.data, "The request is missing or not matching local UUIDs"); + return HTTP_RESP_BAD_REQUEST; + } + + if(host != localhost) + return call_function_bearer_get_token(host, w, claim_id, machine_guid, node_id); + + return bearer_get_token_json_response( + w->response.data, + host, + claim_id, + machine_guid, + node_id, + w->user_role, + w->access, + w->auth.cloud_account_id, + w->auth.client_name); +} diff --git a/src/web/api/v2/api_v2_calls.h b/src/web/api/v2/api_v2_calls.h new file mode 100644 index 00000000000000..e40f9f4521904d --- /dev/null +++ b/src/web/api/v2/api_v2_calls.h @@ 
-0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_API_V2_CALLS_H +#define NETDATA_API_V2_CALLS_H + +#include "../web_api_v2.h" + +int api_v2_info(RRDHOST *host, struct web_client *w, char *url); + +int api_v2_data(RRDHOST *host, struct web_client *w, char *url); +int api_v2_weights(RRDHOST *host, struct web_client *w, char *url); + +int api_v2_alert_config(RRDHOST *host, struct web_client *w, char *url); + +int api_v2_contexts_internal(RRDHOST *host, struct web_client *w, char *url, CONTEXTS_V2_MODE mode); +int api_v2_contexts(RRDHOST *host, struct web_client *w, char *url); +int api_v2_alert_transitions(RRDHOST *host, struct web_client *w, char *url); +int api_v2_alerts(RRDHOST *host, struct web_client *w, char *url); +int api_v2_functions(RRDHOST *host, struct web_client *w, char *url); +int api_v2_versions(RRDHOST *host, struct web_client *w, char *url); +int api_v2_q(RRDHOST *host, struct web_client *w, char *url); +int api_v2_nodes(RRDHOST *host, struct web_client *w, char *url); +int api_v2_node_instances(RRDHOST *host, struct web_client *w, char *url); + +int api_v2_ilove(RRDHOST *host, struct web_client *w, char *url); + +int api_v2_claim(RRDHOST *host, struct web_client *w, char *url); + +int api_v2_webrtc(RRDHOST *host, struct web_client *w, char *url); + +int api_v2_progress(RRDHOST *host, struct web_client *w, char *url); + +int api_v2_bearer_get_token(RRDHOST *host, struct web_client *w, char *url); +int bearer_get_token_json_response(BUFFER *wb, RRDHOST *host, const char *claim_id, const char *machine_guid, const char *node_id, HTTP_USER_ROLE user_role, HTTP_ACCESS access, nd_uuid_t cloud_account_id, const char *client_name); +int api_v2_bearer_protection(RRDHOST *host, struct web_client *w, char *url); + +#endif //NETDATA_API_V2_CALLS_H diff --git a/src/web/api/v2/api_v2_claim.c b/src/web/api/v2/api_v2_claim.c new file mode 100644 index 00000000000000..990920cf1266c5 --- /dev/null +++ b/src/web/api/v2/api_v2_claim.c @@ 
-0,0 +1,212 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_calls.h" +#include "claim/claim.h" + +#if defined(OS_WINDOWS) +#include <windows.h> +#include <sys/cygwin.h> +#endif + +static char *netdata_random_session_id_filename = NULL; +static nd_uuid_t netdata_random_session_id = { 0 }; + +bool netdata_random_session_id_generate(void) { + static char guid[UUID_STR_LEN] = ""; + + uuid_generate_random(netdata_random_session_id); + uuid_unparse_lower(netdata_random_session_id, guid); + + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s/netdata_random_session_id", netdata_configured_varlib_dir); + + bool ret = true; + + (void)unlink(filename); + + // save it + int fd = open(filename, O_WRONLY|O_CREAT|O_TRUNC|O_CLOEXEC, 0640); + if(fd == -1) { + netdata_log_error("Cannot create random session id file '%s'.", filename); + ret = false; + } + else { + if (write(fd, guid, UUID_STR_LEN - 1) != UUID_STR_LEN - 1) { + netdata_log_error("Cannot write the random session id file '%s'.", filename); + ret = false; + } else { + ssize_t bytes = write(fd, "\n", 1); + UNUSED(bytes); + } + close(fd); + } + + if(ret && (!netdata_random_session_id_filename || strcmp(netdata_random_session_id_filename, filename) != 0)) { + freez(netdata_random_session_id_filename); + netdata_random_session_id_filename = strdupz(filename); + } + + return ret; +} + +static const char *netdata_random_session_id_get_filename(void) { + if(!netdata_random_session_id_filename) + netdata_random_session_id_generate(); + + return netdata_random_session_id_filename; +} + +static bool netdata_random_session_id_matches(const char *guid) { + if(uuid_is_null(netdata_random_session_id)) + return false; + + nd_uuid_t uuid; + + if(uuid_parse(guid, uuid)) + return false; + + if(uuid_compare(netdata_random_session_id, uuid) == 0) + return true; + + return false; +} + +static bool check_claim_param(const char *s) { + if(!s || !*s) return true; + + do { + if(isalnum((uint8_t)*s) || *s == '.' 
|| *s == ',' || *s == '-' || *s == ':' || *s == '/' || *s == '_') + ; + else + return false; + + } while(*++s); + + return true; +} + +int api_v2_claim(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { + char *key = NULL; + char *token = NULL; + char *rooms = NULL; + char *base_url = NULL; + + while (url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if (!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if (!name || !*name) continue; + if (!value || !*value) continue; + + if(!strcmp(name, "key")) + key = value; + else if(!strcmp(name, "token")) + token = value; + else if(!strcmp(name, "rooms")) + rooms = value; + else if(!strcmp(name, "url")) + base_url = value; + } + + BUFFER *wb = w->response.data; + buffer_flush(wb); + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); + + time_t now_s = now_realtime_sec(); + CLOUD_STATUS status = buffer_json_cloud_status(wb, now_s); + + bool can_be_claimed = false; + switch(status) { + case CLOUD_STATUS_AVAILABLE: + case CLOUD_STATUS_OFFLINE: + case CLOUD_STATUS_INDIRECT: + can_be_claimed = true; + break; + + case CLOUD_STATUS_BANNED: + case CLOUD_STATUS_ONLINE: + can_be_claimed = false; + break; + } + + buffer_json_member_add_boolean(wb, "can_be_claimed", can_be_claimed); + + if(can_be_claimed && key) { + if(!netdata_random_session_id_matches(key)) { + buffer_reset(wb); + buffer_strcat(wb, "invalid key"); + netdata_random_session_id_generate(); // generate a new key, to avoid an attack to find it + return HTTP_RESP_FORBIDDEN; + } + + if(!token || !base_url || !check_claim_param(token) || !check_claim_param(base_url) || (rooms && !check_claim_param(rooms))) { + buffer_reset(wb); + buffer_strcat(wb, "invalid parameters"); + netdata_random_session_id_generate(); // generate a new key, to avoid an attack to find it + return HTTP_RESP_BAD_REQUEST; + } + + netdata_random_session_id_generate(); // generate a new key, to avoid 
an attack to find it + + bool success = false; + const char *msg; + if(claim_agent(base_url, token, rooms, cloud_config_proxy_get(), cloud_config_insecure_get())) { + msg = "ok"; + success = true; + can_be_claimed = false; + status = claim_reload_and_wait_online(); + } + else + msg = claim_agent_failure_reason_get(); + + // our status may have changed + // refresh the status in our output + buffer_flush(wb); + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); + now_s = now_realtime_sec(); + buffer_json_cloud_status(wb, now_s); + + // and this is the status of the claiming command we run + buffer_json_member_add_boolean(wb, "success", success); + buffer_json_member_add_string_or_empty(wb, "message", msg); + } + + if(can_be_claimed) { + const char *filename = netdata_random_session_id_get_filename(); + CLEAN_BUFFER *buffer = buffer_create(0, NULL); + + const char *os_filename; + const char *os_prefix; + const char *os_quote; + const char *os_message; + +#if defined(OS_WINDOWS) + char win_path[MAX_PATH]; + cygwin_conv_path(CCP_POSIX_TO_WIN_A, filename, win_path, sizeof(win_path)); + os_filename = win_path; + os_prefix = "more"; + os_message = "We need to verify this Windows server is yours. So, open a Command Prompt on this server to run the command. It will give you a UUID. Copy and paste this UUID to this box:"; +#else + os_filename = filename; + os_prefix = "sudo cat"; + os_message = "We need to verify this server is yours. SSH to this server and run this command. It will give you a UUID. 
Copy and paste this UUID to this box:"; +#endif + + // add quotes only when the filename has a space + if(strchr(os_filename, ' ')) + os_quote = "\""; + else + os_quote = ""; + + buffer_sprintf(buffer, "%s %s%s%s", os_prefix, os_quote, os_filename, os_quote); + buffer_json_member_add_string(wb, "key_filename", os_filename); + buffer_json_member_add_string(wb, "cmd", buffer_tostring(buffer)); + buffer_json_member_add_string(wb, "help", os_message); + } + + buffer_json_agents_v2(wb, NULL, now_s, false, false); + buffer_json_finalize(wb); + + return HTTP_RESP_OK; +} diff --git a/src/web/api/v2/api_v2_contexts.c b/src/web/api/v2/api_v2_contexts.c new file mode 100644 index 00000000000000..bbe36ab34768f2 --- /dev/null +++ b/src/web/api/v2/api_v2_contexts.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_calls.h" + +// -------------------------------------------------------------------------------------------------------------------- + +int api_v2_contexts_internal(RRDHOST *host __maybe_unused, struct web_client *w, char *url, CONTEXTS_V2_MODE mode) { + struct api_v2_contexts_request req = { 0 }; + + while(url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if(!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if(!name || !*name) continue; + if(!value || !*value) continue; + + // name and value are now the parameters + // they are not null and not empty + + if(!strcmp(name, "scope_nodes")) + req.scope_nodes = value; + else if(!strcmp(name, "nodes")) + req.nodes = value; + else if((mode & (CONTEXTS_V2_CONTEXTS | CONTEXTS_V2_SEARCH | CONTEXTS_V2_ALERTS | CONTEXTS_V2_ALERT_TRANSITIONS)) && !strcmp(name, "scope_contexts")) + req.scope_contexts = value; + else if((mode & (CONTEXTS_V2_CONTEXTS | CONTEXTS_V2_SEARCH | CONTEXTS_V2_ALERTS | CONTEXTS_V2_ALERT_TRANSITIONS)) && !strcmp(name, "contexts")) + req.contexts = value; + else if((mode & CONTEXTS_V2_SEARCH) && !strcmp(name, 
"q")) + req.q = value; + else if(!strcmp(name, "options")) + req.options = contexts_options_str_to_id(value); + else if(!strcmp(name, "after")) + req.after = str2l(value); + else if(!strcmp(name, "before")) + req.before = str2l(value); + else if(!strcmp(name, "timeout")) + req.timeout_ms = str2l(value); + else if(mode & (CONTEXTS_V2_ALERTS | CONTEXTS_V2_ALERT_TRANSITIONS)) { + if (!strcmp(name, "alert")) + req.alerts.alert = value; + else if (!strcmp(name, "transition")) + req.alerts.transition = value; + else if(mode & CONTEXTS_V2_ALERTS) { + if (!strcmp(name, "status")) + req.alerts.status = contexts_alert_status_str_to_id(value); + } + else if(mode & CONTEXTS_V2_ALERT_TRANSITIONS) { + if (!strcmp(name, "last")) + req.alerts.last = strtoul(value, NULL, 0); + else if(!strcmp(name, "context")) + req.contexts = value; + else if (!strcmp(name, "anchor_gi")) { + req.alerts.global_id_anchor = str2ull(value, NULL); + } + else { + for(int i = 0; i < ATF_TOTAL_ENTRIES ;i++) { + if(!strcmp(name, alert_transition_facets[i].query_param)) + req.alerts.facets[i] = value; + } + } + } + } + } + + if ((mode & CONTEXTS_V2_ALERT_TRANSITIONS) && !req.alerts.last) + req.alerts.last = 1; + + buffer_flush(w->response.data); + buffer_no_cacheable(w->response.data); + return rrdcontext_to_json_v2(w->response.data, &req, mode); +} + +int api_v2_contexts(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { + return api_v2_contexts_internal( + host, w, url, CONTEXTS_V2_CONTEXTS | CONTEXTS_V2_NODES | CONTEXTS_V2_AGENTS | CONTEXTS_V2_VERSIONS); +} + diff --git a/src/web/api/v2/api_v2_data.c b/src/web/api/v2/api_v2_data.c new file mode 100644 index 00000000000000..4eb54e9adfe6a0 --- /dev/null +++ b/src/web/api/v2/api_v2_data.c @@ -0,0 +1,302 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_calls.h" + +#define GROUP_BY_KEY_MAX_LENGTH 30 +static struct { + char group_by[GROUP_BY_KEY_MAX_LENGTH + 1]; + char aggregation[GROUP_BY_KEY_MAX_LENGTH + 1]; + char 
group_by_label[GROUP_BY_KEY_MAX_LENGTH + 1]; +} group_by_keys[MAX_QUERY_GROUP_BY_PASSES]; + +__attribute__((constructor)) void initialize_group_by_keys(void) { + for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) { + snprintfz(group_by_keys[g].group_by, GROUP_BY_KEY_MAX_LENGTH, "group_by[%zu]", g); + snprintfz(group_by_keys[g].aggregation, GROUP_BY_KEY_MAX_LENGTH, "aggregation[%zu]", g); + snprintfz(group_by_keys[g].group_by_label, GROUP_BY_KEY_MAX_LENGTH, "group_by_label[%zu]", g); + } +} + +int api_v2_data(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { + usec_t received_ut = now_monotonic_usec(); + + int ret = HTTP_RESP_BAD_REQUEST; + + buffer_flush(w->response.data); + + char *google_version = "0.6", + *google_reqId = "0", + *google_sig = "0", + *google_out = "json", + *responseHandler = NULL, + *outFileName = NULL; + + time_t last_timestamp_in_data = 0, google_timestamp = 0; + + char *scope_nodes = NULL; + char *scope_contexts = NULL; + char *nodes = NULL; + char *contexts = NULL; + char *instances = NULL; + char *dimensions = NULL; + char *before_str = NULL; + char *after_str = NULL; + char *resampling_time_str = NULL; + char *points_str = NULL; + char *timeout_str = NULL; + char *labels = NULL; + char *alerts = NULL; + char *time_group_options = NULL; + char *tier_str = NULL; + size_t tier = 0; + RRDR_TIME_GROUPING time_group = RRDR_GROUPING_AVERAGE; + DATASOURCE_FORMAT format = DATASOURCE_JSON2; + RRDR_OPTIONS options = RRDR_OPTION_VIRTUAL_POINTS | RRDR_OPTION_JSON_WRAP | RRDR_OPTION_RETURN_JWAR; + + struct group_by_pass group_by[MAX_QUERY_GROUP_BY_PASSES] = { + { + .group_by = RRDR_GROUP_BY_DIMENSION, + .group_by_label = NULL, + .aggregation = RRDR_GROUP_BY_FUNCTION_AVERAGE, + }, + }; + + size_t group_by_idx = 0, group_by_label_idx = 0, aggregation_idx = 0; + + while(url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if(!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + 
if(!name || !*name) continue; + if(!value || !*value) continue; + + // name and value are now the parameters + // they are not null and not empty + + if(!strcmp(name, "scope_nodes")) scope_nodes = value; + else if(!strcmp(name, "scope_contexts")) scope_contexts = value; + else if(!strcmp(name, "nodes")) nodes = value; + else if(!strcmp(name, "contexts")) contexts = value; + else if(!strcmp(name, "instances")) instances = value; + else if(!strcmp(name, "dimensions")) dimensions = value; + else if(!strcmp(name, "labels")) labels = value; + else if(!strcmp(name, "alerts")) alerts = value; + else if(!strcmp(name, "after")) after_str = value; + else if(!strcmp(name, "before")) before_str = value; + else if(!strcmp(name, "points")) points_str = value; + else if(!strcmp(name, "timeout")) timeout_str = value; + else if(!strcmp(name, "group_by")) { + group_by[group_by_idx++].group_by = group_by_parse(value); + if(group_by_idx >= MAX_QUERY_GROUP_BY_PASSES) + group_by_idx = MAX_QUERY_GROUP_BY_PASSES - 1; + } + else if(!strcmp(name, "group_by_label")) { + group_by[group_by_label_idx++].group_by_label = value; + if(group_by_label_idx >= MAX_QUERY_GROUP_BY_PASSES) + group_by_label_idx = MAX_QUERY_GROUP_BY_PASSES - 1; + } + else if(!strcmp(name, "aggregation")) { + group_by[aggregation_idx++].aggregation = group_by_aggregate_function_parse(value); + if(aggregation_idx >= MAX_QUERY_GROUP_BY_PASSES) + aggregation_idx = MAX_QUERY_GROUP_BY_PASSES - 1; + } + else if(!strcmp(name, "format")) format = datasource_format_str_to_id(value); + else if(!strcmp(name, "options")) options |= rrdr_options_parse(value); + else if(!strcmp(name, "time_group")) time_group = time_grouping_parse(value, RRDR_GROUPING_AVERAGE); + else if(!strcmp(name, "time_group_options")) time_group_options = value; + else if(!strcmp(name, "time_resampling")) resampling_time_str = value; + else if(!strcmp(name, "tier")) tier_str = value; + else if(!strcmp(name, "callback")) responseHandler = value; + else 
if(!strcmp(name, "filename")) outFileName = value; + else if(!strcmp(name, "tqx")) { + // parse Google Visualization API options + // https://developers.google.com/chart/interactive/docs/dev/implementing_data_source + char *tqx_name, *tqx_value; + + while(value) { + tqx_value = strsep_skip_consecutive_separators(&value, ";"); + if(!tqx_value || !*tqx_value) continue; + + tqx_name = strsep_skip_consecutive_separators(&tqx_value, ":"); + if(!tqx_name || !*tqx_name) continue; + if(!tqx_value || !*tqx_value) continue; + + if(!strcmp(tqx_name, "version")) + google_version = tqx_value; + else if(!strcmp(tqx_name, "reqId")) + google_reqId = tqx_value; + else if(!strcmp(tqx_name, "sig")) { + google_sig = tqx_value; + google_timestamp = strtoul(google_sig, NULL, 0); + } + else if(!strcmp(tqx_name, "out")) { + google_out = tqx_value; + format = google_data_format_str_to_id(google_out); + } + else if(!strcmp(tqx_name, "responseHandler")) + responseHandler = tqx_value; + else if(!strcmp(tqx_name, "outFileName")) + outFileName = tqx_value; + } + } + else { + for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) { + if(!strcmp(name, group_by_keys[g].group_by)) + group_by[g].group_by = group_by_parse(value); + else if(!strcmp(name, group_by_keys[g].group_by_label)) + group_by[g].group_by_label = value; + else if(!strcmp(name, group_by_keys[g].aggregation)) + group_by[g].aggregation = group_by_aggregate_function_parse(value); + } + } + } + + // validate the google parameters given + fix_google_param(google_out); + fix_google_param(google_sig); + fix_google_param(google_reqId); + fix_google_param(google_version); + fix_google_param(responseHandler); + fix_google_param(outFileName); + + for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) { + if (group_by[g].group_by_label && *group_by[g].group_by_label) + group_by[g].group_by |= RRDR_GROUP_BY_LABEL; + } + + if(group_by[0].group_by == RRDR_GROUP_BY_NONE) + group_by[0].group_by = RRDR_GROUP_BY_DIMENSION; + + for(size_t g = 0; g < 
MAX_QUERY_GROUP_BY_PASSES ;g++) { + if ((group_by[g].group_by & ~(RRDR_GROUP_BY_DIMENSION)) || (options & RRDR_OPTION_PERCENTAGE)) { + options |= RRDR_OPTION_ABSOLUTE; + break; + } + } + + if(options & RRDR_OPTION_DEBUG) + options &= ~RRDR_OPTION_MINIFY; + + if(tier_str && *tier_str) { + tier = str2ul(tier_str); + if(tier < storage_tiers) + options |= RRDR_OPTION_SELECTED_TIER; + else + tier = 0; + } + + time_t before = (before_str && *before_str)?str2l(before_str):0; + time_t after = (after_str && *after_str) ?str2l(after_str):-600; + size_t points = (points_str && *points_str)?str2u(points_str):0; + int timeout = (timeout_str && *timeout_str)?str2i(timeout_str): 0; + time_t resampling_time = (resampling_time_str && *resampling_time_str) ? str2l(resampling_time_str) : 0; + + QUERY_TARGET_REQUEST qtr = { + .version = 2, + .scope_nodes = scope_nodes, + .scope_contexts = scope_contexts, + .after = after, + .before = before, + .host = NULL, + .st = NULL, + .nodes = nodes, + .contexts = contexts, + .instances = instances, + .dimensions = dimensions, + .alerts = alerts, + .timeout_ms = timeout, + .points = points, + .format = format, + .options = options, + .time_group_method = time_group, + .time_group_options = time_group_options, + .resampling_time = resampling_time, + .tier = tier, + .chart_label_key = NULL, + .labels = labels, + .query_source = QUERY_SOURCE_API_DATA, + .priority = STORAGE_PRIORITY_NORMAL, + .received_ut = received_ut, + + .interrupt_callback = web_client_interrupt_callback, + .interrupt_callback_data = w, + + .transaction = &w->transaction, + }; + + for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) + qtr.group_by[g] = group_by[g]; + + QUERY_TARGET *qt = query_target_create(&qtr); + ONEWAYALLOC *owa = NULL; + + if(!qt) { + buffer_sprintf(w->response.data, "Failed to prepare the query."); + ret = HTTP_RESP_INTERNAL_SERVER_ERROR; + goto cleanup; + } + + web_client_timeout_checkpoint_set(w, timeout); + if(web_client_timeout_checkpoint_and_check(w, 
NULL)) { + ret = w->response.code; + goto cleanup; + } + + if(outFileName && *outFileName) { + buffer_sprintf(w->response.header, "Content-Disposition: attachment; filename=\"%s\"\r\n", outFileName); + netdata_log_debug(D_WEB_CLIENT, "%llu: generating outfilename header: '%s'", w->id, outFileName); + } + + if(format == DATASOURCE_DATATABLE_JSONP) { + if(responseHandler == NULL) + responseHandler = "google.visualization.Query.setResponse"; + + netdata_log_debug(D_WEB_CLIENT_ACCESS, "%llu: GOOGLE JSON/JSONP: version = '%s', reqId = '%s', sig = '%s', out = '%s', responseHandler = '%s', outFileName = '%s'", + w->id, google_version, google_reqId, google_sig, google_out, responseHandler, outFileName + ); + + buffer_sprintf( + w->response.data, + "%s({version:'%s',reqId:'%s',status:'ok',sig:'%"PRId64"',table:", + responseHandler, + google_version, + google_reqId, + (int64_t)now_realtime_sec()); + } + else if(format == DATASOURCE_JSONP) { + if(responseHandler == NULL) + responseHandler = "callback"; + + buffer_strcat(w->response.data, responseHandler); + buffer_strcat(w->response.data, "("); + } + + owa = onewayalloc_create(0); + ret = data_query_execute(owa, w->response.data, qt, &last_timestamp_in_data); + + if(format == DATASOURCE_DATATABLE_JSONP) { + if(google_timestamp < last_timestamp_in_data) + buffer_strcat(w->response.data, "});"); + + else { + // the client already has the latest data + buffer_flush(w->response.data); + buffer_sprintf(w->response.data, + "%s({version:'%s',reqId:'%s',status:'error',errors:[{reason:'not_modified',message:'Data not modified'}]});", + responseHandler, google_version, google_reqId); + } + } + else if(format == DATASOURCE_JSONP) + buffer_strcat(w->response.data, ");"); + + if(qt->internal.relative) + buffer_no_cacheable(w->response.data); + else + buffer_cacheable(w->response.data); + +cleanup: + query_target_release(qt); + onewayalloc_destroy(owa); + return ret; +} diff --git a/src/web/api/v2/api_v2_functions.c 
b/src/web/api/v2/api_v2_functions.c new file mode 100644 index 00000000000000..286efd130b94ce --- /dev/null +++ b/src/web/api/v2/api_v2_functions.c @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_calls.h" + +int api_v2_functions(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { + return api_v2_contexts_internal( + host, w, url, CONTEXTS_V2_FUNCTIONS | CONTEXTS_V2_NODES | CONTEXTS_V2_AGENTS | CONTEXTS_V2_VERSIONS); +} diff --git a/src/web/api/ilove/README.md b/src/web/api/v2/api_v2_ilove/README.md similarity index 100% rename from src/web/api/ilove/README.md rename to src/web/api/v2/api_v2_ilove/README.md diff --git a/src/web/api/ilove/ilove.c b/src/web/api/v2/api_v2_ilove/ilove.c similarity index 99% rename from src/web/api/ilove/ilove.c rename to src/web/api/v2/api_v2_ilove/ilove.c index 67489ec42df7e4..501e001234a6f0 100644 --- a/src/web/api/ilove/ilove.c +++ b/src/web/api/v2/api_v2_ilove/ilove.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "ilove.h" +#include "../api_v2_calls.h" static const unsigned short int ibm_plex_sans_bold_250[128][128] = { {0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* */, 0 /* ! */, 0 /* " */, 0 /* # */, 0 /* $ */, 0 /* % */, 0 /* & */, 0 /* ' */, 0 /* ( */, 0 /* ) */, 0 /* * */, 0 /* + */, 0 /* , */, 0 /* - */, 0 /* . */, 0 /* / */, 0 /* 0 */, 0 /* 1 */, 0 /* 2 */, 0 /* 3 */, 0 /* 4 */, 0 /* 5 */, 0 /* 6 */, 0 /* 7 */, 0 /* 8 */, 0 /* 9 */, 0 /* : */, 0 /* ; */, 0 /* < */, 0 /* = */, 0 /* > */, 0 /* ? 
*/, 0 /* @ */, 0 /* A */, 0 /* B */, 0 /* C */, 0 /* D */, 0 /* E */, 0 /* F */, 0 /* G */, 0 /* H */, 0 /* I */, 0 /* J */, 0 /* K */, 0 /* L */, 0 /* M */, 0 /* N */, 0 /* O */, 0 /* P */, 0 /* Q */, 0 /* R */, 0 /* S */, 0 /* T */, 0 /* U */, 0 /* V */, 0 /* W */, 0 /* X */, 0 /* Y */, 0 /* Z */, 0 /* [ */, 0 /* \ */, 0 /* ] */, 0 /* ^ */, 0 /* _ */, 0 /* ` */, 0 /* a */, 0 /* b */, 0 /* c */, 0 /* d */, 0 /* e */, 0 /* f */, 0 /* g */, 0 /* h */, 0 /* i */, 0 /* j */, 0 /* k */, 0 /* l */, 0 /* m */, 0 /* n */, 0 /* o */, 0 /* p */, 0 /* q */, 0 /* r */, 0 /* s */, 0 /* t */, 0 /* u */, 0 /* v */, 0 /* w */, 0 /* x */, 0 /* y */, 0 /* z */, 0 /* { */, 0 /* | */, 0 /* } */, 0 /* ~ */}, @@ -277,7 +277,7 @@ static void generate_ilove_svg(BUFFER *wb, const char *love) { wb->content_type = CT_IMAGE_SVG_XML; } -int web_client_api_request_v2_ilove(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { +int api_v2_ilove(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { char *love = "TROUBLE"; while(url) { diff --git a/src/web/api/ilove/measure-text.js b/src/web/api/v2/api_v2_ilove/measure-text.js similarity index 100% rename from src/web/api/ilove/measure-text.js rename to src/web/api/v2/api_v2_ilove/measure-text.js diff --git a/src/web/api/v2/api_v2_info.c b/src/web/api/v2/api_v2_info.c new file mode 100644 index 00000000000000..fd2aba63345d19 --- /dev/null +++ b/src/web/api/v2/api_v2_info.c @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_calls.h" + +int api_v2_info(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { + return api_v2_contexts_internal(host, w, url, CONTEXTS_V2_AGENTS | CONTEXTS_V2_AGENTS_INFO); +} diff --git a/src/web/api/v2/api_v2_node_instances.c b/src/web/api/v2/api_v2_node_instances.c new file mode 100644 index 00000000000000..0371914329dddd --- /dev/null +++ b/src/web/api/v2/api_v2_node_instances.c @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include 
"api_v2_calls.h" + +int api_v2_node_instances(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { + return api_v2_contexts_internal( + host, w, url, + CONTEXTS_V2_NODES | CONTEXTS_V2_NODE_INSTANCES | CONTEXTS_V2_AGENTS | + CONTEXTS_V2_AGENTS_INFO | CONTEXTS_V2_VERSIONS); +} diff --git a/src/web/api/v2/api_v2_nodes.c b/src/web/api/v2/api_v2_nodes.c new file mode 100644 index 00000000000000..3880f279f21a97 --- /dev/null +++ b/src/web/api/v2/api_v2_nodes.c @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_calls.h" + +int api_v2_nodes(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { + return api_v2_contexts_internal(host, w, url, CONTEXTS_V2_NODES | CONTEXTS_V2_NODES_INFO); +} diff --git a/src/web/api/v2/api_v2_progress.c b/src/web/api/v2/api_v2_progress.c new file mode 100644 index 00000000000000..ebb53ca8812321 --- /dev/null +++ b/src/web/api/v2/api_v2_progress.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_calls.h" + +int api_v2_progress(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { + char *transaction = NULL; + + while(url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if(!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if(!name || !*name) continue; + if(!value || !*value) continue; + + // name and value are now the parameters + // they are not null and not empty + + if(!strcmp(name, "transaction")) transaction = value; + } + + nd_uuid_t tr; + uuid_parse_flexi(transaction, tr); + + rrd_function_call_progresser(&tr); + + return web_api_v2_report_progress(&tr, w->response.data); +} diff --git a/src/web/api/v2/api_v2_q.c b/src/web/api/v2/api_v2_q.c new file mode 100644 index 00000000000000..57fcec7ddb1f81 --- /dev/null +++ b/src/web/api/v2/api_v2_q.c @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_calls.h" + +int api_v2_q(RRDHOST *host __maybe_unused, 
struct web_client *w, char *url) { + return api_v2_contexts_internal( + host, w, url, + CONTEXTS_V2_SEARCH | CONTEXTS_V2_CONTEXTS | CONTEXTS_V2_NODES | CONTEXTS_V2_AGENTS | CONTEXTS_V2_VERSIONS); +} diff --git a/src/web/api/v2/api_v2_versions.c b/src/web/api/v2/api_v2_versions.c new file mode 100644 index 00000000000000..299e7a30c6b29c --- /dev/null +++ b/src/web/api/v2/api_v2_versions.c @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_calls.h" + +int api_v2_versions(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { + return api_v2_contexts_internal(host, w, url, CONTEXTS_V2_VERSIONS); +} diff --git a/src/web/api/v2/api_v2_webrtc.c b/src/web/api/v2/api_v2_webrtc.c new file mode 100644 index 00000000000000..dcd383d472aec9 --- /dev/null +++ b/src/web/api/v2/api_v2_webrtc.c @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_calls.h" +#include "../../rtc/webrtc.h" + +int api_v2_webrtc(RRDHOST *host __maybe_unused, struct web_client *w, char *url __maybe_unused) { + return webrtc_new_connection(buffer_tostring(w->payload), w->response.data); +} diff --git a/src/web/api/v2/api_v2_weights.c b/src/web/api/v2/api_v2_weights.c new file mode 100644 index 00000000000000..442c8b75a60bc0 --- /dev/null +++ b/src/web/api/v2/api_v2_weights.c @@ -0,0 +1,152 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v2_calls.h" + +int web_client_api_request_weights(RRDHOST *host, struct web_client *w, char *url, WEIGHTS_METHOD method, WEIGHTS_FORMAT format, size_t api_version) { + if (!netdata_ready) + return HTTP_RESP_SERVICE_UNAVAILABLE; + + time_t baseline_after = 0, baseline_before = 0, after = 0, before = 0; + size_t points = 0; + RRDR_OPTIONS options = 0; + RRDR_TIME_GROUPING time_group_method = RRDR_GROUPING_AVERAGE; + time_t timeout_ms = 0; + size_t tier = 0; + const char *time_group_options = NULL, *scope_contexts = NULL, *scope_nodes = NULL, *contexts = NULL, *nodes = NULL, + *instances = 
NULL, *dimensions = NULL, *labels = NULL, *alerts = NULL; + + struct group_by_pass group_by = { + .group_by = RRDR_GROUP_BY_NONE, + .group_by_label = NULL, + .aggregation = RRDR_GROUP_BY_FUNCTION_AVERAGE, + }; + + while (url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if (!value || !*value) + continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if (!name || !*name) + continue; + if (!value || !*value) + continue; + + if (!strcmp(name, "baseline_after")) + baseline_after = str2l(value); + + else if (!strcmp(name, "baseline_before")) + baseline_before = str2l(value); + + else if (!strcmp(name, "after") || !strcmp(name, "highlight_after")) + after = str2l(value); + + else if (!strcmp(name, "before") || !strcmp(name, "highlight_before")) + before = str2l(value); + + else if (!strcmp(name, "points") || !strcmp(name, "max_points")) + points = str2ul(value); + + else if (!strcmp(name, "timeout")) + timeout_ms = str2l(value); + + else if((api_version == 1 && !strcmp(name, "group")) || (api_version >= 2 && !strcmp(name, "time_group"))) + time_group_method = time_grouping_parse(value, RRDR_GROUPING_AVERAGE); + + else if((api_version == 1 && !strcmp(name, "group_options")) || (api_version >= 2 && !strcmp(name, "time_group_options"))) + time_group_options = value; + + else if(!strcmp(name, "options")) + options |= rrdr_options_parse(value); + + else if(!strcmp(name, "method")) + method = weights_string_to_method(value); + + else if(api_version == 1 && (!strcmp(name, "context") || !strcmp(name, "contexts"))) + scope_contexts = value; + + else if(api_version >= 2 && !strcmp(name, "scope_nodes")) scope_nodes = value; + else if(api_version >= 2 && !strcmp(name, "scope_contexts")) scope_contexts = value; + else if(api_version >= 2 && !strcmp(name, "nodes")) nodes = value; + else if(api_version >= 2 && !strcmp(name, "contexts")) contexts = value; + else if(api_version >= 2 && !strcmp(name, "instances")) instances = value; + else 
if(api_version >= 2 && !strcmp(name, "dimensions")) dimensions = value; + else if(api_version >= 2 && !strcmp(name, "labels")) labels = value; + else if(api_version >= 2 && !strcmp(name, "alerts")) alerts = value; + else if(api_version >= 2 && (!strcmp(name, "group_by") || !strcmp(name, "group_by[0]"))) { + group_by.group_by = group_by_parse(value); + } + else if(api_version >= 2 && (!strcmp(name, "group_by_label") || !strcmp(name, "group_by_label[0]"))) { + group_by.group_by_label = value; + } + else if(api_version >= 2 && (!strcmp(name, "aggregation") || !strcmp(name, "aggregation[0]"))) { + group_by.aggregation = group_by_aggregate_function_parse(value); + } + + else if(!strcmp(name, "tier")) { + tier = str2ul(value); + if(tier < storage_tiers) + options |= RRDR_OPTION_SELECTED_TIER; + else + tier = 0; + } + } + + if(options == 0) + // the user did not set any options + options = RRDR_OPTION_NOT_ALIGNED | RRDR_OPTION_NULL2ZERO | RRDR_OPTION_NONZERO; + else + // the user set some options, add also these + options |= RRDR_OPTION_NOT_ALIGNED | RRDR_OPTION_NULL2ZERO; + + if(options & RRDR_OPTION_PERCENTAGE) + options |= RRDR_OPTION_ABSOLUTE; + + if(options & RRDR_OPTION_DEBUG) + options &= ~RRDR_OPTION_MINIFY; + + BUFFER *wb = w->response.data; + buffer_flush(wb); + wb->content_type = CT_APPLICATION_JSON; + + QUERY_WEIGHTS_REQUEST qwr = { + .version = api_version, + .host = (api_version == 1) ? 
NULL : host, + .scope_nodes = scope_nodes, + .scope_contexts = scope_contexts, + .nodes = nodes, + .contexts = contexts, + .instances = instances, + .dimensions = dimensions, + .labels = labels, + .alerts = alerts, + .group_by = { + .group_by = group_by.group_by, + .group_by_label = group_by.group_by_label, + .aggregation = group_by.aggregation, + }, + .method = method, + .format = format, + .time_group_method = time_group_method, + .time_group_options = time_group_options, + .baseline_after = baseline_after, + .baseline_before = baseline_before, + .after = after, + .before = before, + .points = points, + .options = options, + .tier = tier, + .timeout_ms = timeout_ms, + + .interrupt_callback = web_client_interrupt_callback, + .interrupt_callback_data = w, + + .transaction = &w->transaction, + }; + + return web_api_v12_weights(wb, &qwr); +} + +int api_v2_weights(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { + return web_client_api_request_weights(host, w, url, WEIGHTS_METHOD_VALUE, WEIGHTS_FORMAT_MULTINODE, 2); +} diff --git a/src/web/api/v3/api_v3_calls.h b/src/web/api/v3/api_v3_calls.h new file mode 100644 index 00000000000000..4cee766fd2c34a --- /dev/null +++ b/src/web/api/v3/api_v3_calls.h @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_API_V3_CALLS_H +#define NETDATA_API_V3_CALLS_H + +#include "../web_api_v3.h" + +int api_v3_settings(RRDHOST *host, struct web_client *w, char *url); +int api_v3_me(RRDHOST *host, struct web_client *w, char *url); + +#endif //NETDATA_API_V3_CALLS_H diff --git a/src/web/api/v3/api_v3_me.c b/src/web/api/v3/api_v3_me.c new file mode 100644 index 00000000000000..39ba2c29baccbb --- /dev/null +++ b/src/web/api/v3/api_v3_me.c @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "api_v3_calls.h" + +int api_v3_me(RRDHOST *host __maybe_unused, struct web_client *w, char *url __maybe_unused) { + BUFFER *wb = w->response.data; + buffer_reset(wb); + 
buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY); + + const char *auth; + switch(web_client_flag_check(w, WEB_CLIENT_FLAG_AUTH_CLOUD|WEB_CLIENT_FLAG_AUTH_BEARER|WEB_CLIENT_FLAG_AUTH_GOD)) { + case WEB_CLIENT_FLAG_AUTH_CLOUD: + auth = "cloud"; + break; + + case WEB_CLIENT_FLAG_AUTH_BEARER: + auth = "bearer"; + break; + + case WEB_CLIENT_FLAG_AUTH_GOD: + auth = "god"; + break; + + default: + auth = "none"; + break; + } + buffer_json_member_add_string(wb, "auth", auth); + + buffer_json_member_add_uuid(wb, "cloud_account_id", w->auth.cloud_account_id); + buffer_json_member_add_string(wb, "client_name", w->auth.client_name); + http_access2buffer_json_array(wb, "access", w->access); + buffer_json_member_add_string(wb, "user_role", http_id2user_role(w->user_role)); + + buffer_json_finalize(wb); + return HTTP_RESP_OK; +} diff --git a/src/web/api/v3/api_v3_settings.c b/src/web/api/v3/api_v3_settings.c new file mode 100644 index 00000000000000..cff584f8417d90 --- /dev/null +++ b/src/web/api/v3/api_v3_settings.c @@ -0,0 +1,285 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +/* + * /api/v3/settings + * + * QUERY STRING PARAMETERS: + * - file=a file name (alphanumerics, dashes, underscores) + * When the user is not authenticated with a bearer token + * only the 'default' file is allowed. + * Authenticated users can create, store and update any + * settings file. + * + * HTTP METHODS + * - GET to retrieve a file + * - PUT to create or update a file + * + * PAYLOAD + * - The payload MUST have the member 'version'. + * - The payload MAY have anything else. + * - The maximum payload size in JSON is 20MiB. + * - When updating the payload, the caller must specify the + * version of the existing file. If this check fails, + * Netdata will return 409 (conflict). + * When the caller receives 409, it means there are updates + * in the payload outside its control and the object MUST + * be loaded again to find its current version to update it. 
+ * After loading it, the caller must reapply the changes and + * PUT it again. + * - Netdata will increase the version on every PUT action. + * So, the payload MUST specify the version found on disk + * but, Netdata will increment the version before saving it. + */ + +#include "api_v3_calls.h" + +#define MAX_SETTINGS_SIZE_BYTES (20 * 1024 * 1024) + +// we need an r/w spinlock to ensure that reads and write do not happen +// concurrently for settings files +static RW_SPINLOCK settings_spinlock = NETDATA_RW_SPINLOCK_INITIALIZER; + +static inline void settings_path(char out[FILENAME_MAX]) { + filename_from_path_entry(out, netdata_configured_varlib_dir, "settings", NULL); +} + +static inline void settings_filename(char out[FILENAME_MAX], const char *file, const char *extension) { + char path[FILENAME_MAX]; + settings_path(path); + filename_from_path_entry(out, path, file, extension); +} + +static inline bool settings_ensure_path_exists(void) { + char path[FILENAME_MAX]; + settings_path(path); + return filename_is_dir(path, true); +} + +static inline size_t settings_extract_json_version(const char *json) { + if(!json || !*json) return 0; + + // Parse the JSON string into a JSON-C object + CLEAN_JSON_OBJECT *jobj = json_tokener_parse(json); + if (jobj == NULL) + return 0; + + // Access the "version" field + struct json_object *version_obj; + if (json_object_object_get_ex(jobj, "version", &version_obj)) + // Extract the integer value of the version + return (size_t)json_object_get_int(version_obj); + + return 0; +} + +static inline void settings_initial_version(BUFFER *wb) { + buffer_reset(wb); + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY); + buffer_json_member_add_uint64(wb, "version", 1); + buffer_json_finalize(wb); +} + +static inline void settings_get(BUFFER *wb, const char *file, bool have_lock) { + char filename[FILENAME_MAX]; + settings_filename(filename, file, NULL); + + buffer_reset(wb); + + if(!have_lock) + 
rw_spinlock_read_lock(&settings_spinlock); + + bool rc = read_txt_file_to_buffer(filename, wb, MAX_SETTINGS_SIZE_BYTES); + + if(!have_lock) + rw_spinlock_read_unlock(&settings_spinlock); + + if(rc) { + size_t version = settings_extract_json_version(buffer_tostring(wb)); + if (!version) { + nd_log(NDLS_DAEMON, NDLP_ERR, "file '%s' cannot be parsed to extract version", filename); + settings_initial_version(wb); + } + else { + wb->content_type = CT_APPLICATION_JSON; + buffer_no_cacheable(wb); + } + } + else + settings_initial_version(wb); +} + +static inline size_t settings_get_version(const char *path, bool have_lock) { + CLEAN_BUFFER *wb = buffer_create(0, NULL); + settings_get(wb, path, have_lock); + + return settings_extract_json_version(buffer_tostring(wb)); +} + +static inline int settings_put(struct web_client *w, char *file) { + rw_spinlock_write_lock(&settings_spinlock); + + if(!settings_ensure_path_exists()) { + rw_spinlock_write_unlock(&settings_spinlock); + return rrd_call_function_error( + w->response.data, + "Settings path cannot be created or accessed.", + HTTP_RESP_BAD_REQUEST); + } + + size_t old_version = settings_get_version(file, true); + + // Parse the JSON string into a JSON-C object + CLEAN_JSON_OBJECT *jobj = json_tokener_parse(buffer_tostring(w->payload)); + if (jobj == NULL) { + rw_spinlock_write_unlock(&settings_spinlock); + return rrd_call_function_error( + w->response.data, + "Payload cannot be parsed as a JSON object", + HTTP_RESP_BAD_REQUEST); + } + + // Access the "version" field + struct json_object *version_obj; + if (!json_object_object_get_ex(jobj, "version", &version_obj)) { + rw_spinlock_write_unlock(&settings_spinlock); + return rrd_call_function_error( + w->response.data, + "Field version is not found in payload", + HTTP_RESP_BAD_REQUEST); + } + + size_t new_version = (size_t)json_object_get_int(version_obj); + + if (old_version != new_version) { + rw_spinlock_write_unlock(&settings_spinlock); + return rrd_call_function_error( + 
w->response.data, + "Payload version does not match the version of the stored object", + HTTP_RESP_CONFLICT); + } + + new_version++; + // Set the new version back into the JSON object + json_object_object_add(jobj, "version", json_object_new_int((int)new_version)); + + // Convert the updated JSON object back to a string + const char *updated_json_str = json_object_to_json_string(jobj); + + char tmp_filename[FILENAME_MAX]; + settings_filename(tmp_filename, file, "new"); + + // Save the updated JSON string to a file + FILE *fp = fopen(tmp_filename, "w"); + if (fp == NULL) { + rw_spinlock_write_unlock(&settings_spinlock); + nd_log(NDLS_DAEMON, NDLP_ERR, "cannot open/create settings file '%s'", tmp_filename); + return rrd_call_function_error( + w->response.data, + "Cannot create payload file '%s'", + HTTP_RESP_INTERNAL_SERVER_ERROR); + } + size_t len = strlen(updated_json_str); + if(fwrite(updated_json_str, 1, len, fp) != len) { + fclose(fp); + unlink(tmp_filename); + rw_spinlock_write_unlock(&settings_spinlock); + nd_log(NDLS_DAEMON, NDLP_ERR, "cannot save settings to file '%s'", tmp_filename); + return rrd_call_function_error( + w->response.data, + "Cannot save payload to file '%s'", + HTTP_RESP_INTERNAL_SERVER_ERROR); + } + fclose(fp); + + char filename[FILENAME_MAX]; + settings_filename(filename, file, NULL); + + bool renamed = rename(tmp_filename, filename) == 0; + + rw_spinlock_write_unlock(&settings_spinlock); + + if(!renamed) { + nd_log(NDLS_DAEMON, NDLP_ERR, "cannot rename file '%s' to '%s'", tmp_filename, filename); + return rrd_call_function_error( + w->response.data, + "Failed to move the payload file to its final location", + HTTP_RESP_INTERNAL_SERVER_ERROR); + } + + return rrd_call_function_error( + w->response.data, + "OK", + HTTP_RESP_OK); +} + +static inline bool is_settings_file_valid(char *file) { + char *s = file; + + if(!s || !*s) + return false; + + while(*s) { + if(!isalnum(*s) && *s != '-' && *s != '_') + return false; + s++; + } + + return 
true; +} + +int api_v3_settings(RRDHOST *host, struct web_client *w, char *url) { + char *file = NULL; + + while(url) { + char *value = strsep_skip_consecutive_separators(&url, "&"); + if(!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if(!name || !*name) continue; + if(!value || !*value) continue; + + // name and value are now the parameters + // they are not null and not empty + + if(!strcmp(name, "file")) + file = value; + } + + if(!is_settings_file_valid(file)) + return rrd_call_function_error( + w->response.data, + "Invalid settings file given.", + HTTP_RESP_BAD_REQUEST); + + if(host != localhost) + return rrd_call_function_error( + w->response.data, + "Settings API is only allowed for the agent node.", + HTTP_RESP_BAD_REQUEST); + + if(web_client_flags_check_auth(w) != WEB_CLIENT_FLAG_AUTH_BEARER && strcmp(file, "default") != 0) + return rrd_call_function_error( + w->response.data, + "Only the 'default' settings file is allowed for unauthenticated users", + HTTP_RESP_BAD_REQUEST); + + switch(w->mode) { + case HTTP_REQUEST_MODE_GET: + settings_get(w->response.data, file, false); + return HTTP_RESP_OK; + + case HTTP_REQUEST_MODE_PUT: + if(!w->payload || !buffer_strlen(w->payload)) + return rrd_call_function_error( + w->response.data, + "Settings API PUT action requires a payload.", + HTTP_RESP_BAD_REQUEST); + + return settings_put(w, file); + + default: + return rrd_call_function_error(w->response.data, + "Invalid HTTP mode. 
HTTP modes GET and PUT are supported.", + HTTP_RESP_BAD_REQUEST); + } +} diff --git a/src/web/api/web_api.c b/src/web/api/web_api.c index 4e936be5b69823..b7090e030a5af6 100644 --- a/src/web/api/web_api.c +++ b/src/web/api/web_api.c @@ -2,6 +2,12 @@ #include "web_api.h" +void host_labels2json(RRDHOST *host, BUFFER *wb, const char *key) { + buffer_json_member_add_object(wb, key); + rrdlabels_to_buffer_json_members(host->rrdlabels, wb); + buffer_json_object_close(wb); +} + int web_client_api_request_vX(RRDHOST *host, struct web_client *w, char *url_path_endpoint, struct web_api_command *api_commands) { buffer_no_cacheable(w->response.data); @@ -11,15 +17,16 @@ int web_client_api_request_vX(RRDHOST *host, struct web_client *w, char *url_pat internal_fatal(!web_client_flags_check_auth(w) && (w->access & HTTP_ACCESS_SIGNED_ID), "signed-in permission is set, but it shouldn't"); +#ifdef NETDATA_GOD_MODE + web_client_set_permissions(w, HTTP_ACCESS_ALL, HTTP_USER_ROLE_ADMIN, WEB_CLIENT_FLAG_AUTH_GOD); +#else if(!web_client_flags_check_auth(w)) { - w->user_role = (netdata_is_protected_by_bearer) ? HTTP_USER_ROLE_NONE : HTTP_USER_ROLE_ANY; - w->access = (netdata_is_protected_by_bearer) ? HTTP_ACCESS_NONE : HTTP_ACCESS_ANONYMOUS_DATA; + web_client_set_permissions( + w, + (netdata_is_protected_by_bearer) ? HTTP_ACCESS_NONE : HTTP_ACCESS_ANONYMOUS_DATA, + (netdata_is_protected_by_bearer) ? 
HTTP_USER_ROLE_NONE : HTTP_USER_ROLE_ANY, + 0); } - -#ifdef NETDATA_GOD_MODE - web_client_flag_set(w, WEB_CLIENT_FLAG_AUTH_GOD); - w->user_role = HTTP_USER_ROLE_ADMIN; - w->access = HTTP_ACCESS_ALL; #endif if(unlikely(!url_path_endpoint || !*url_path_endpoint)) { @@ -110,150 +117,6 @@ RRDCONTEXT_TO_JSON_OPTIONS rrdcontext_to_json_parse_options(char *o) { return options; } -int web_client_api_request_weights(RRDHOST *host, struct web_client *w, char *url, WEIGHTS_METHOD method, WEIGHTS_FORMAT format, size_t api_version) { - if (!netdata_ready) - return HTTP_RESP_SERVICE_UNAVAILABLE; - - time_t baseline_after = 0, baseline_before = 0, after = 0, before = 0; - size_t points = 0; - RRDR_OPTIONS options = 0; - RRDR_TIME_GROUPING time_group_method = RRDR_GROUPING_AVERAGE; - time_t timeout_ms = 0; - size_t tier = 0; - const char *time_group_options = NULL, *scope_contexts = NULL, *scope_nodes = NULL, *contexts = NULL, *nodes = NULL, - *instances = NULL, *dimensions = NULL, *labels = NULL, *alerts = NULL; - - struct group_by_pass group_by = { - .group_by = RRDR_GROUP_BY_NONE, - .group_by_label = NULL, - .aggregation = RRDR_GROUP_BY_FUNCTION_AVERAGE, - }; - - while (url) { - char *value = strsep_skip_consecutive_separators(&url, "&"); - if (!value || !*value) - continue; - - char *name = strsep_skip_consecutive_separators(&value, "="); - if (!name || !*name) - continue; - if (!value || !*value) - continue; - - if (!strcmp(name, "baseline_after")) - baseline_after = str2l(value); - - else if (!strcmp(name, "baseline_before")) - baseline_before = str2l(value); - - else if (!strcmp(name, "after") || !strcmp(name, "highlight_after")) - after = str2l(value); - - else if (!strcmp(name, "before") || !strcmp(name, "highlight_before")) - before = str2l(value); - - else if (!strcmp(name, "points") || !strcmp(name, "max_points")) - points = str2ul(value); - - else if (!strcmp(name, "timeout")) - timeout_ms = str2l(value); - - else if((api_version == 1 && !strcmp(name, "group")) || 
(api_version >= 2 && !strcmp(name, "time_group"))) - time_group_method = time_grouping_parse(value, RRDR_GROUPING_AVERAGE); - - else if((api_version == 1 && !strcmp(name, "group_options")) || (api_version >= 2 && !strcmp(name, "time_group_options"))) - time_group_options = value; - - else if(!strcmp(name, "options")) - options |= rrdr_options_parse(value); - - else if(!strcmp(name, "method")) - method = weights_string_to_method(value); - - else if(api_version == 1 && (!strcmp(name, "context") || !strcmp(name, "contexts"))) - scope_contexts = value; - - else if(api_version >= 2 && !strcmp(name, "scope_nodes")) scope_nodes = value; - else if(api_version >= 2 && !strcmp(name, "scope_contexts")) scope_contexts = value; - else if(api_version >= 2 && !strcmp(name, "nodes")) nodes = value; - else if(api_version >= 2 && !strcmp(name, "contexts")) contexts = value; - else if(api_version >= 2 && !strcmp(name, "instances")) instances = value; - else if(api_version >= 2 && !strcmp(name, "dimensions")) dimensions = value; - else if(api_version >= 2 && !strcmp(name, "labels")) labels = value; - else if(api_version >= 2 && !strcmp(name, "alerts")) alerts = value; - else if(api_version >= 2 && (!strcmp(name, "group_by") || !strcmp(name, "group_by[0]"))) { - group_by.group_by = group_by_parse(value); - } - else if(api_version >= 2 && (!strcmp(name, "group_by_label") || !strcmp(name, "group_by_label[0]"))) { - group_by.group_by_label = value; - } - else if(api_version >= 2 && (!strcmp(name, "aggregation") || !strcmp(name, "aggregation[0]"))) { - group_by.aggregation = group_by_aggregate_function_parse(value); - } - - else if(!strcmp(name, "tier")) { - tier = str2ul(value); - if(tier < storage_tiers) - options |= RRDR_OPTION_SELECTED_TIER; - else - tier = 0; - } - } - - if(options == 0) - // the user did not set any options - options = RRDR_OPTION_NOT_ALIGNED | RRDR_OPTION_NULL2ZERO | RRDR_OPTION_NONZERO; - else - // the user set some options, add also these - options |= 
RRDR_OPTION_NOT_ALIGNED | RRDR_OPTION_NULL2ZERO; - - if(options & RRDR_OPTION_PERCENTAGE) - options |= RRDR_OPTION_ABSOLUTE; - - if(options & RRDR_OPTION_DEBUG) - options &= ~RRDR_OPTION_MINIFY; - - BUFFER *wb = w->response.data; - buffer_flush(wb); - wb->content_type = CT_APPLICATION_JSON; - - QUERY_WEIGHTS_REQUEST qwr = { - .version = api_version, - .host = (api_version == 1) ? NULL : host, - .scope_nodes = scope_nodes, - .scope_contexts = scope_contexts, - .nodes = nodes, - .contexts = contexts, - .instances = instances, - .dimensions = dimensions, - .labels = labels, - .alerts = alerts, - .group_by = { - .group_by = group_by.group_by, - .group_by_label = group_by.group_by_label, - .aggregation = group_by.aggregation, - }, - .method = method, - .format = format, - .time_group_method = time_group_method, - .time_group_options = time_group_options, - .baseline_after = baseline_after, - .baseline_before = baseline_before, - .after = after, - .before = before, - .points = points, - .options = options, - .tier = tier, - .timeout_ms = timeout_ms, - - .interrupt_callback = web_client_interrupt_callback, - .interrupt_callback_data = w, - - .transaction = &w->transaction, - }; - - return web_api_v12_weights(wb, &qwr); -} bool web_client_interrupt_callback(void *data) { struct web_client *w = data; @@ -263,3 +126,54 @@ bool web_client_interrupt_callback(void *data) { return sock_has_output_error(w->ofd); } + +void nd_web_api_init(void) { + contexts_alert_statuses_init(); + rrdr_options_init(); + contexts_options_init(); + datasource_formats_init(); + time_grouping_init(); +} + + +bool request_source_is_cloud(const char *source) { + return source && *source && strstartswith(source, "method=NC,"); +} + +void web_client_api_request_vX_source_to_buffer(struct web_client *w, BUFFER *source) { + if(web_client_flag_check(w, WEB_CLIENT_FLAG_AUTH_CLOUD)) + buffer_sprintf(source, "method=NC"); + else if(web_client_flag_check(w, WEB_CLIENT_FLAG_AUTH_BEARER)) + buffer_sprintf(source, 
"method=api-bearer"); + else + buffer_sprintf(source, "method=api"); + + if(web_client_flag_check(w, WEB_CLIENT_FLAG_AUTH_GOD)) + buffer_strcat(source, ",role=god"); + else + buffer_sprintf(source, ",role=%s", http_id2user_role(w->user_role)); + + buffer_sprintf(source, ",permissions="HTTP_ACCESS_FORMAT, (HTTP_ACCESS_FORMAT_CAST)w->access); + + if(w->auth.client_name[0]) + buffer_sprintf(source, ",user=%s", w->auth.client_name); + + if(!uuid_is_null(w->auth.cloud_account_id)) { + char uuid_str[UUID_COMPACT_STR_LEN]; + uuid_unparse_lower_compact(w->auth.cloud_account_id, uuid_str); + buffer_sprintf(source, ",account=%s", uuid_str); + } + + if(w->client_ip[0]) + buffer_sprintf(source, ",ip=%s", w->client_ip); + + if(w->forwarded_for) + buffer_sprintf(source, ",forwarded_for=%s", w->forwarded_for); +} + +void web_client_progress_functions_update(void *data, size_t done, size_t all) { + // handle progress updates from the plugin + struct web_client *w = data; + query_progress_functions_update(&w->transaction, done, all); +} + diff --git a/src/web/api/web_api.h b/src/web/api/web_api.h index 634e59657a1c88..cb694a33d7369e 100644 --- a/src/web/api/web_api.h +++ b/src/web/api/web_api.h @@ -3,14 +3,28 @@ #ifndef NETDATA_WEB_API_H #define NETDATA_WEB_API_H 1 +#define ENABLE_API_V1 1 +#define ENABLE_API_v2 1 + +struct web_client; + #include "daemon/common.h" +#include "maps/maps.h" +#include "functions/functions.h" + #include "web/api/http_header.h" #include "web/api/http_auth.h" -#include "web/api/badges/web_buffer_svg.h" -#include "web/api/ilove/ilove.h" #include "web/api/formatters/rrd2json.h" #include "web/api/queries/weights.h" +void nd_web_api_init(void); + +bool request_source_is_cloud(const char *source); +void web_client_api_request_vX_source_to_buffer(struct web_client *w, BUFFER *source); +void web_client_progress_functions_update(void *data, size_t done, size_t all); + +void host_labels2json(RRDHOST *host, BUFFER *wb, const char *key); + struct web_api_command { 
const char *api; uint32_t hash; @@ -37,7 +51,11 @@ int web_client_api_request_weights(RRDHOST *host, struct web_client *w, char *ur bool web_client_interrupt_callback(void *data); +char *format_value_and_unit(char *value_string, size_t value_string_len, + NETDATA_DOUBLE value, const char *units, int precision); + #include "web_api_v1.h" #include "web_api_v2.h" +#include "web_api_v3.h" #endif //NETDATA_WEB_API_H diff --git a/src/web/api/web_api_v1.c b/src/web/api/web_api_v1.c index bfaa4f6f7379d5..a6488702ddd6c5 100644 --- a/src/web/api/web_api_v1.c +++ b/src/web/api/web_api_v1.c @@ -1,1958 +1,224 @@ // SPDX-License-Identifier: GPL-3.0-or-later #include "web_api_v1.h" - -char *api_secret; - -static struct { - const char *name; - uint32_t hash; - RRDR_OPTIONS value; -} rrdr_options[] = { - { "nonzero" , 0 , RRDR_OPTION_NONZERO} - , {"flip" , 0 , RRDR_OPTION_REVERSED} - , {"reversed" , 0 , RRDR_OPTION_REVERSED} - , {"reverse" , 0 , RRDR_OPTION_REVERSED} - , {"jsonwrap" , 0 , RRDR_OPTION_JSON_WRAP} - , {"min2max" , 0 , RRDR_OPTION_DIMS_MIN2MAX} // rrdr2value() only - , {"average" , 0 , RRDR_OPTION_DIMS_AVERAGE} // rrdr2value() only - , {"min" , 0 , RRDR_OPTION_DIMS_MIN} // rrdr2value() only - , {"max" , 0 , RRDR_OPTION_DIMS_MAX} // rrdr2value() only - , {"ms" , 0 , RRDR_OPTION_MILLISECONDS} - , {"milliseconds" , 0 , RRDR_OPTION_MILLISECONDS} - , {"absolute" , 0 , RRDR_OPTION_ABSOLUTE} - , {"abs" , 0 , RRDR_OPTION_ABSOLUTE} - , {"absolute_sum" , 0 , RRDR_OPTION_ABSOLUTE} - , {"absolute-sum" , 0 , RRDR_OPTION_ABSOLUTE} - , {"display_absolute" , 0 , RRDR_OPTION_DISPLAY_ABS} - , {"display-absolute" , 0 , RRDR_OPTION_DISPLAY_ABS} - , {"seconds" , 0 , RRDR_OPTION_SECONDS} - , {"null2zero" , 0 , RRDR_OPTION_NULL2ZERO} - , {"objectrows" , 0 , RRDR_OPTION_OBJECTSROWS} - , {"google_json" , 0 , RRDR_OPTION_GOOGLE_JSON} - , {"google-json" , 0 , RRDR_OPTION_GOOGLE_JSON} - , {"percentage" , 0 , RRDR_OPTION_PERCENTAGE} - , {"unaligned" , 0 , RRDR_OPTION_NOT_ALIGNED} - , {"match_ids" 
, 0 , RRDR_OPTION_MATCH_IDS} - , {"match-ids" , 0 , RRDR_OPTION_MATCH_IDS} - , {"match_names" , 0 , RRDR_OPTION_MATCH_NAMES} - , {"match-names" , 0 , RRDR_OPTION_MATCH_NAMES} - , {"anomaly-bit" , 0 , RRDR_OPTION_ANOMALY_BIT} - , {"selected-tier" , 0 , RRDR_OPTION_SELECTED_TIER} - , {"raw" , 0 , RRDR_OPTION_RETURN_RAW} - , {"jw-anomaly-rates" , 0 , RRDR_OPTION_RETURN_JWAR} - , {"natural-points" , 0 , RRDR_OPTION_NATURAL_POINTS} - , {"virtual-points" , 0 , RRDR_OPTION_VIRTUAL_POINTS} - , {"all-dimensions" , 0 , RRDR_OPTION_ALL_DIMENSIONS} - , {"details" , 0 , RRDR_OPTION_SHOW_DETAILS} - , {"debug" , 0 , RRDR_OPTION_DEBUG} - , {"plan" , 0 , RRDR_OPTION_DEBUG} - , {"minify" , 0 , RRDR_OPTION_MINIFY} - , {"group-by-labels" , 0 , RRDR_OPTION_GROUP_BY_LABELS} - , {"label-quotes" , 0 , RRDR_OPTION_LABEL_QUOTES} - , {NULL , 0 , 0} -}; - -static struct { - const char *name; - uint32_t hash; - CONTEXTS_V2_OPTIONS value; -} contexts_v2_options[] = { - {"minify" , 0 , CONTEXT_V2_OPTION_MINIFY} - , {"debug" , 0 , CONTEXT_V2_OPTION_DEBUG} - , {"config" , 0 , CONTEXT_V2_OPTION_ALERTS_WITH_CONFIGURATIONS} - , {"instances" , 0 , CONTEXT_V2_OPTION_ALERTS_WITH_INSTANCES} - , {"values" , 0 , CONTEXT_V2_OPTION_ALERTS_WITH_VALUES} - , {"summary" , 0 , CONTEXT_V2_OPTION_ALERTS_WITH_SUMMARY} - , {NULL , 0 , 0} -}; - -static struct { - const char *name; - uint32_t hash; - CONTEXTS_V2_ALERT_STATUS value; -} contexts_v2_alert_status[] = { - {"uninitialized" , 0 , CONTEXT_V2_ALERT_UNINITIALIZED} - , {"undefined" , 0 , CONTEXT_V2_ALERT_UNDEFINED} - , {"clear" , 0 , CONTEXT_V2_ALERT_CLEAR} - , {"raised" , 0 , CONTEXT_V2_ALERT_RAISED} - , {"active" , 0 , CONTEXT_V2_ALERT_RAISED} - , {"warning" , 0 , CONTEXT_V2_ALERT_WARNING} - , {"critical" , 0 , CONTEXT_V2_ALERT_CRITICAL} - , {NULL , 0 , 0} -}; - -static struct { - const char *name; - uint32_t hash; - DATASOURCE_FORMAT value; -} api_v1_data_formats[] = { - { DATASOURCE_FORMAT_DATATABLE_JSON , 0 , DATASOURCE_DATATABLE_JSON} - , 
{DATASOURCE_FORMAT_DATATABLE_JSONP, 0 , DATASOURCE_DATATABLE_JSONP} - , {DATASOURCE_FORMAT_JSON , 0 , DATASOURCE_JSON} - , {DATASOURCE_FORMAT_JSON2 , 0 , DATASOURCE_JSON2} - , {DATASOURCE_FORMAT_JSONP , 0 , DATASOURCE_JSONP} - , {DATASOURCE_FORMAT_SSV , 0 , DATASOURCE_SSV} - , {DATASOURCE_FORMAT_CSV , 0 , DATASOURCE_CSV} - , {DATASOURCE_FORMAT_TSV , 0 , DATASOURCE_TSV} - , {"tsv-excel" , 0 , DATASOURCE_TSV} - , {DATASOURCE_FORMAT_HTML , 0 , DATASOURCE_HTML} - , {DATASOURCE_FORMAT_JS_ARRAY , 0 , DATASOURCE_JS_ARRAY} - , {DATASOURCE_FORMAT_SSV_COMMA , 0 , DATASOURCE_SSV_COMMA} - , {DATASOURCE_FORMAT_CSV_JSON_ARRAY , 0 , DATASOURCE_CSV_JSON_ARRAY} - , {DATASOURCE_FORMAT_CSV_MARKDOWN , 0 , DATASOURCE_CSV_MARKDOWN} - - // terminator - , {NULL, 0, 0} -}; - -static struct { - const char *name; - uint32_t hash; - DATASOURCE_FORMAT value; -} api_v1_data_google_formats[] = { - // this is not an error - when Google requests json, it expects javascript - // https://developers.google.com/chart/interactive/docs/dev/implementing_data_source#responseformat - {"json", 0, DATASOURCE_DATATABLE_JSONP} - , {"html", 0, DATASOURCE_HTML} - , {"csv", 0, DATASOURCE_CSV} - , {"tsv-excel", 0, DATASOURCE_TSV} - - // terminator - , {NULL, 0, 0} -}; - -void web_client_api_v1_init(void) { - int i; - - for(i = 0; contexts_v2_alert_status[i].name ; i++) - contexts_v2_alert_status[i].hash = simple_hash(contexts_v2_alert_status[i].name); - - for(i = 0; rrdr_options[i].name ; i++) - rrdr_options[i].hash = simple_hash(rrdr_options[i].name); - - for(i = 0; contexts_v2_options[i].name ; i++) - contexts_v2_options[i].hash = simple_hash(contexts_v2_options[i].name); - - for(i = 0; api_v1_data_formats[i].name ; i++) - api_v1_data_formats[i].hash = simple_hash(api_v1_data_formats[i].name); - - for(i = 0; api_v1_data_google_formats[i].name ; i++) - api_v1_data_google_formats[i].hash = simple_hash(api_v1_data_google_formats[i].name); - - time_grouping_init(); - - nd_uuid_t uuid; - - // generate - 
uuid_generate(uuid); - - // unparse (to string) - char uuid_str[37]; - uuid_unparse_lower(uuid, uuid_str); -} - -char *get_mgmt_api_key(void) { - char filename[FILENAME_MAX + 1]; - snprintfz(filename, FILENAME_MAX, "%s/netdata.api.key", netdata_configured_varlib_dir); - char *api_key_filename=config_get(CONFIG_SECTION_REGISTRY, "netdata management api key file", filename); - static char guid[GUID_LEN + 1] = ""; - - if(likely(guid[0])) - return guid; - - // read it from disk - int fd = open(api_key_filename, O_RDONLY | O_CLOEXEC); - if(fd != -1) { - char buf[GUID_LEN + 1]; - if(read(fd, buf, GUID_LEN) != GUID_LEN) - netdata_log_error("Failed to read management API key from '%s'", api_key_filename); - else { - buf[GUID_LEN] = '\0'; - if(regenerate_guid(buf, guid) == -1) { - netdata_log_error("Failed to validate management API key '%s' from '%s'.", - buf, api_key_filename); - - guid[0] = '\0'; - } - } - close(fd); - } - - // generate a new one? - if(!guid[0]) { - nd_uuid_t uuid; - - uuid_generate_time(uuid); - uuid_unparse_lower(uuid, guid); - guid[GUID_LEN] = '\0'; - - // save it - fd = open(api_key_filename, O_WRONLY|O_CREAT|O_TRUNC | O_CLOEXEC, 444); - if(fd == -1) { - netdata_log_error("Cannot create unique management API key file '%s'. Please adjust config parameter 'netdata management api key file' to a proper path and file.", api_key_filename); - goto temp_key; - } - - if(write(fd, guid, GUID_LEN) != GUID_LEN) { - netdata_log_error("Cannot write the unique management API key file '%s'. 
Please adjust config parameter 'netdata management api key file' to a proper path and file with enough space left.", api_key_filename); - close(fd); - goto temp_key; - } - - close(fd); - } - - return guid; - -temp_key: - netdata_log_info("You can still continue to use the alarm management API using the authorization token %s during this Netdata session only.", guid); - return guid; -} - -void web_client_api_v1_management_init(void) { - api_secret = get_mgmt_api_key(); -} - -inline RRDR_OPTIONS rrdr_options_parse_one(const char *o) { - RRDR_OPTIONS ret = 0; - - if(!o || !*o) return ret; - - uint32_t hash = simple_hash(o); - int i; - for(i = 0; rrdr_options[i].name ; i++) { - if (unlikely(hash == rrdr_options[i].hash && !strcmp(o, rrdr_options[i].name))) { - ret |= rrdr_options[i].value; - break; - } - } - - return ret; -} - -inline RRDR_OPTIONS rrdr_options_parse(char *o) { - RRDR_OPTIONS ret = 0; - char *tok; - - while(o && *o && (tok = strsep_skip_consecutive_separators(&o, ", |"))) { - if(!*tok) continue; - ret |= rrdr_options_parse_one(tok); - } - - return ret; -} - -inline CONTEXTS_V2_OPTIONS web_client_api_request_v2_context_options(char *o) { - CONTEXTS_V2_OPTIONS ret = 0; - char *tok; - - while(o && *o && (tok = strsep_skip_consecutive_separators(&o, ", |"))) { - if(!*tok) continue; - - uint32_t hash = simple_hash(tok); - int i; - for(i = 0; contexts_v2_options[i].name ; i++) { - if (unlikely(hash == contexts_v2_options[i].hash && !strcmp(tok, contexts_v2_options[i].name))) { - ret |= contexts_v2_options[i].value; - break; - } - } - } - - return ret; -} - -inline CONTEXTS_V2_ALERT_STATUS web_client_api_request_v2_alert_status(char *o) { - CONTEXTS_V2_ALERT_STATUS ret = 0; - char *tok; - - while(o && *o && (tok = strsep_skip_consecutive_separators(&o, ", |"))) { - if(!*tok) continue; - - uint32_t hash = simple_hash(tok); - int i; - for(i = 0; contexts_v2_alert_status[i].name ; i++) { - if (unlikely(hash == contexts_v2_alert_status[i].hash && !strcmp(tok, 
contexts_v2_alert_status[i].name))) { - ret |= contexts_v2_alert_status[i].value; - break; - } - } - } - - return ret; -} - -void web_client_api_request_v2_contexts_alerts_status_to_buffer_json_array(BUFFER *wb, const char *key, CONTEXTS_V2_ALERT_STATUS options) { - buffer_json_member_add_array(wb, key); - - RRDR_OPTIONS used = 0; // to prevent adding duplicates - for(int i = 0; contexts_v2_alert_status[i].name ; i++) { - if (unlikely((contexts_v2_alert_status[i].value & options) && !(contexts_v2_alert_status[i].value & used))) { - const char *name = contexts_v2_alert_status[i].name; - used |= contexts_v2_alert_status[i].value; - - buffer_json_add_array_item_string(wb, name); - } - } - - buffer_json_array_close(wb); -} - -void web_client_api_request_v2_contexts_options_to_buffer_json_array(BUFFER *wb, const char *key, CONTEXTS_V2_OPTIONS options) { - buffer_json_member_add_array(wb, key); - - RRDR_OPTIONS used = 0; // to prevent adding duplicates - for(int i = 0; contexts_v2_options[i].name ; i++) { - if (unlikely((contexts_v2_options[i].value & options) && !(contexts_v2_options[i].value & used))) { - const char *name = contexts_v2_options[i].name; - used |= contexts_v2_options[i].value; - - buffer_json_add_array_item_string(wb, name); - } - } - - buffer_json_array_close(wb); -} - -void rrdr_options_to_buffer_json_array(BUFFER *wb, const char *key, RRDR_OPTIONS options) { - buffer_json_member_add_array(wb, key); - - RRDR_OPTIONS used = 0; // to prevent adding duplicates - for(int i = 0; rrdr_options[i].name ; i++) { - if (unlikely((rrdr_options[i].value & options) && !(rrdr_options[i].value & used))) { - const char *name = rrdr_options[i].name; - used |= rrdr_options[i].value; - - buffer_json_add_array_item_string(wb, name); - } - } - - buffer_json_array_close(wb); -} - -void rrdr_options_to_buffer(BUFFER *wb, RRDR_OPTIONS options) { - RRDR_OPTIONS used = 0; // to prevent adding duplicates - size_t added = 0; - for(int i = 0; rrdr_options[i].name ; i++) { - if 
(unlikely((rrdr_options[i].value & options) && !(rrdr_options[i].value & used))) { - const char *name = rrdr_options[i].name; - used |= rrdr_options[i].value; - - if(added++) buffer_strcat(wb, " "); - buffer_strcat(wb, name); - } - } -} - -void web_client_api_request_v1_data_options_to_string(char *buf, size_t size, RRDR_OPTIONS options) { - char *write = buf; - char *end = &buf[size - 1]; - - RRDR_OPTIONS used = 0; // to prevent adding duplicates - int added = 0; - for(int i = 0; rrdr_options[i].name ; i++) { - if (unlikely((rrdr_options[i].value & options) && !(rrdr_options[i].value & used))) { - const char *name = rrdr_options[i].name; - used |= rrdr_options[i].value; - - if(added && write < end) - *write++ = ','; - - while(*name && write < end) - *write++ = *name++; - - added++; - } - } - *write = *end = '\0'; -} - -inline uint32_t web_client_api_request_v1_data_format(char *name) { - uint32_t hash = simple_hash(name); - int i; - - for(i = 0; api_v1_data_formats[i].name ; i++) { - if (unlikely(hash == api_v1_data_formats[i].hash && !strcmp(name, api_v1_data_formats[i].name))) { - return api_v1_data_formats[i].value; - } - } - - return DATASOURCE_JSON; -} - -inline uint32_t web_client_api_request_v1_data_google_format(char *name) { - uint32_t hash = simple_hash(name); - int i; - - for(i = 0; api_v1_data_google_formats[i].name ; i++) { - if (unlikely(hash == api_v1_data_google_formats[i].hash && !strcmp(name, api_v1_data_google_formats[i].name))) { - return api_v1_data_google_formats[i].value; - } - } - - return DATASOURCE_JSON; -} - -int web_client_api_request_v1_alarms_select (char *url) { - int all = 0; - while(url) { - char *value = strsep_skip_consecutive_separators(&url, "&"); - if (!value || !*value) continue; - - if(!strcmp(value, "all") || !strcmp(value, "all=true")) all = 1; - else if(!strcmp(value, "active") || !strcmp(value, "active=true")) all = 0; - } - - return all; -} - -inline int web_client_api_request_v1_alarms(RRDHOST *host, struct web_client 
*w, char *url) { - int all = web_client_api_request_v1_alarms_select(url); - - buffer_flush(w->response.data); - w->response.data->content_type = CT_APPLICATION_JSON; - health_alarms2json(host, w->response.data, all); - buffer_no_cacheable(w->response.data); - return HTTP_RESP_OK; -} - -inline int web_client_api_request_v1_alarms_values(RRDHOST *host, struct web_client *w, char *url) { - int all = web_client_api_request_v1_alarms_select(url); - - buffer_flush(w->response.data); - w->response.data->content_type = CT_APPLICATION_JSON; - health_alarms_values2json(host, w->response.data, all); - buffer_no_cacheable(w->response.data); - return HTTP_RESP_OK; -} - -inline int web_client_api_request_v1_alarm_count(RRDHOST *host, struct web_client *w, char *url) { - RRDCALC_STATUS status = RRDCALC_STATUS_RAISED; - BUFFER *contexts = NULL; - - buffer_flush(w->response.data); - buffer_sprintf(w->response.data, "["); - - while(url) { - char *value = strsep_skip_consecutive_separators(&url, "&"); - if(!value || !*value) continue; - - char *name = strsep_skip_consecutive_separators(&value, "="); - if(!name || !*name) continue; - if(!value || !*value) continue; - - netdata_log_debug(D_WEB_CLIENT, "%llu: API v1 alarm_count query param '%s' with value '%s'", w->id, name, value); - - char* p = value; - if(!strcmp(name, "status")) { - while ((*p = toupper(*p))) p++; - if (!strcmp("CRITICAL", value)) status = RRDCALC_STATUS_CRITICAL; - else if (!strcmp("WARNING", value)) status = RRDCALC_STATUS_WARNING; - else if (!strcmp("UNINITIALIZED", value)) status = RRDCALC_STATUS_UNINITIALIZED; - else if (!strcmp("UNDEFINED", value)) status = RRDCALC_STATUS_UNDEFINED; - else if (!strcmp("REMOVED", value)) status = RRDCALC_STATUS_REMOVED; - else if (!strcmp("CLEAR", value)) status = RRDCALC_STATUS_CLEAR; - } - else if(!strcmp(name, "context") || !strcmp(name, "ctx")) { - if(!contexts) contexts = buffer_create(255, &netdata_buffers_statistics.buffers_api); - buffer_strcat(contexts, "|"); - 
buffer_strcat(contexts, value); - } - } - - health_aggregate_alarms(host, w->response.data, contexts, status); - - buffer_sprintf(w->response.data, "]\n"); - w->response.data->content_type = CT_APPLICATION_JSON; - buffer_no_cacheable(w->response.data); - - buffer_free(contexts); - return 200; -} - -inline int web_client_api_request_v1_alarm_log(RRDHOST *host, struct web_client *w, char *url) { - time_t after = 0; - char *chart = NULL; - - while(url) { - char *value = strsep_skip_consecutive_separators(&url, "&"); - if (!value || !*value) continue; - - char *name = strsep_skip_consecutive_separators(&value, "="); - if(!name || !*name) continue; - if(!value || !*value) continue; - - if (!strcmp(name, "after")) after = (time_t) strtoul(value, NULL, 0); - else if (!strcmp(name, "chart")) chart = value; - } - - buffer_flush(w->response.data); - w->response.data->content_type = CT_APPLICATION_JSON; - sql_health_alarm_log2json(host, w->response.data, after, chart); - return HTTP_RESP_OK; -} - -inline int web_client_api_request_single_chart(RRDHOST *host, struct web_client *w, char *url, void callback(RRDSET *st, BUFFER *buf)) { - int ret = HTTP_RESP_BAD_REQUEST; - char *chart = NULL; - - buffer_flush(w->response.data); - - while(url) { - char *value = strsep_skip_consecutive_separators(&url, "&"); - if(!value || !*value) continue; - - char *name = strsep_skip_consecutive_separators(&value, "="); - if(!name || !*name) continue; - if(!value || !*value) continue; - - // name and value are now the parameters - // they are not null and not empty - - if(!strcmp(name, "chart")) chart = value; - //else { - /// buffer_sprintf(w->response.data, "Unknown parameter '%s' in request.", name); - // goto cleanup; - //} - } - - if(!chart || !*chart) { - buffer_sprintf(w->response.data, "No chart id is given at the request."); - goto cleanup; - } - - RRDSET *st = rrdset_find(host, chart); - if(!st) st = rrdset_find_byname(host, chart); - if(!st) { - buffer_strcat(w->response.data, "Chart 
is not found: "); - buffer_strcat_htmlescape(w->response.data, chart); - ret = HTTP_RESP_NOT_FOUND; - goto cleanup; - } - - w->response.data->content_type = CT_APPLICATION_JSON; - st->last_accessed_time_s = now_realtime_sec(); - callback(st, w->response.data); - return HTTP_RESP_OK; - - cleanup: - return ret; -} - -static inline int web_client_api_request_variable(RRDHOST *host, struct web_client *w, char *url) { - int ret = HTTP_RESP_BAD_REQUEST; - char *chart = NULL; - char *variable = NULL; - - buffer_flush(w->response.data); - - while(url) { - char *value = strsep_skip_consecutive_separators(&url, "&"); - if(!value || !*value) continue; - - char *name = strsep_skip_consecutive_separators(&value, "="); - if(!name || !*name) continue; - if(!value || !*value) continue; - - // name and value are now the parameters - // they are not null and not empty - - if(!strcmp(name, "chart")) chart = value; - else if(!strcmp(name, "variable")) variable = value; - } - - if(!chart || !*chart || !variable || !*variable) { - buffer_sprintf(w->response.data, "A chart= and a variable= are required."); - goto cleanup; - } - - RRDSET *st = rrdset_find(host, chart); - if(!st) st = rrdset_find_byname(host, chart); - if(!st) { - buffer_strcat(w->response.data, "Chart is not found: "); - buffer_strcat_htmlescape(w->response.data, chart); - ret = HTTP_RESP_NOT_FOUND; - goto cleanup; - } - - w->response.data->content_type = CT_APPLICATION_JSON; - st->last_accessed_time_s = now_realtime_sec(); - alert_variable_lookup_trace(host, st, variable, w->response.data); - - return HTTP_RESP_OK; - -cleanup: - return ret; -} - -inline int web_client_api_request_v1_alarm_variables(RRDHOST *host, struct web_client *w, char *url) { - return web_client_api_request_single_chart(host, w, url, health_api_v1_chart_variables2json); -} - -static int web_client_api_request_v1_context(RRDHOST *host, struct web_client *w, char *url) { - char *context = NULL; - RRDCONTEXT_TO_JSON_OPTIONS options = 
RRDCONTEXT_OPTION_NONE; - time_t after = 0, before = 0; - const char *chart_label_key = NULL, *chart_labels_filter = NULL; - BUFFER *dimensions = NULL; - - buffer_flush(w->response.data); - - while(url) { - char *value = strsep_skip_consecutive_separators(&url, "&"); - if(!value || !*value) continue; - - char *name = strsep_skip_consecutive_separators(&value, "="); - if(!name || !*name) continue; - if(!value || !*value) continue; - - // name and value are now the parameters - // they are not null and not empty - - if(!strcmp(name, "context") || !strcmp(name, "ctx")) context = value; - else if(!strcmp(name, "after")) after = str2l(value); - else if(!strcmp(name, "before")) before = str2l(value); - else if(!strcmp(name, "options")) options = rrdcontext_to_json_parse_options(value); - else if(!strcmp(name, "chart_label_key")) chart_label_key = value; - else if(!strcmp(name, "chart_labels_filter")) chart_labels_filter = value; - else if(!strcmp(name, "dimension") || !strcmp(name, "dim") || !strcmp(name, "dimensions") || !strcmp(name, "dims")) { - if(!dimensions) dimensions = buffer_create(100, &netdata_buffers_statistics.buffers_api); - buffer_strcat(dimensions, "|"); - buffer_strcat(dimensions, value); - } - } - - if(!context || !*context) { - buffer_sprintf(w->response.data, "No context is given at the request."); - return HTTP_RESP_BAD_REQUEST; - } - - SIMPLE_PATTERN *chart_label_key_pattern = NULL; - SIMPLE_PATTERN *chart_labels_filter_pattern = NULL; - SIMPLE_PATTERN *chart_dimensions_pattern = NULL; - - if(chart_label_key) - chart_label_key_pattern = simple_pattern_create(chart_label_key, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT, true); - - if(chart_labels_filter) - chart_labels_filter_pattern = simple_pattern_create(chart_labels_filter, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT, - true); - - if(dimensions) { - chart_dimensions_pattern = simple_pattern_create(buffer_tostring(dimensions), ",|\t\r\n\f\v", - SIMPLE_PATTERN_EXACT, true); - buffer_free(dimensions); - } - - 
w->response.data->content_type = CT_APPLICATION_JSON; - int ret = rrdcontext_to_json(host, w->response.data, after, before, options, context, chart_label_key_pattern, chart_labels_filter_pattern, chart_dimensions_pattern); - - simple_pattern_free(chart_label_key_pattern); - simple_pattern_free(chart_labels_filter_pattern); - simple_pattern_free(chart_dimensions_pattern); - - return ret; -} - -static int web_client_api_request_v1_contexts(RRDHOST *host, struct web_client *w, char *url) { - RRDCONTEXT_TO_JSON_OPTIONS options = RRDCONTEXT_OPTION_NONE; - time_t after = 0, before = 0; - const char *chart_label_key = NULL, *chart_labels_filter = NULL; - BUFFER *dimensions = NULL; - - buffer_flush(w->response.data); - - while(url) { - char *value = strsep_skip_consecutive_separators(&url, "&"); - if(!value || !*value) continue; - - char *name = strsep_skip_consecutive_separators(&value, "="); - if(!name || !*name) continue; - if(!value || !*value) continue; - - // name and value are now the parameters - // they are not null and not empty - - if(!strcmp(name, "after")) after = str2l(value); - else if(!strcmp(name, "before")) before = str2l(value); - else if(!strcmp(name, "options")) options = rrdcontext_to_json_parse_options(value); - else if(!strcmp(name, "chart_label_key")) chart_label_key = value; - else if(!strcmp(name, "chart_labels_filter")) chart_labels_filter = value; - else if(!strcmp(name, "dimension") || !strcmp(name, "dim") || !strcmp(name, "dimensions") || !strcmp(name, "dims")) { - if(!dimensions) dimensions = buffer_create(100, &netdata_buffers_statistics.buffers_api); - buffer_strcat(dimensions, "|"); - buffer_strcat(dimensions, value); - } - } - - SIMPLE_PATTERN *chart_label_key_pattern = NULL; - SIMPLE_PATTERN *chart_labels_filter_pattern = NULL; - SIMPLE_PATTERN *chart_dimensions_pattern = NULL; - - if(chart_label_key) - chart_label_key_pattern = simple_pattern_create(chart_label_key, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT, true); - - 
if(chart_labels_filter) - chart_labels_filter_pattern = simple_pattern_create(chart_labels_filter, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT, - true); - - if(dimensions) { - chart_dimensions_pattern = simple_pattern_create(buffer_tostring(dimensions), ",|\t\r\n\f\v", - SIMPLE_PATTERN_EXACT, true); - buffer_free(dimensions); - } - - w->response.data->content_type = CT_APPLICATION_JSON; - int ret = rrdcontexts_to_json(host, w->response.data, after, before, options, chart_label_key_pattern, chart_labels_filter_pattern, chart_dimensions_pattern); - - simple_pattern_free(chart_label_key_pattern); - simple_pattern_free(chart_labels_filter_pattern); - simple_pattern_free(chart_dimensions_pattern); - - return ret; -} - -inline int web_client_api_request_v1_charts(RRDHOST *host, struct web_client *w, char *url) { - (void)url; - - buffer_flush(w->response.data); - w->response.data->content_type = CT_APPLICATION_JSON; - charts2json(host, w->response.data); - return HTTP_RESP_OK; -} - -inline int web_client_api_request_v1_chart(RRDHOST *host, struct web_client *w, char *url) { - return web_client_api_request_single_chart(host, w, url, rrd_stats_api_v1_chart); -} - -// returns the HTTP code -static inline int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, char *url) { - netdata_log_debug(D_WEB_CLIENT, "%llu: API v1 data with URL '%s'", w->id, url); - - int ret = HTTP_RESP_BAD_REQUEST; - BUFFER *dimensions = NULL; - - buffer_flush(w->response.data); - - char *google_version = "0.6", - *google_reqId = "0", - *google_sig = "0", - *google_out = "json", - *responseHandler = NULL, - *outFileName = NULL; - - time_t last_timestamp_in_data = 0, google_timestamp = 0; - - char *chart = NULL; - char *before_str = NULL; - char *after_str = NULL; - char *group_time_str = NULL; - char *points_str = NULL; - char *timeout_str = NULL; - char *context = NULL; - char *chart_label_key = NULL; - char *chart_labels_filter = NULL; - char *group_options = NULL; - size_t tier = 0; - 
RRDR_TIME_GROUPING group = RRDR_GROUPING_AVERAGE; - DATASOURCE_FORMAT format = DATASOURCE_JSON; - RRDR_OPTIONS options = 0; - - while(url) { - char *value = strsep_skip_consecutive_separators(&url, "&"); - if(!value || !*value) continue; - - char *name = strsep_skip_consecutive_separators(&value, "="); - if(!name || !*name) continue; - if(!value || !*value) continue; - - netdata_log_debug(D_WEB_CLIENT, "%llu: API v1 data query param '%s' with value '%s'", w->id, name, value); - - // name and value are now the parameters - // they are not null and not empty - - if(!strcmp(name, "context")) context = value; - else if(!strcmp(name, "chart_label_key")) chart_label_key = value; - else if(!strcmp(name, "chart_labels_filter")) chart_labels_filter = value; - else if(!strcmp(name, "chart")) chart = value; - else if(!strcmp(name, "dimension") || !strcmp(name, "dim") || !strcmp(name, "dimensions") || !strcmp(name, "dims")) { - if(!dimensions) dimensions = buffer_create(100, &netdata_buffers_statistics.buffers_api); - buffer_strcat(dimensions, "|"); - buffer_strcat(dimensions, value); - } - else if(!strcmp(name, "show_dimensions")) options |= RRDR_OPTION_ALL_DIMENSIONS; - else if(!strcmp(name, "after")) after_str = value; - else if(!strcmp(name, "before")) before_str = value; - else if(!strcmp(name, "points")) points_str = value; - else if(!strcmp(name, "timeout")) timeout_str = value; - else if(!strcmp(name, "gtime")) group_time_str = value; - else if(!strcmp(name, "group_options")) group_options = value; - else if(!strcmp(name, "group")) { - group = time_grouping_parse(value, RRDR_GROUPING_AVERAGE); - } - else if(!strcmp(name, "format")) { - format = web_client_api_request_v1_data_format(value); - } - else if(!strcmp(name, "options")) { - options |= rrdr_options_parse(value); - } - else if(!strcmp(name, "callback")) { - responseHandler = value; - } - else if(!strcmp(name, "filename")) { - outFileName = value; - } - else if(!strcmp(name, "tqx")) { - // parse Google 
Visualization API options - // https://developers.google.com/chart/interactive/docs/dev/implementing_data_source - char *tqx_name, *tqx_value; - - while(value) { - tqx_value = strsep_skip_consecutive_separators(&value, ";"); - if(!tqx_value || !*tqx_value) continue; - - tqx_name = strsep_skip_consecutive_separators(&tqx_value, ":"); - if(!tqx_name || !*tqx_name) continue; - if(!tqx_value || !*tqx_value) continue; - - if(!strcmp(tqx_name, "version")) - google_version = tqx_value; - else if(!strcmp(tqx_name, "reqId")) - google_reqId = tqx_value; - else if(!strcmp(tqx_name, "sig")) { - google_sig = tqx_value; - google_timestamp = strtoul(google_sig, NULL, 0); - } - else if(!strcmp(tqx_name, "out")) { - google_out = tqx_value; - format = web_client_api_request_v1_data_google_format(google_out); - } - else if(!strcmp(tqx_name, "responseHandler")) - responseHandler = tqx_value; - else if(!strcmp(tqx_name, "outFileName")) - outFileName = tqx_value; - } - } - else if(!strcmp(name, "tier")) { - tier = str2ul(value); - if(tier < storage_tiers) - options |= RRDR_OPTION_SELECTED_TIER; - else - tier = 0; - } - } - - // validate the google parameters given - fix_google_param(google_out); - fix_google_param(google_sig); - fix_google_param(google_reqId); - fix_google_param(google_version); - fix_google_param(responseHandler); - fix_google_param(outFileName); - - RRDSET *st = NULL; - ONEWAYALLOC *owa = onewayalloc_create(0); - QUERY_TARGET *qt = NULL; - - if(!is_valid_sp(chart) && !is_valid_sp(context)) { - buffer_sprintf(w->response.data, "No chart or context is given."); - goto cleanup; - } - - if(chart && !context) { - // check if this is a specific chart - st = rrdset_find(host, chart); - if (!st) st = rrdset_find_byname(host, chart); - } - - long long before = (before_str && *before_str)?str2l(before_str):0; - long long after = (after_str && *after_str) ?str2l(after_str):-600; - int points = (points_str && *points_str)?str2i(points_str):0; - int timeout = (timeout_str && 
*timeout_str)?str2i(timeout_str): 0; - long group_time = (group_time_str && *group_time_str)?str2l(group_time_str):0; - - QUERY_TARGET_REQUEST qtr = { - .version = 1, - .after = after, - .before = before, - .host = host, - .st = st, - .nodes = NULL, - .contexts = context, - .instances = chart, - .dimensions = (dimensions)?buffer_tostring(dimensions):NULL, - .timeout_ms = timeout, - .points = points, - .format = format, - .options = options, - .time_group_method = group, - .time_group_options = group_options, - .resampling_time = group_time, - .tier = tier, - .chart_label_key = chart_label_key, - .labels = chart_labels_filter, - .query_source = QUERY_SOURCE_API_DATA, - .priority = STORAGE_PRIORITY_NORMAL, - .interrupt_callback = web_client_interrupt_callback, - .interrupt_callback_data = w, - .transaction = &w->transaction, - }; - qt = query_target_create(&qtr); - - if(!qt || !qt->query.used) { - buffer_sprintf(w->response.data, "No metrics where matched to query."); - ret = HTTP_RESP_NOT_FOUND; - goto cleanup; - } - - web_client_timeout_checkpoint_set(w, timeout); - if(web_client_timeout_checkpoint_and_check(w, NULL)) { - ret = w->response.code; - goto cleanup; - } - - if(outFileName && *outFileName) { - buffer_sprintf(w->response.header, "Content-Disposition: attachment; filename=\"%s\"\r\n", outFileName); - netdata_log_debug(D_WEB_CLIENT, "%llu: generating outfilename header: '%s'", w->id, outFileName); - } - - if(format == DATASOURCE_DATATABLE_JSONP) { - if(responseHandler == NULL) - responseHandler = "google.visualization.Query.setResponse"; - - netdata_log_debug(D_WEB_CLIENT_ACCESS, "%llu: GOOGLE JSON/JSONP: version = '%s', reqId = '%s', sig = '%s', out = '%s', responseHandler = '%s', outFileName = '%s'", - w->id, google_version, google_reqId, google_sig, google_out, responseHandler, outFileName - ); - - buffer_sprintf( - w->response.data, - "%s({version:'%s',reqId:'%s',status:'ok',sig:'%"PRId64"',table:", - responseHandler, - google_version, - google_reqId, - 
(int64_t)(st ? st->last_updated.tv_sec : 0)); - } - else if(format == DATASOURCE_JSONP) { - if(responseHandler == NULL) - responseHandler = "callback"; - - buffer_strcat(w->response.data, responseHandler); - buffer_strcat(w->response.data, "("); - } - - ret = data_query_execute(owa, w->response.data, qt, &last_timestamp_in_data); - - if(format == DATASOURCE_DATATABLE_JSONP) { - if(google_timestamp < last_timestamp_in_data) - buffer_strcat(w->response.data, "});"); - - else { - // the client already has the latest data - buffer_flush(w->response.data); - buffer_sprintf(w->response.data, - "%s({version:'%s',reqId:'%s',status:'error',errors:[{reason:'not_modified',message:'Data not modified'}]});", - responseHandler, google_version, google_reqId); - } - } - else if(format == DATASOURCE_JSONP) - buffer_strcat(w->response.data, ");"); - - if(qt->internal.relative) - buffer_no_cacheable(w->response.data); - else - buffer_cacheable(w->response.data); - -cleanup: - query_target_release(qt); - onewayalloc_destroy(owa); - buffer_free(dimensions); - return ret; -} - -// Pings a netdata server: -// /api/v1/registry?action=hello -// -// Access to a netdata registry: -// /api/v1/registry?action=access&machine=${machine_guid}&name=${hostname}&url=${url} -// -// Delete from a netdata registry: -// /api/v1/registry?action=delete&machine=${machine_guid}&name=${hostname}&url=${url}&delete_url=${delete_url} -// -// Search for the URLs of a machine: -// /api/v1/registry?action=search&for=${machine_guid} -// -// Impersonate: -// /api/v1/registry?action=switch&machine=${machine_guid}&name=${hostname}&url=${url}&to=${new_person_guid} -inline int web_client_api_request_v1_registry(RRDHOST *host, struct web_client *w, char *url) { - static uint32_t hash_action = 0, hash_access = 0, hash_hello = 0, hash_delete = 0, hash_search = 0, - hash_switch = 0, hash_machine = 0, hash_url = 0, hash_name = 0, hash_delete_url = 0, hash_for = 0, - hash_to = 0 /*, hash_redirects = 0 */; - - 
if(unlikely(!hash_action)) { - hash_action = simple_hash("action"); - hash_access = simple_hash("access"); - hash_hello = simple_hash("hello"); - hash_delete = simple_hash("delete"); - hash_search = simple_hash("search"); - hash_switch = simple_hash("switch"); - hash_machine = simple_hash("machine"); - hash_url = simple_hash("url"); - hash_name = simple_hash("name"); - hash_delete_url = simple_hash("delete_url"); - hash_for = simple_hash("for"); - hash_to = simple_hash("to"); -/* - hash_redirects = simple_hash("redirects"); -*/ - } - - netdata_log_debug(D_WEB_CLIENT, "%llu: API v1 registry with URL '%s'", w->id, url); - - // TODO - // The browser may send multiple cookies with our id - - char person_guid[UUID_STR_LEN] = ""; - char *cookie = strstr(w->response.data->buffer, NETDATA_REGISTRY_COOKIE_NAME "="); - if(cookie) - strncpyz(person_guid, &cookie[sizeof(NETDATA_REGISTRY_COOKIE_NAME)], UUID_STR_LEN - 1); - else if(!extract_bearer_token_from_request(w, person_guid, sizeof(person_guid))) - person_guid[0] = '\0'; - - char action = '\0'; - char *machine_guid = NULL, - *machine_url = NULL, - *url_name = NULL, - *search_machine_guid = NULL, - *delete_url = NULL, - *to_person_guid = NULL; -/* - int redirects = 0; -*/ - - // Don't cache registry responses - buffer_no_cacheable(w->response.data); - - while(url) { - char *value = strsep_skip_consecutive_separators(&url, "&"); - if (!value || !*value) continue; - - char *name = strsep_skip_consecutive_separators(&value, "="); - if (!name || !*name) continue; - if (!value || !*value) continue; - - netdata_log_debug(D_WEB_CLIENT, "%llu: API v1 registry query param '%s' with value '%s'", w->id, name, value); - - uint32_t hash = simple_hash(name); - - if(hash == hash_action && !strcmp(name, "action")) { - uint32_t vhash = simple_hash(value); - - if(vhash == hash_access && !strcmp(value, "access")) action = 'A'; - else if(vhash == hash_hello && !strcmp(value, "hello")) action = 'H'; - else if(vhash == hash_delete && 
!strcmp(value, "delete")) action = 'D'; - else if(vhash == hash_search && !strcmp(value, "search")) action = 'S'; - else if(vhash == hash_switch && !strcmp(value, "switch")) action = 'W'; -#ifdef NETDATA_INTERNAL_CHECKS - else netdata_log_error("unknown registry action '%s'", value); -#endif /* NETDATA_INTERNAL_CHECKS */ - } -/* - else if(hash == hash_redirects && !strcmp(name, "redirects")) - redirects = atoi(value); -*/ - else if(hash == hash_machine && !strcmp(name, "machine")) - machine_guid = value; - - else if(hash == hash_url && !strcmp(name, "url")) - machine_url = value; - - else if(action == 'A') { - if(hash == hash_name && !strcmp(name, "name")) - url_name = value; - } - else if(action == 'D') { - if(hash == hash_delete_url && !strcmp(name, "delete_url")) - delete_url = value; - } - else if(action == 'S') { - if(hash == hash_for && !strcmp(name, "for")) - search_machine_guid = value; - } - else if(action == 'W') { - if(hash == hash_to && !strcmp(name, "to")) - to_person_guid = value; - } -#ifdef NETDATA_INTERNAL_CHECKS - else netdata_log_error("unused registry URL parameter '%s' with value '%s'", name, value); -#endif /* NETDATA_INTERNAL_CHECKS */ - } - - bool do_not_track = respect_web_browser_do_not_track_policy && web_client_has_donottrack(w); - - if(unlikely(action == 'H')) { - // HELLO request, dashboard ACL - analytics_log_dashboard(); - if(unlikely(!http_can_access_dashboard(w))) - return web_client_permission_denied_acl(w); - } - else { - // everything else, registry ACL - if(unlikely(!http_can_access_registry(w))) - return web_client_permission_denied_acl(w); - - if(unlikely(do_not_track)) { - buffer_flush(w->response.data); - buffer_sprintf(w->response.data, "Your web browser is sending 'DNT: 1' (Do Not Track). 
The registry requires persistent cookies on your browser to work."); - return HTTP_RESP_BAD_REQUEST; - } - } - - buffer_no_cacheable(w->response.data); - - switch(action) { - case 'A': - if(unlikely(!machine_guid || !machine_url || !url_name)) { - netdata_log_error("Invalid registry request - access requires these parameters: machine ('%s'), url ('%s'), name ('%s')", machine_guid ? machine_guid : "UNSET", machine_url ? machine_url : "UNSET", url_name ? url_name : "UNSET"); - buffer_flush(w->response.data); - buffer_strcat(w->response.data, "Invalid registry Access request."); - return HTTP_RESP_BAD_REQUEST; - } - - web_client_enable_tracking_required(w); - return registry_request_access_json(host, w, person_guid, machine_guid, machine_url, url_name, now_realtime_sec()); - - case 'D': - if(unlikely(!machine_guid || !machine_url || !delete_url)) { - netdata_log_error("Invalid registry request - delete requires these parameters: machine ('%s'), url ('%s'), delete_url ('%s')", machine_guid?machine_guid:"UNSET", machine_url?machine_url:"UNSET", delete_url?delete_url:"UNSET"); - buffer_flush(w->response.data); - buffer_strcat(w->response.data, "Invalid registry Delete request."); - return HTTP_RESP_BAD_REQUEST; - } - - web_client_enable_tracking_required(w); - return registry_request_delete_json(host, w, person_guid, machine_guid, machine_url, delete_url, now_realtime_sec()); - - case 'S': - if(unlikely(!search_machine_guid)) { - netdata_log_error("Invalid registry request - search requires these parameters: for ('%s')", search_machine_guid?search_machine_guid:"UNSET"); - buffer_flush(w->response.data); - buffer_strcat(w->response.data, "Invalid registry Search request."); - return HTTP_RESP_BAD_REQUEST; - } - - web_client_enable_tracking_required(w); - return registry_request_search_json(host, w, person_guid, search_machine_guid); - - case 'W': - if(unlikely(!machine_guid || !machine_url || !to_person_guid)) { - netdata_log_error("Invalid registry request - switching 
identity requires these parameters: machine ('%s'), url ('%s'), to ('%s')", machine_guid?machine_guid:"UNSET", machine_url?machine_url:"UNSET", to_person_guid?to_person_guid:"UNSET"); - buffer_flush(w->response.data); - buffer_strcat(w->response.data, "Invalid registry Switch request."); - return HTTP_RESP_BAD_REQUEST; - } - - web_client_enable_tracking_required(w); - return registry_request_switch_json(host, w, person_guid, machine_guid, machine_url, to_person_guid, now_realtime_sec()); - - case 'H': - return registry_request_hello_json(host, w, do_not_track); - - default: - buffer_flush(w->response.data); - buffer_strcat(w->response.data, "Invalid registry request - you need to set an action: hello, access, delete, search"); - return HTTP_RESP_BAD_REQUEST; - } -} - -void web_client_api_request_v1_info_summary_alarm_statuses(RRDHOST *host, BUFFER *wb, const char *key) { - buffer_json_member_add_object(wb, key); - - size_t normal = 0, warning = 0, critical = 0; - RRDCALC *rc; - foreach_rrdcalc_in_rrdhost_read(host, rc) { - if(unlikely(!rc->rrdset || !rc->rrdset->last_collected_time.tv_sec)) - continue; - - switch(rc->status) { - case RRDCALC_STATUS_WARNING: - warning++; - break; - case RRDCALC_STATUS_CRITICAL: - critical++; - break; - default: - normal++; - } - } - foreach_rrdcalc_in_rrdhost_done(rc); - - buffer_json_member_add_uint64(wb, "normal", normal); - buffer_json_member_add_uint64(wb, "warning", warning); - buffer_json_member_add_uint64(wb, "critical", critical); - - buffer_json_object_close(wb); -} - -static inline void web_client_api_request_v1_info_mirrored_hosts_status(BUFFER *wb, RRDHOST *host) { - buffer_json_add_array_item_object(wb); - - buffer_json_member_add_string(wb, "hostname", rrdhost_hostname(host)); - buffer_json_member_add_uint64(wb, "hops", host->system_info ? host->system_info->hops : (host == localhost) ? 
0 : 1); - buffer_json_member_add_boolean(wb, "reachable", (host == localhost || !rrdhost_flag_check(host, RRDHOST_FLAG_ORPHAN))); - - buffer_json_member_add_string(wb, "guid", host->machine_guid); - buffer_json_member_add_uuid(wb, "node_id", host->node_id); - rrdhost_aclk_state_lock(host); - buffer_json_member_add_string(wb, "claim_id", host->aclk_state.claimed_id); - rrdhost_aclk_state_unlock(host); - - buffer_json_object_close(wb); -} - -static inline void web_client_api_request_v1_info_mirrored_hosts(BUFFER *wb) { - RRDHOST *host; - - rrd_rdlock(); - - buffer_json_member_add_array(wb, "mirrored_hosts"); - rrdhost_foreach_read(host) - buffer_json_add_array_item_string(wb, rrdhost_hostname(host)); - buffer_json_array_close(wb); - - buffer_json_member_add_array(wb, "mirrored_hosts_status"); - rrdhost_foreach_read(host) { - if ((host == localhost || !rrdhost_flag_check(host, RRDHOST_FLAG_ORPHAN))) { - web_client_api_request_v1_info_mirrored_hosts_status(wb, host); - } - } - rrdhost_foreach_read(host) { - if ((host != localhost && rrdhost_flag_check(host, RRDHOST_FLAG_ORPHAN))) { - web_client_api_request_v1_info_mirrored_hosts_status(wb, host); - } - } - buffer_json_array_close(wb); - - rrd_rdunlock(); -} - -void host_labels2json(RRDHOST *host, BUFFER *wb, const char *key) { - buffer_json_member_add_object(wb, key); - rrdlabels_to_buffer_json_members(host->rrdlabels, wb); - buffer_json_object_close(wb); -} - -static void host_collectors(RRDHOST *host, BUFFER *wb) { - buffer_json_member_add_array(wb, "collectors"); - - DICTIONARY *dict = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_DONT_OVERWRITE_VALUE); - RRDSET *st; - char name[500]; - - time_t now = now_realtime_sec(); - - rrdset_foreach_read(st, host) { - if (!rrdset_is_available_for_viewers(st)) - continue; - - sprintf(name, "%s:%s", rrdset_plugin_name(st), rrdset_module_name(st)); - - bool old = 0; - bool *set = dictionary_set(dict, name, &old, sizeof(bool)); - if(!*set) { - *set = true; - 
st->last_accessed_time_s = now; - buffer_json_add_array_item_object(wb); - buffer_json_member_add_string(wb, "plugin", rrdset_plugin_name(st)); - buffer_json_member_add_string(wb, "module", rrdset_module_name(st)); - buffer_json_object_close(wb); - } - } - rrdset_foreach_done(st); - dictionary_destroy(dict); - - buffer_json_array_close(wb); -} - -extern int aclk_connected; -inline int web_client_api_request_v1_info_fill_buffer(RRDHOST *host, BUFFER *wb) { - buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); - - buffer_json_member_add_string(wb, "version", rrdhost_program_version(host)); - buffer_json_member_add_string(wb, "uid", host->machine_guid); - - buffer_json_member_add_uint64(wb, "hosts-available", rrdhost_hosts_available()); - web_client_api_request_v1_info_mirrored_hosts(wb); - - web_client_api_request_v1_info_summary_alarm_statuses(host, wb, "alarms"); - - buffer_json_member_add_string_or_empty(wb, "os_name", host->system_info->host_os_name); - buffer_json_member_add_string_or_empty(wb, "os_id", host->system_info->host_os_id); - buffer_json_member_add_string_or_empty(wb, "os_id_like", host->system_info->host_os_id_like); - buffer_json_member_add_string_or_empty(wb, "os_version", host->system_info->host_os_version); - buffer_json_member_add_string_or_empty(wb, "os_version_id", host->system_info->host_os_version_id); - buffer_json_member_add_string_or_empty(wb, "os_detection", host->system_info->host_os_detection); - buffer_json_member_add_string_or_empty(wb, "cores_total", host->system_info->host_cores); - buffer_json_member_add_string_or_empty(wb, "total_disk_space", host->system_info->host_disk_space); - buffer_json_member_add_string_or_empty(wb, "cpu_freq", host->system_info->host_cpu_freq); - buffer_json_member_add_string_or_empty(wb, "ram_total", host->system_info->host_ram_total); - - buffer_json_member_add_string_or_omit(wb, "container_os_name", host->system_info->container_os_name); - 
buffer_json_member_add_string_or_omit(wb, "container_os_id", host->system_info->container_os_id); - buffer_json_member_add_string_or_omit(wb, "container_os_id_like", host->system_info->container_os_id_like); - buffer_json_member_add_string_or_omit(wb, "container_os_version", host->system_info->container_os_version); - buffer_json_member_add_string_or_omit(wb, "container_os_version_id", host->system_info->container_os_version_id); - buffer_json_member_add_string_or_omit(wb, "container_os_detection", host->system_info->container_os_detection); - buffer_json_member_add_string_or_omit(wb, "is_k8s_node", host->system_info->is_k8s_node); - - buffer_json_member_add_string_or_empty(wb, "kernel_name", host->system_info->kernel_name); - buffer_json_member_add_string_or_empty(wb, "kernel_version", host->system_info->kernel_version); - buffer_json_member_add_string_or_empty(wb, "architecture", host->system_info->architecture); - buffer_json_member_add_string_or_empty(wb, "virtualization", host->system_info->virtualization); - buffer_json_member_add_string_or_empty(wb, "virt_detection", host->system_info->virt_detection); - buffer_json_member_add_string_or_empty(wb, "container", host->system_info->container); - buffer_json_member_add_string_or_empty(wb, "container_detection", host->system_info->container_detection); - - buffer_json_member_add_string_or_omit(wb, "cloud_provider_type", host->system_info->cloud_provider_type); - buffer_json_member_add_string_or_omit(wb, "cloud_instance_type", host->system_info->cloud_instance_type); - buffer_json_member_add_string_or_omit(wb, "cloud_instance_region", host->system_info->cloud_instance_region); - - host_labels2json(host, wb, "host_labels"); - host_functions2json(host, wb); - host_collectors(host, wb); - - buffer_json_member_add_boolean(wb, "cloud-enabled", netdata_cloud_enabled); - -#ifdef ENABLE_ACLK - buffer_json_member_add_boolean(wb, "cloud-available", true); -#else - buffer_json_member_add_boolean(wb, "cloud-available", false); 
-#endif - - char *agent_id = get_agent_claimid(); - buffer_json_member_add_boolean(wb, "agent-claimed", agent_id != NULL); - freez(agent_id); - -#ifdef ENABLE_ACLK - buffer_json_member_add_boolean(wb, "aclk-available", aclk_connected); -#else - buffer_json_member_add_boolean(wb, "aclk-available", false); -#endif - - buffer_json_member_add_string(wb, "memory-mode", rrd_memory_mode_name(host->rrd_memory_mode)); -#ifdef ENABLE_DBENGINE - buffer_json_member_add_uint64(wb, "multidb-disk-quota", default_multidb_disk_quota_mb); - buffer_json_member_add_uint64(wb, "page-cache-size", default_rrdeng_page_cache_mb); -#endif // ENABLE_DBENGINE - buffer_json_member_add_boolean(wb, "web-enabled", web_server_mode != WEB_SERVER_MODE_NONE); - buffer_json_member_add_boolean(wb, "stream-enabled", default_rrdpush_enabled); - - buffer_json_member_add_boolean(wb, "stream-compression", - host->sender && host->sender->compressor.initialized); - -#ifdef ENABLE_HTTPS - buffer_json_member_add_boolean(wb, "https-enabled", true); -#else - buffer_json_member_add_boolean(wb, "https-enabled", false); -#endif - - buffer_json_member_add_quoted_string(wb, "buildinfo", analytics_data.netdata_buildinfo); - buffer_json_member_add_quoted_string(wb, "release-channel", analytics_data.netdata_config_release_channel); - buffer_json_member_add_quoted_string(wb, "notification-methods", analytics_data.netdata_notification_methods); - - buffer_json_member_add_boolean(wb, "exporting-enabled", analytics_data.exporting_enabled); - buffer_json_member_add_quoted_string(wb, "exporting-connectors", analytics_data.netdata_exporting_connectors); - - buffer_json_member_add_uint64(wb, "allmetrics-prometheus-used", analytics_data.prometheus_hits); - buffer_json_member_add_uint64(wb, "allmetrics-shell-used", analytics_data.shell_hits); - buffer_json_member_add_uint64(wb, "allmetrics-json-used", analytics_data.json_hits); - buffer_json_member_add_uint64(wb, "dashboard-used", analytics_data.dashboard_hits); - - 
buffer_json_member_add_uint64(wb, "charts-count", analytics_data.charts_count); - buffer_json_member_add_uint64(wb, "metrics-count", analytics_data.metrics_count); - -#if defined(ENABLE_ML) - buffer_json_member_add_object(wb, "ml-info"); - ml_host_get_info(host, wb); - buffer_json_object_close(wb); -#endif - - buffer_json_finalize(wb); - return 0; -} - -#if defined(ENABLE_ML) -int web_client_api_request_v1_ml_info(RRDHOST *host, struct web_client *w, char *url) { - (void) url; - - if (!netdata_ready) - return HTTP_RESP_SERVICE_UNAVAILABLE; - - BUFFER *wb = w->response.data; - buffer_flush(wb); - wb->content_type = CT_APPLICATION_JSON; - - buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); - ml_host_get_detection_info(host, wb); - buffer_json_finalize(wb); - - buffer_no_cacheable(wb); - - return HTTP_RESP_OK; -} -#endif // ENABLE_ML - -inline int web_client_api_request_v1_info(RRDHOST *host, struct web_client *w, char *url) { - (void)url; - if (!netdata_ready) return HTTP_RESP_SERVICE_UNAVAILABLE; - BUFFER *wb = w->response.data; - buffer_flush(wb); - wb->content_type = CT_APPLICATION_JSON; - - web_client_api_request_v1_info_fill_buffer(host, wb); - - buffer_no_cacheable(wb); - return HTTP_RESP_OK; -} - -static int web_client_api_request_v1_aclk_state(RRDHOST *host, struct web_client *w, char *url) { - UNUSED(url); - UNUSED(host); - if (!netdata_ready) return HTTP_RESP_SERVICE_UNAVAILABLE; - - BUFFER *wb = w->response.data; - buffer_flush(wb); -#ifdef ENABLE_ACLK - char *str = aclk_state_json(); - buffer_strcat(wb, str); - freez(str); -#else - buffer_strcat(wb, "{\"aclk-available\":false}"); -#endif - - wb->content_type = CT_APPLICATION_JSON; - buffer_no_cacheable(wb); - return HTTP_RESP_OK; -} - -int web_client_api_request_v1_metric_correlations(RRDHOST *host, struct web_client *w, char *url) { - return web_client_api_request_weights(host, w, url, default_metric_correlations_method, WEIGHTS_FORMAT_CHARTS, 1); -} - -int 
web_client_api_request_v1_weights(RRDHOST *host, struct web_client *w, char *url) { - return web_client_api_request_weights(host, w, url, WEIGHTS_METHOD_ANOMALY_RATE, WEIGHTS_FORMAT_CONTEXTS, 1); -} - -static void web_client_progress_functions_update(void *data, size_t done, size_t all) { - // handle progress updates from the plugin - struct web_client *w = data; - query_progress_functions_update(&w->transaction, done, all); -} - -int web_client_api_request_v1_function(RRDHOST *host, struct web_client *w, char *url) { - if (!netdata_ready) - return HTTP_RESP_SERVICE_UNAVAILABLE; - - int timeout = 0; - const char *function = NULL; - - while (url) { - char *value = strsep_skip_consecutive_separators(&url, "&"); - if (!value || !*value) - continue; - - char *name = strsep_skip_consecutive_separators(&value, "="); - if (!name || !*name) - continue; - - if (!strcmp(name, "function")) - function = value; - - else if (!strcmp(name, "timeout")) - timeout = (int) strtoul(value, NULL, 0); - } - - BUFFER *wb = w->response.data; - buffer_flush(wb); - wb->content_type = CT_APPLICATION_JSON; - buffer_no_cacheable(wb); - - char transaction[UUID_COMPACT_STR_LEN]; - uuid_unparse_lower_compact(w->transaction, transaction); - - CLEAN_BUFFER *source = buffer_create(100, NULL); - web_client_source2buffer(w, source); - - return rrd_function_run(host, wb, timeout, w->access, function, true, transaction, - NULL, NULL, - web_client_progress_functions_update, w, - web_client_interrupt_callback, w, NULL, - buffer_tostring(source)); -} - -int web_client_api_request_v1_functions(RRDHOST *host, struct web_client *w, char *url __maybe_unused) { - if (!netdata_ready) - return HTTP_RESP_SERVICE_UNAVAILABLE; - - BUFFER *wb = w->response.data; - buffer_flush(wb); - wb->content_type = CT_APPLICATION_JSON; - buffer_no_cacheable(wb); - - buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); - host_functions2json(host, wb); - buffer_json_finalize(wb); - - return HTTP_RESP_OK; -} 
- -void web_client_source2buffer(struct web_client *w, BUFFER *source) { - if(web_client_flag_check(w, WEB_CLIENT_FLAG_AUTH_CLOUD)) - buffer_sprintf(source, "method=NC"); - else if(web_client_flag_check(w, WEB_CLIENT_FLAG_AUTH_BEARER)) - buffer_sprintf(source, "method=api-bearer"); - else - buffer_sprintf(source, "method=api"); - - if(web_client_flag_check(w, WEB_CLIENT_FLAG_AUTH_GOD)) - buffer_strcat(source, ",role=god"); - else - buffer_sprintf(source, ",role=%s", http_id2user_role(w->user_role)); - - buffer_sprintf(source, ",permissions="HTTP_ACCESS_FORMAT, (HTTP_ACCESS_FORMAT_CAST)w->access); - - if(w->auth.client_name[0]) - buffer_sprintf(source, ",user=%s", w->auth.client_name); - - if(!uuid_is_null(w->auth.cloud_account_id)) { - char uuid_str[UUID_COMPACT_STR_LEN]; - uuid_unparse_lower_compact(w->auth.cloud_account_id, uuid_str); - buffer_sprintf(source, ",account=%s", uuid_str); - } - - if(w->client_ip[0]) - buffer_sprintf(source, ",ip=%s", w->client_ip); - - if(w->forwarded_for) - buffer_sprintf(source, ",forwarded_for=%s", w->forwarded_for); -} - -static int web_client_api_request_v1_config(RRDHOST *host, struct web_client *w, char *url __maybe_unused) { - char *action = "tree"; - char *path = "/"; - char *id = NULL; - char *add_name = NULL; - int timeout = 120; - - while(url) { - char *value = strsep_skip_consecutive_separators(&url, "&"); - if(!value || !*value) continue; - - char *name = strsep_skip_consecutive_separators(&value, "="); - if(!name || !*name) continue; - if(!value || !*value) continue; - - // name and value are now the parameters - // they are not null and not empty - - if(!strcmp(name, "action")) - action = value; - else if(!strcmp(name, "path")) - path = value; - else if(!strcmp(name, "id")) - id = value; - else if(!strcmp(name, "name")) - add_name = value; - else if(!strcmp(name, "timeout")) { - timeout = (int)strtol(value, NULL, 10); - if(timeout < 10) - timeout = 10; - } - } - - char transaction[UUID_COMPACT_STR_LEN]; - 
uuid_unparse_lower_compact(w->transaction, transaction); - - size_t len = strlen(action) + (id ? strlen(id) : 0) + strlen(path) + (add_name ? strlen(add_name) : 0) + 100; - - char cmd[len]; - if(strcmp(action, "tree") == 0) - snprintfz(cmd, sizeof(cmd), PLUGINSD_FUNCTION_CONFIG " tree '%s' '%s'", path, id?id:""); - else { - DYNCFG_CMDS c = dyncfg_cmds2id(action); - if(!id || !*id || !dyncfg_is_valid_id(id)) { - rrd_call_function_error(w->response.data, "invalid id given", HTTP_RESP_BAD_REQUEST); - return HTTP_RESP_BAD_REQUEST; - } - - if(c == DYNCFG_CMD_NONE) { - rrd_call_function_error(w->response.data, "invalid action given", HTTP_RESP_BAD_REQUEST); - return HTTP_RESP_BAD_REQUEST; - } - - if(c == DYNCFG_CMD_ADD || c == DYNCFG_CMD_USERCONFIG || c == DYNCFG_CMD_TEST) { - if(c == DYNCFG_CMD_TEST && (!add_name || !*add_name)) { - // backwards compatibility for TEST without a name - char *colon = strrchr(id, ':'); - if(colon) { - *colon = '\0'; - add_name = ++colon; - } - else - add_name = "test"; - } - - if(!add_name || !*add_name || !dyncfg_is_valid_id(add_name)) { - rrd_call_function_error(w->response.data, "invalid name given", HTTP_RESP_BAD_REQUEST); - return HTTP_RESP_BAD_REQUEST; - } - snprintfz(cmd, sizeof(cmd), PLUGINSD_FUNCTION_CONFIG " %s %s %s", id, dyncfg_id2cmd_one(c), add_name); - } - else - snprintfz(cmd, sizeof(cmd), PLUGINSD_FUNCTION_CONFIG " %s %s", id, dyncfg_id2cmd_one(c)); - } - - CLEAN_BUFFER *source = buffer_create(100, NULL); - web_client_source2buffer(w, source); - - buffer_flush(w->response.data); - int code = rrd_function_run(host, w->response.data, timeout, w->access, cmd, - true, transaction, - NULL, NULL, - web_client_progress_functions_update, w, - web_client_interrupt_callback, w, - w->payload, buffer_tostring(source)); - - return code; -} - -#ifndef ENABLE_DBENGINE -int web_client_api_request_v1_dbengine_stats(RRDHOST *host __maybe_unused, struct web_client *w __maybe_unused, char *url __maybe_unused) { - return HTTP_RESP_NOT_FOUND; 
-} -#else -static void web_client_api_v1_dbengine_stats_for_tier(BUFFER *wb, size_t tier) { - RRDENG_SIZE_STATS stats = rrdeng_size_statistics(multidb_ctx[tier]); - - buffer_sprintf(wb, - "\n\t\t\"default_granularity_secs\":%zu" - ",\n\t\t\"sizeof_datafile\":%zu" - ",\n\t\t\"sizeof_page_in_cache\":%zu" - ",\n\t\t\"sizeof_point_data\":%zu" - ",\n\t\t\"sizeof_page_data\":%zu" - ",\n\t\t\"pages_per_extent\":%zu" - ",\n\t\t\"datafiles\":%zu" - ",\n\t\t\"extents\":%zu" - ",\n\t\t\"extents_pages\":%zu" - ",\n\t\t\"points\":%zu" - ",\n\t\t\"metrics\":%zu" - ",\n\t\t\"metrics_pages\":%zu" - ",\n\t\t\"extents_compressed_bytes\":%zu" - ",\n\t\t\"pages_uncompressed_bytes\":%zu" - ",\n\t\t\"pages_duration_secs\":%lld" - ",\n\t\t\"single_point_pages\":%zu" - ",\n\t\t\"first_t\":%ld" - ",\n\t\t\"last_t\":%ld" - ",\n\t\t\"database_retention_secs\":%lld" - ",\n\t\t\"average_compression_savings\":%0.2f" - ",\n\t\t\"average_point_duration_secs\":%0.2f" - ",\n\t\t\"average_metric_retention_secs\":%0.2f" - ",\n\t\t\"ephemeral_metrics_per_day_percent\":%0.2f" - ",\n\t\t\"average_page_size_bytes\":%0.2f" - ",\n\t\t\"estimated_concurrently_collected_metrics\":%zu" - ",\n\t\t\"currently_collected_metrics\":%zu" - ",\n\t\t\"disk_space\":%zu" - ",\n\t\t\"max_disk_space\":%zu" - , stats.default_granularity_secs - , stats.sizeof_datafile - , stats.sizeof_page_in_cache - , stats.sizeof_point_data - , stats.sizeof_page_data - , stats.pages_per_extent - , stats.datafiles - , stats.extents - , stats.extents_pages - , stats.points - , stats.metrics - , stats.metrics_pages - , stats.extents_compressed_bytes - , stats.pages_uncompressed_bytes - , (long long)stats.pages_duration_secs - , stats.single_point_pages - , stats.first_time_s - , stats.last_time_s - , (long long)stats.database_retention_secs - , stats.average_compression_savings - , stats.average_point_duration_secs - , stats.average_metric_retention_secs - , stats.ephemeral_metrics_per_day_percent - , stats.average_page_size_bytes - , 
stats.estimated_concurrently_collected_metrics - , stats.currently_collected_metrics - , stats.disk_space - , stats.max_disk_space - ); -} -int web_client_api_request_v1_dbengine_stats(RRDHOST *host __maybe_unused, struct web_client *w, char *url __maybe_unused) { - if (!netdata_ready) - return HTTP_RESP_SERVICE_UNAVAILABLE; - - BUFFER *wb = w->response.data; - buffer_flush(wb); - - if(!dbengine_enabled) { - buffer_strcat(wb, "dbengine is not enabled"); - return HTTP_RESP_NOT_FOUND; - } - - wb->content_type = CT_APPLICATION_JSON; - buffer_no_cacheable(wb); - buffer_strcat(wb, "{"); - for(size_t tier = 0; tier < storage_tiers ;tier++) { - buffer_sprintf(wb, "%s\n\t\"tier%zu\": {", tier?",":"", tier); - web_client_api_v1_dbengine_stats_for_tier(wb, tier); - buffer_strcat(wb, "\n\t}"); - } - buffer_strcat(wb, "\n}"); - - return HTTP_RESP_OK; -} -#endif - -#define HLT_MGM "manage/health" -int web_client_api_request_v1_mgmt(RRDHOST *host, struct web_client *w, char *url) { - const char *haystack = buffer_tostring(w->url_path_decoded); - char *needle; - - buffer_flush(w->response.data); - - if ((needle = strstr(haystack, HLT_MGM)) == NULL) { - buffer_strcat(w->response.data, "Invalid management request. Curently only 'health' is supported."); - return HTTP_RESP_NOT_FOUND; - } - needle += strlen(HLT_MGM); - if (*needle != '\0') { - buffer_strcat(w->response.data, "Invalid management request. 
Currently only 'health' is supported."); - return HTTP_RESP_NOT_FOUND; - } - return web_client_api_request_v1_mgmt_health(host, w, url); -} +#include "v1/api_v1_calls.h" +#include "v2/api_v2_calls.h" +#include "v3/api_v3_calls.h" static struct web_api_command api_commands_v1[] = { // time-series data APIs { .api = "data", .hash = 0, - .acl = HTTP_ACL_DASHBOARD, + .acl = HTTP_ACL_METRICS, .access = HTTP_ACCESS_ANONYMOUS_DATA, - .callback = web_client_api_request_v1_data, + .callback = api_v1_data, .allow_subpaths = 0 }, +#if defined(ENABLE_API_V1) { .api = "weights", .hash = 0, - .acl = HTTP_ACL_DASHBOARD, + .acl = HTTP_ACL_METRICS, .access = HTTP_ACCESS_ANONYMOUS_DATA, - .callback = web_client_api_request_v1_weights, + .callback = api_v1_weights, .allow_subpaths = 0 }, { // deprecated - do not use anymore - use "weights" .api = "metric_correlations", .hash = 0, - .acl = HTTP_ACL_DASHBOARD, + .acl = HTTP_ACL_METRICS, .access = HTTP_ACCESS_ANONYMOUS_DATA, - .callback = web_client_api_request_v1_metric_correlations, + .callback = api_v1_metric_correlations, .allow_subpaths = 0 }, +#endif { // exporting API .api = "allmetrics", .hash = 0, - .acl = HTTP_ACL_DASHBOARD, - .access = HTTP_ACCESS_ANONYMOUS_DATA, - .callback = web_client_api_request_v1_allmetrics, - .allow_subpaths = 0 - }, - { - // badges can be fetched with both dashboard and badge ACL - .api = "badge.svg", - .hash = 0, - .acl = HTTP_ACL_BADGES, + .acl = HTTP_ACL_METRICS, .access = HTTP_ACCESS_ANONYMOUS_DATA, - .callback = web_client_api_request_v1_badge, + .callback = api_v1_allmetrics, .allow_subpaths = 0 }, // alerts APIs +#if defined(ENABLE_API_V1) { .api = "alarms", .hash = 0, - .acl = HTTP_ACL_DASHBOARD, + .acl = HTTP_ACL_ALERTS, .access = HTTP_ACCESS_ANONYMOUS_DATA, - .callback = web_client_api_request_v1_alarms, + .callback = api_v1_alarms, .allow_subpaths = 0 }, { .api = "alarms_values", .hash = 0, - .acl = HTTP_ACL_DASHBOARD, + .acl = HTTP_ACL_ALERTS, .access = HTTP_ACCESS_ANONYMOUS_DATA, - 
.callback = web_client_api_request_v1_alarms_values, + .callback = api_v1_alarms_values, .allow_subpaths = 0 }, { .api = "alarm_log", .hash = 0, - .acl = HTTP_ACL_DASHBOARD, + .acl = HTTP_ACL_ALERTS, .access = HTTP_ACCESS_ANONYMOUS_DATA, - .callback = web_client_api_request_v1_alarm_log, + .callback = api_v1_alarm_log, .allow_subpaths = 0 }, { .api = "alarm_variables", .hash = 0, - .acl = HTTP_ACL_DASHBOARD, + .acl = HTTP_ACL_ALERTS, .access = HTTP_ACCESS_ANONYMOUS_DATA, - .callback = web_client_api_request_v1_alarm_variables, + .callback = api_v1_alarm_variables, .allow_subpaths = 0 }, { .api = "variable", .hash = 0, - .acl = HTTP_ACL_DASHBOARD, + .acl = HTTP_ACL_ALERTS, .access = HTTP_ACCESS_ANONYMOUS_DATA, - .callback = web_client_api_request_variable, + .callback = api_v1_variable, .allow_subpaths = 0 }, { .api = "alarm_count", .hash = 0, - .acl = HTTP_ACL_DASHBOARD, + .acl = HTTP_ACL_ALERTS, .access = HTTP_ACCESS_ANONYMOUS_DATA, - .callback = web_client_api_request_v1_alarm_count, + .callback = api_v1_alarm_count, .allow_subpaths = 0 }, +#endif // functions APIs - they check permissions per function call { .api = "function", .hash = 0, - .acl = HTTP_ACL_DASHBOARD, + .acl = HTTP_ACL_FUNCTIONS, .access = HTTP_ACCESS_ANONYMOUS_DATA, - .callback = web_client_api_request_v1_function, + .callback = api_v1_function, .allow_subpaths = 0 }, + +#if defined(ENABLE_API_V1) { .api = "functions", .hash = 0, - .acl = HTTP_ACL_DASHBOARD, + .acl = HTTP_ACL_FUNCTIONS, .access = HTTP_ACCESS_ANONYMOUS_DATA, - .callback = web_client_api_request_v1_functions, + .callback = api_v1_functions, .allow_subpaths = 0 }, +#endif // time-series metadata APIs +#if defined(ENABLE_API_V1) { .api = "chart", .hash = 0, - .acl = HTTP_ACL_DASHBOARD, + .acl = HTTP_ACL_METRICS, .access = HTTP_ACCESS_ANONYMOUS_DATA, - .callback = web_client_api_request_v1_chart, + .callback = api_v1_chart, .allow_subpaths = 0 }, +#endif { .api = "charts", .hash = 0, - .acl = HTTP_ACL_DASHBOARD, + .acl = 
HTTP_ACL_METRICS, .access = HTTP_ACCESS_ANONYMOUS_DATA, - .callback = web_client_api_request_v1_charts, + .callback = api_v1_charts, .allow_subpaths = 0 }, { .api = "context", .hash = 0, - .acl = HTTP_ACL_DASHBOARD, + .acl = HTTP_ACL_METRICS, .access = HTTP_ACCESS_ANONYMOUS_DATA, - .callback = web_client_api_request_v1_context, + .callback = api_v1_context, .allow_subpaths = 0 }, { .api = "contexts", .hash = 0, - .acl = HTTP_ACL_DASHBOARD, + .acl = HTTP_ACL_METRICS, .access = HTTP_ACCESS_ANONYMOUS_DATA, - .callback = web_client_api_request_v1_contexts, + .callback = api_v1_contexts, .allow_subpaths = 0 }, // registry APIs +#if defined(ENABLE_API_V1) { // registry checks the ACL by itself, so we allow everything .api = "registry", .hash = 0, .acl = HTTP_ACL_NONE, // it manages acl by itself .access = HTTP_ACCESS_NONE, // it manages access by itself - .callback = web_client_api_request_v1_registry, + .callback = api_v1_registry, .allow_subpaths = 0 }, +#endif // agent information APIs +#if defined(ENABLE_API_V1) { .api = "info", .hash = 0, - .acl = HTTP_ACL_DASHBOARD, + .acl = HTTP_ACL_NODES, .access = HTTP_ACCESS_ANONYMOUS_DATA, - .callback = web_client_api_request_v1_info, + .callback = api_v1_info, .allow_subpaths = 0 }, { .api = "aclk", .hash = 0, - .acl = HTTP_ACL_DASHBOARD, + .acl = HTTP_ACL_NODES, .access = HTTP_ACCESS_ANONYMOUS_DATA, - .callback = web_client_api_request_v1_aclk_state, + .callback = api_v1_aclk, .allow_subpaths = 0 }, { // deprecated - use /api/v2/info .api = "dbengine_stats", .hash = 0, - .acl = HTTP_ACL_DASHBOARD, + .acl = HTTP_ACL_NODES, .access = HTTP_ACCESS_ANONYMOUS_DATA, - .callback = web_client_api_request_v1_dbengine_stats, + .callback = api_v1_dbengine_stats, .allow_subpaths = 0 }, - - // dyncfg APIs { - .api = "config", + .api = "ml_info", .hash = 0, - .acl = HTTP_ACL_DASHBOARD, + .acl = HTTP_ACL_NODES, .access = HTTP_ACCESS_ANONYMOUS_DATA, - .callback = web_client_api_request_v1_config, + .callback = api_v1_ml_info, .allow_subpaths 
= 0 }, - -#if defined(ENABLE_ML) { - .api = "ml_info", - .hash = 0, - .acl = HTTP_ACL_DASHBOARD, - .access = HTTP_ACCESS_ANONYMOUS_DATA, - .callback = web_client_api_request_v1_ml_info, - .allow_subpaths = 0 + .api = "manage", + .hash = 0, + .acl = HTTP_ACL_MANAGEMENT, + .access = HTTP_ACCESS_NONE, // it manages access by itself + .callback = api_v1_manage, + .allow_subpaths = 1 }, #endif + // dyncfg APIs { - // deprecated - .api = "manage", + .api = "config", .hash = 0, - .acl = HTTP_ACL_MANAGEMENT, - .access = HTTP_ACCESS_NONE, // it manages access by itself - .callback = web_client_api_request_v1_mgmt, - .allow_subpaths = 1 + .acl = HTTP_ACL_DYNCFG, + .access = HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v1_config, + .allow_subpaths = 0 }, { diff --git a/src/web/api/web_api_v1.h b/src/web/api/web_api_v1.h index cf0efbd13c33f8..c102ac75c1da30 100644 --- a/src/web/api/web_api_v1.h +++ b/src/web/api/web_api_v1.h @@ -5,43 +5,7 @@ #include "web_api.h" -struct web_client; - -CONTEXTS_V2_OPTIONS web_client_api_request_v2_context_options(char *o); -CONTEXTS_V2_ALERT_STATUS web_client_api_request_v2_alert_status(char *o); -void web_client_api_request_v2_contexts_options_to_buffer_json_array(BUFFER *wb, const char *key, CONTEXTS_V2_OPTIONS options); -void web_client_api_request_v2_contexts_alerts_status_to_buffer_json_array(BUFFER *wb, const char *key, CONTEXTS_V2_ALERT_STATUS options); - -RRDR_OPTIONS rrdr_options_parse(char *o); -RRDR_OPTIONS rrdr_options_parse_one(const char *o); - -void rrdr_options_to_buffer(BUFFER *wb, RRDR_OPTIONS options); -void rrdr_options_to_buffer_json_array(BUFFER *wb, const char *key, RRDR_OPTIONS options); -void web_client_api_request_v1_data_options_to_string(char *buf, size_t size, RRDR_OPTIONS options); - -uint32_t web_client_api_request_v1_data_format(char *name); -uint32_t web_client_api_request_v1_data_google_format(char *name); - -int web_client_api_request_v1_alarms(RRDHOST *host, struct web_client *w, char *url); -int 
web_client_api_request_v1_alarms_values(RRDHOST *host, struct web_client *w, char *url); -int web_client_api_request_v1_alarm_log(RRDHOST *host, struct web_client *w, char *url); -int web_client_api_request_single_chart(RRDHOST *host, struct web_client *w, char *url, void callback(RRDSET *st, BUFFER *buf)); -int web_client_api_request_v1_alarm_variables(RRDHOST *host, struct web_client *w, char *url); -int web_client_api_request_v1_alarm_count(RRDHOST *host, struct web_client *w, char *url); -int web_client_api_request_v1_charts(RRDHOST *host, struct web_client *w, char *url); -int web_client_api_request_v1_chart(RRDHOST *host, struct web_client *w, char *url); -int web_client_api_request_v1_registry(RRDHOST *host, struct web_client *w, char *url); -int web_client_api_request_v1_info(RRDHOST *host, struct web_client *w, char *url); int web_client_api_request_v1(RRDHOST *host, struct web_client *w, char *url_path_endpoint); -int web_client_api_request_v1_info_fill_buffer(RRDHOST *host, BUFFER *wb); - -void web_client_api_v1_init(void); -void web_client_api_v1_management_init(void); - -void host_labels2json(RRDHOST *host, BUFFER *wb, const char *key); -void web_client_api_request_v1_info_summary_alarm_statuses(RRDHOST *host, BUFFER *wb, const char *key); - -void web_client_source2buffer(struct web_client *w, BUFFER *source); extern char *api_secret; diff --git a/src/web/api/web_api_v2.c b/src/web/api/web_api_v2.c index c62ed9ed37cf22..f8e52a94b2c5cb 100644 --- a/src/web/api/web_api_v2.c +++ b/src/web/api/web_api_v2.c @@ -1,604 +1,27 @@ // SPDX-License-Identifier: GPL-3.0-or-later #include "web_api_v2.h" -#include "../rtc/webrtc.h" - -static bool verify_agent_uuids(const char *machine_guid, const char *node_id, const char *claim_id) { - if(!machine_guid || !node_id || !claim_id) - return false; - - if(strcmp(machine_guid, localhost->machine_guid) != 0) - return false; - - char *agent_claim_id = get_agent_claimid(); - - bool not_verified = (!agent_claim_id || 
strcmp(claim_id, agent_claim_id) != 0); - freez(agent_claim_id); - - if(not_verified || !localhost->node_id) - return false; - - char buf[UUID_STR_LEN]; - uuid_unparse_lower(*localhost->node_id, buf); - - if(strcmp(node_id, buf) != 0) - return false; - - return true; -} - -int api_v2_bearer_protection(RRDHOST *host __maybe_unused, struct web_client *w __maybe_unused, char *url) { - char *machine_guid = NULL; - char *claim_id = NULL; - char *node_id = NULL; - bool protection = netdata_is_protected_by_bearer; - - while (url) { - char *value = strsep_skip_consecutive_separators(&url, "&"); - if (!value || !*value) continue; - - char *name = strsep_skip_consecutive_separators(&value, "="); - if (!name || !*name) continue; - if (!value || !*value) continue; - - if(!strcmp(name, "bearer_protection")) { - if(!strcmp(value, "on") || !strcmp(value, "true") || !strcmp(value, "yes")) - protection = true; - else - protection = false; - } - else if(!strcmp(name, "machine_guid")) - machine_guid = value; - else if(!strcmp(name, "claim_id")) - claim_id = value; - else if(!strcmp(name, "node_id")) - node_id = value; - } - - if(!verify_agent_uuids(machine_guid, node_id, claim_id)) { - buffer_flush(w->response.data); - buffer_strcat(w->response.data, "The request is missing or not matching local UUIDs"); - return HTTP_RESP_BAD_REQUEST; - } - - netdata_is_protected_by_bearer = protection; - - BUFFER *wb = w->response.data; - buffer_flush(wb); - buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); - buffer_json_member_add_boolean(wb, "bearer_protection", netdata_is_protected_by_bearer); - buffer_json_finalize(wb); - - return HTTP_RESP_OK; -} - -int api_v2_bearer_token(RRDHOST *host __maybe_unused, struct web_client *w __maybe_unused, char *url __maybe_unused) { - char *machine_guid = NULL; - char *claim_id = NULL; - char *node_id = NULL; - - while(url) { - char *value = strsep_skip_consecutive_separators(&url, "&"); - if (!value || !*value) continue; - - char 
*name = strsep_skip_consecutive_separators(&value, "="); - if (!name || !*name) continue; - if (!value || !*value) continue; - - if(!strcmp(name, "machine_guid")) - machine_guid = value; - else if(!strcmp(name, "claim_id")) - claim_id = value; - else if(!strcmp(name, "node_id")) - node_id = value; - } - - if(!verify_agent_uuids(machine_guid, node_id, claim_id)) { - buffer_flush(w->response.data); - buffer_strcat(w->response.data, "The request is missing or not matching local UUIDs"); - return HTTP_RESP_BAD_REQUEST; - } - - nd_uuid_t uuid; - time_t expires_s = bearer_create_token(&uuid, w); - - BUFFER *wb = w->response.data; - buffer_flush(wb); - buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); - buffer_json_member_add_string(wb, "mg", localhost->machine_guid); - buffer_json_member_add_boolean(wb, "bearer_protection", netdata_is_protected_by_bearer); - buffer_json_member_add_uuid(wb, "token", &uuid); - buffer_json_member_add_time_t(wb, "expiration", expires_s); - buffer_json_finalize(wb); - - return HTTP_RESP_OK; -} - -static int web_client_api_request_v2_contexts_internal(RRDHOST *host __maybe_unused, struct web_client *w, char *url, CONTEXTS_V2_MODE mode) { - struct api_v2_contexts_request req = { 0 }; - - while(url) { - char *value = strsep_skip_consecutive_separators(&url, "&"); - if(!value || !*value) continue; - - char *name = strsep_skip_consecutive_separators(&value, "="); - if(!name || !*name) continue; - if(!value || !*value) continue; - - // name and value are now the parameters - // they are not null and not empty - - if(!strcmp(name, "scope_nodes")) - req.scope_nodes = value; - else if(!strcmp(name, "nodes")) - req.nodes = value; - else if((mode & (CONTEXTS_V2_CONTEXTS | CONTEXTS_V2_SEARCH | CONTEXTS_V2_ALERTS | CONTEXTS_V2_ALERT_TRANSITIONS)) && !strcmp(name, "scope_contexts")) - req.scope_contexts = value; - else if((mode & (CONTEXTS_V2_CONTEXTS | CONTEXTS_V2_SEARCH | CONTEXTS_V2_ALERTS | CONTEXTS_V2_ALERT_TRANSITIONS)) && 
!strcmp(name, "contexts")) - req.contexts = value; - else if((mode & CONTEXTS_V2_SEARCH) && !strcmp(name, "q")) - req.q = value; - else if(!strcmp(name, "options")) - req.options = web_client_api_request_v2_context_options(value); - else if(!strcmp(name, "after")) - req.after = str2l(value); - else if(!strcmp(name, "before")) - req.before = str2l(value); - else if(!strcmp(name, "timeout")) - req.timeout_ms = str2l(value); - else if(mode & (CONTEXTS_V2_ALERTS | CONTEXTS_V2_ALERT_TRANSITIONS)) { - if (!strcmp(name, "alert")) - req.alerts.alert = value; - else if (!strcmp(name, "transition")) - req.alerts.transition = value; - else if(mode & CONTEXTS_V2_ALERTS) { - if (!strcmp(name, "status")) - req.alerts.status = web_client_api_request_v2_alert_status(value); - } - else if(mode & CONTEXTS_V2_ALERT_TRANSITIONS) { - if (!strcmp(name, "last")) - req.alerts.last = strtoul(value, NULL, 0); - else if(!strcmp(name, "context")) - req.contexts = value; - else if (!strcmp(name, "anchor_gi")) { - req.alerts.global_id_anchor = str2ull(value, NULL); - } - else { - for(int i = 0; i < ATF_TOTAL_ENTRIES ;i++) { - if(!strcmp(name, alert_transition_facets[i].query_param)) - req.alerts.facets[i] = value; - } - } - } - } - } - - if ((mode & CONTEXTS_V2_ALERT_TRANSITIONS) && !req.alerts.last) - req.alerts.last = 1; - - buffer_flush(w->response.data); - buffer_no_cacheable(w->response.data); - return rrdcontext_to_json_v2(w->response.data, &req, mode); -} - -static int web_client_api_request_v2_alert_transitions(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { - return web_client_api_request_v2_contexts_internal(host, w, url, CONTEXTS_V2_ALERT_TRANSITIONS | CONTEXTS_V2_NODES); -} - -static int web_client_api_request_v2_alerts(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { - return web_client_api_request_v2_contexts_internal(host, w, url, CONTEXTS_V2_ALERTS | CONTEXTS_V2_NODES); -} - -static int web_client_api_request_v2_functions(RRDHOST *host 
__maybe_unused, struct web_client *w, char *url) { - return web_client_api_request_v2_contexts_internal(host, w, url, CONTEXTS_V2_FUNCTIONS | CONTEXTS_V2_NODES | CONTEXTS_V2_AGENTS | CONTEXTS_V2_VERSIONS); -} - -static int web_client_api_request_v2_versions(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { - return web_client_api_request_v2_contexts_internal(host, w, url, CONTEXTS_V2_VERSIONS); -} - -static int web_client_api_request_v2_q(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { - return web_client_api_request_v2_contexts_internal(host, w, url, CONTEXTS_V2_SEARCH | CONTEXTS_V2_CONTEXTS | CONTEXTS_V2_NODES | CONTEXTS_V2_AGENTS | CONTEXTS_V2_VERSIONS); -} - -static int web_client_api_request_v2_contexts(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { - return web_client_api_request_v2_contexts_internal(host, w, url, CONTEXTS_V2_CONTEXTS | CONTEXTS_V2_NODES | CONTEXTS_V2_AGENTS | CONTEXTS_V2_VERSIONS); -} - -static int web_client_api_request_v2_nodes(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { - return web_client_api_request_v2_contexts_internal(host, w, url, CONTEXTS_V2_NODES | CONTEXTS_V2_NODES_INFO); -} - -static int web_client_api_request_v2_info(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { - return web_client_api_request_v2_contexts_internal(host, w, url, CONTEXTS_V2_AGENTS | CONTEXTS_V2_AGENTS_INFO); -} - -static int web_client_api_request_v2_node_instances(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { - return web_client_api_request_v2_contexts_internal(host, w, url, CONTEXTS_V2_NODES | CONTEXTS_V2_NODE_INSTANCES | CONTEXTS_V2_AGENTS | CONTEXTS_V2_AGENTS_INFO | CONTEXTS_V2_VERSIONS); -} - -static int web_client_api_request_v2_weights(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { - return web_client_api_request_weights(host, w, url, WEIGHTS_METHOD_VALUE, WEIGHTS_FORMAT_MULTINODE, 2); -} - -static int 
web_client_api_request_v2_claim(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { - return api_v2_claim(w, url); -} - -static int web_client_api_request_v2_alert_config(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { - const char *config = NULL; - - while(url) { - char *value = strsep_skip_consecutive_separators(&url, "&"); - if(!value || !*value) continue; - - char *name = strsep_skip_consecutive_separators(&value, "="); - if(!name || !*name) continue; - if(!value || !*value) continue; - - // name and value are now the parameters - // they are not null and not empty - - if(!strcmp(name, "config")) - config = value; - } - - buffer_flush(w->response.data); - - if(!config) { - w->response.data->content_type = CT_TEXT_PLAIN; - buffer_strcat(w->response.data, "A config hash ID is required. Add ?config=UUID query param"); - return HTTP_RESP_BAD_REQUEST; - } - - return contexts_v2_alert_config_to_json(w, config); -} - - -#define GROUP_BY_KEY_MAX_LENGTH 30 -static struct { - char group_by[GROUP_BY_KEY_MAX_LENGTH + 1]; - char aggregation[GROUP_BY_KEY_MAX_LENGTH + 1]; - char group_by_label[GROUP_BY_KEY_MAX_LENGTH + 1]; -} group_by_keys[MAX_QUERY_GROUP_BY_PASSES]; - -__attribute__((constructor)) void initialize_group_by_keys(void) { - for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) { - snprintfz(group_by_keys[g].group_by, GROUP_BY_KEY_MAX_LENGTH, "group_by[%zu]", g); - snprintfz(group_by_keys[g].aggregation, GROUP_BY_KEY_MAX_LENGTH, "aggregation[%zu]", g); - snprintfz(group_by_keys[g].group_by_label, GROUP_BY_KEY_MAX_LENGTH, "group_by_label[%zu]", g); - } -} - -static int web_client_api_request_v2_data(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { - usec_t received_ut = now_monotonic_usec(); - - int ret = HTTP_RESP_BAD_REQUEST; - - buffer_flush(w->response.data); - - char *google_version = "0.6", - *google_reqId = "0", - *google_sig = "0", - *google_out = "json", - *responseHandler = NULL, - *outFileName = NULL; - - 
time_t last_timestamp_in_data = 0, google_timestamp = 0; - - char *scope_nodes = NULL; - char *scope_contexts = NULL; - char *nodes = NULL; - char *contexts = NULL; - char *instances = NULL; - char *dimensions = NULL; - char *before_str = NULL; - char *after_str = NULL; - char *resampling_time_str = NULL; - char *points_str = NULL; - char *timeout_str = NULL; - char *labels = NULL; - char *alerts = NULL; - char *time_group_options = NULL; - char *tier_str = NULL; - size_t tier = 0; - RRDR_TIME_GROUPING time_group = RRDR_GROUPING_AVERAGE; - DATASOURCE_FORMAT format = DATASOURCE_JSON2; - RRDR_OPTIONS options = RRDR_OPTION_VIRTUAL_POINTS | RRDR_OPTION_JSON_WRAP | RRDR_OPTION_RETURN_JWAR; - - struct group_by_pass group_by[MAX_QUERY_GROUP_BY_PASSES] = { - { - .group_by = RRDR_GROUP_BY_DIMENSION, - .group_by_label = NULL, - .aggregation = RRDR_GROUP_BY_FUNCTION_AVERAGE, - }, - }; - - size_t group_by_idx = 0, group_by_label_idx = 0, aggregation_idx = 0; - - while(url) { - char *value = strsep_skip_consecutive_separators(&url, "&"); - if(!value || !*value) continue; - - char *name = strsep_skip_consecutive_separators(&value, "="); - if(!name || !*name) continue; - if(!value || !*value) continue; - - // name and value are now the parameters - // they are not null and not empty - - if(!strcmp(name, "scope_nodes")) scope_nodes = value; - else if(!strcmp(name, "scope_contexts")) scope_contexts = value; - else if(!strcmp(name, "nodes")) nodes = value; - else if(!strcmp(name, "contexts")) contexts = value; - else if(!strcmp(name, "instances")) instances = value; - else if(!strcmp(name, "dimensions")) dimensions = value; - else if(!strcmp(name, "labels")) labels = value; - else if(!strcmp(name, "alerts")) alerts = value; - else if(!strcmp(name, "after")) after_str = value; - else if(!strcmp(name, "before")) before_str = value; - else if(!strcmp(name, "points")) points_str = value; - else if(!strcmp(name, "timeout")) timeout_str = value; - else if(!strcmp(name, "group_by")) { - 
group_by[group_by_idx++].group_by = group_by_parse(value); - if(group_by_idx >= MAX_QUERY_GROUP_BY_PASSES) - group_by_idx = MAX_QUERY_GROUP_BY_PASSES - 1; - } - else if(!strcmp(name, "group_by_label")) { - group_by[group_by_label_idx++].group_by_label = value; - if(group_by_label_idx >= MAX_QUERY_GROUP_BY_PASSES) - group_by_label_idx = MAX_QUERY_GROUP_BY_PASSES - 1; - } - else if(!strcmp(name, "aggregation")) { - group_by[aggregation_idx++].aggregation = group_by_aggregate_function_parse(value); - if(aggregation_idx >= MAX_QUERY_GROUP_BY_PASSES) - aggregation_idx = MAX_QUERY_GROUP_BY_PASSES - 1; - } - else if(!strcmp(name, "format")) format = web_client_api_request_v1_data_format(value); - else if(!strcmp(name, "options")) options |= rrdr_options_parse(value); - else if(!strcmp(name, "time_group")) time_group = time_grouping_parse(value, RRDR_GROUPING_AVERAGE); - else if(!strcmp(name, "time_group_options")) time_group_options = value; - else if(!strcmp(name, "time_resampling")) resampling_time_str = value; - else if(!strcmp(name, "tier")) tier_str = value; - else if(!strcmp(name, "callback")) responseHandler = value; - else if(!strcmp(name, "filename")) outFileName = value; - else if(!strcmp(name, "tqx")) { - // parse Google Visualization API options - // https://developers.google.com/chart/interactive/docs/dev/implementing_data_source - char *tqx_name, *tqx_value; - - while(value) { - tqx_value = strsep_skip_consecutive_separators(&value, ";"); - if(!tqx_value || !*tqx_value) continue; - - tqx_name = strsep_skip_consecutive_separators(&tqx_value, ":"); - if(!tqx_name || !*tqx_name) continue; - if(!tqx_value || !*tqx_value) continue; - - if(!strcmp(tqx_name, "version")) - google_version = tqx_value; - else if(!strcmp(tqx_name, "reqId")) - google_reqId = tqx_value; - else if(!strcmp(tqx_name, "sig")) { - google_sig = tqx_value; - google_timestamp = strtoul(google_sig, NULL, 0); - } - else if(!strcmp(tqx_name, "out")) { - google_out = tqx_value; - format = 
web_client_api_request_v1_data_google_format(google_out); - } - else if(!strcmp(tqx_name, "responseHandler")) - responseHandler = tqx_value; - else if(!strcmp(tqx_name, "outFileName")) - outFileName = tqx_value; - } - } - else { - for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) { - if(!strcmp(name, group_by_keys[g].group_by)) - group_by[g].group_by = group_by_parse(value); - else if(!strcmp(name, group_by_keys[g].group_by_label)) - group_by[g].group_by_label = value; - else if(!strcmp(name, group_by_keys[g].aggregation)) - group_by[g].aggregation = group_by_aggregate_function_parse(value); - } - } - } - - // validate the google parameters given - fix_google_param(google_out); - fix_google_param(google_sig); - fix_google_param(google_reqId); - fix_google_param(google_version); - fix_google_param(responseHandler); - fix_google_param(outFileName); - - for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) { - if (group_by[g].group_by_label && *group_by[g].group_by_label) - group_by[g].group_by |= RRDR_GROUP_BY_LABEL; - } - - if(group_by[0].group_by == RRDR_GROUP_BY_NONE) - group_by[0].group_by = RRDR_GROUP_BY_DIMENSION; - - for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) { - if ((group_by[g].group_by & ~(RRDR_GROUP_BY_DIMENSION)) || (options & RRDR_OPTION_PERCENTAGE)) { - options |= RRDR_OPTION_ABSOLUTE; - break; - } - } - - if(options & RRDR_OPTION_DEBUG) - options &= ~RRDR_OPTION_MINIFY; - - if(tier_str && *tier_str) { - tier = str2ul(tier_str); - if(tier < storage_tiers) - options |= RRDR_OPTION_SELECTED_TIER; - else - tier = 0; - } - - time_t before = (before_str && *before_str)?str2l(before_str):0; - time_t after = (after_str && *after_str) ?str2l(after_str):-600; - size_t points = (points_str && *points_str)?str2u(points_str):0; - int timeout = (timeout_str && *timeout_str)?str2i(timeout_str): 0; - time_t resampling_time = (resampling_time_str && *resampling_time_str) ? 
str2l(resampling_time_str) : 0; - - QUERY_TARGET_REQUEST qtr = { - .version = 2, - .scope_nodes = scope_nodes, - .scope_contexts = scope_contexts, - .after = after, - .before = before, - .host = NULL, - .st = NULL, - .nodes = nodes, - .contexts = contexts, - .instances = instances, - .dimensions = dimensions, - .alerts = alerts, - .timeout_ms = timeout, - .points = points, - .format = format, - .options = options, - .time_group_method = time_group, - .time_group_options = time_group_options, - .resampling_time = resampling_time, - .tier = tier, - .chart_label_key = NULL, - .labels = labels, - .query_source = QUERY_SOURCE_API_DATA, - .priority = STORAGE_PRIORITY_NORMAL, - .received_ut = received_ut, - - .interrupt_callback = web_client_interrupt_callback, - .interrupt_callback_data = w, - - .transaction = &w->transaction, - }; - - for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) - qtr.group_by[g] = group_by[g]; - - QUERY_TARGET *qt = query_target_create(&qtr); - ONEWAYALLOC *owa = NULL; - - if(!qt) { - buffer_sprintf(w->response.data, "Failed to prepare the query."); - ret = HTTP_RESP_INTERNAL_SERVER_ERROR; - goto cleanup; - } - - web_client_timeout_checkpoint_set(w, timeout); - if(web_client_timeout_checkpoint_and_check(w, NULL)) { - ret = w->response.code; - goto cleanup; - } - - if(outFileName && *outFileName) { - buffer_sprintf(w->response.header, "Content-Disposition: attachment; filename=\"%s\"\r\n", outFileName); - netdata_log_debug(D_WEB_CLIENT, "%llu: generating outfilename header: '%s'", w->id, outFileName); - } - - if(format == DATASOURCE_DATATABLE_JSONP) { - if(responseHandler == NULL) - responseHandler = "google.visualization.Query.setResponse"; - - netdata_log_debug(D_WEB_CLIENT_ACCESS, "%llu: GOOGLE JSON/JSONP: version = '%s', reqId = '%s', sig = '%s', out = '%s', responseHandler = '%s', outFileName = '%s'", - w->id, google_version, google_reqId, google_sig, google_out, responseHandler, outFileName - ); - - buffer_sprintf( - w->response.data, - 
"%s({version:'%s',reqId:'%s',status:'ok',sig:'%"PRId64"',table:", - responseHandler, - google_version, - google_reqId, - (int64_t)now_realtime_sec()); - } - else if(format == DATASOURCE_JSONP) { - if(responseHandler == NULL) - responseHandler = "callback"; - - buffer_strcat(w->response.data, responseHandler); - buffer_strcat(w->response.data, "("); - } - - owa = onewayalloc_create(0); - ret = data_query_execute(owa, w->response.data, qt, &last_timestamp_in_data); - - if(format == DATASOURCE_DATATABLE_JSONP) { - if(google_timestamp < last_timestamp_in_data) - buffer_strcat(w->response.data, "});"); - - else { - // the client already has the latest data - buffer_flush(w->response.data); - buffer_sprintf(w->response.data, - "%s({version:'%s',reqId:'%s',status:'error',errors:[{reason:'not_modified',message:'Data not modified'}]});", - responseHandler, google_version, google_reqId); - } - } - else if(format == DATASOURCE_JSONP) - buffer_strcat(w->response.data, ");"); - - if(qt->internal.relative) - buffer_no_cacheable(w->response.data); - else - buffer_cacheable(w->response.data); - -cleanup: - query_target_release(qt); - onewayalloc_destroy(owa); - return ret; -} - -static int web_client_api_request_v2_webrtc(RRDHOST *host __maybe_unused, struct web_client *w, char *url __maybe_unused) { - return webrtc_new_connection(buffer_tostring(w->payload), w->response.data); -} - -static int web_client_api_request_v2_progress(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { - char *transaction = NULL; - - while(url) { - char *value = strsep_skip_consecutive_separators(&url, "&"); - if(!value || !*value) continue; - - char *name = strsep_skip_consecutive_separators(&value, "="); - if(!name || !*name) continue; - if(!value || !*value) continue; - - // name and value are now the parameters - // they are not null and not empty - - if(!strcmp(name, "transaction")) transaction = value; - } - - nd_uuid_t tr; - uuid_parse_flexi(transaction, tr); - - 
rrd_function_call_progresser(&tr); - - return web_api_v2_report_progress(&tr, w->response.data); -} +#include "v1/api_v1_calls.h" +#include "v2/api_v2_calls.h" +#include "v3/api_v3_calls.h" static struct web_api_command api_commands_v2[] = { +#if defined(ENABLE_API_v2) // time-series multi-node multi-instance data APIs { .api = "data", .hash = 0, - .acl = HTTP_ACL_DASHBOARD, + .acl = HTTP_ACL_METRICS, .access = HTTP_ACCESS_ANONYMOUS_DATA, - .callback = web_client_api_request_v2_data, + .callback = api_v2_data, .allow_subpaths = 0 }, { .api = "weights", .hash = 0, - .acl = HTTP_ACL_DASHBOARD, + .acl = HTTP_ACL_METRICS, .access = HTTP_ACCESS_ANONYMOUS_DATA, - .callback = web_client_api_request_v2_weights, + .callback = api_v2_weights, .allow_subpaths = 0 }, @@ -606,18 +29,18 @@ static struct web_api_command api_commands_v2[] = { { .api = "contexts", .hash = 0, - .acl = HTTP_ACL_DASHBOARD, + .acl = HTTP_ACL_METRICS, .access = HTTP_ACCESS_ANONYMOUS_DATA, - .callback = web_client_api_request_v2_contexts, + .callback = api_v2_contexts, .allow_subpaths = 0 }, { // full text search .api = "q", .hash = 0, - .acl = HTTP_ACL_DASHBOARD, + .acl = HTTP_ACL_METRICS, .access = HTTP_ACCESS_ANONYMOUS_DATA, - .callback = web_client_api_request_v2_q, + .callback = api_v2_q, .allow_subpaths = 0 }, @@ -625,25 +48,25 @@ static struct web_api_command api_commands_v2[] = { { .api = "alerts", .hash = 0, - .acl = HTTP_ACL_DASHBOARD, + .acl = HTTP_ACL_ALERTS, .access = HTTP_ACCESS_ANONYMOUS_DATA, - .callback = web_client_api_request_v2_alerts, + .callback = api_v2_alerts, .allow_subpaths = 0 }, { .api = "alert_transitions", .hash = 0, - .acl = HTTP_ACL_DASHBOARD, + .acl = HTTP_ACL_ALERTS, .access = HTTP_ACCESS_ANONYMOUS_DATA, - .callback = web_client_api_request_v2_alert_transitions, + .callback = api_v2_alert_transitions, .allow_subpaths = 0 }, { .api = "alert_config", .hash = 0, - .acl = HTTP_ACL_DASHBOARD, + .acl = HTTP_ACL_ALERTS, .access = HTTP_ACCESS_ANONYMOUS_DATA, - .callback = 
web_client_api_request_v2_alert_config, + .callback = api_v2_alert_config, .allow_subpaths = 0 }, @@ -651,41 +74,41 @@ static struct web_api_command api_commands_v2[] = { { .api = "info", .hash = 0, - .acl = HTTP_ACL_DASHBOARD, + .acl = HTTP_ACL_NOCHECK, .access = HTTP_ACCESS_ANONYMOUS_DATA, - .callback = web_client_api_request_v2_info, + .callback = api_v2_info, .allow_subpaths = 0 }, { .api = "nodes", .hash = 0, - .acl = HTTP_ACL_DASHBOARD, + .acl = HTTP_ACL_NODES, .access = HTTP_ACCESS_ANONYMOUS_DATA, - .callback = web_client_api_request_v2_nodes, + .callback = api_v2_nodes, .allow_subpaths = 0 }, { .api = "node_instances", .hash = 0, - .acl = HTTP_ACL_DASHBOARD, + .acl = HTTP_ACL_NODES, .access = HTTP_ACCESS_ANONYMOUS_DATA, - .callback = web_client_api_request_v2_node_instances, + .callback = api_v2_node_instances, .allow_subpaths = 0 }, { .api = "versions", .hash = 0, - .acl = HTTP_ACL_DASHBOARD, + .acl = HTTP_ACL_NODES, .access = HTTP_ACCESS_ANONYMOUS_DATA, - .callback = web_client_api_request_v2_versions, + .callback = api_v2_versions, .allow_subpaths = 0 }, { .api = "progress", .hash = 0, - .acl = HTTP_ACL_DASHBOARD, + .acl = HTTP_ACL_NOCHECK, .access = HTTP_ACCESS_ANONYMOUS_DATA, - .callback = web_client_api_request_v2_progress, + .callback = api_v2_progress, .allow_subpaths = 0 }, @@ -693,9 +116,9 @@ static struct web_api_command api_commands_v2[] = { { .api = "functions", .hash = 0, - .acl = HTTP_ACL_DASHBOARD, + .acl = HTTP_ACL_FUNCTIONS, .access = HTTP_ACCESS_ANONYMOUS_DATA, - .callback = web_client_api_request_v2_functions, + .callback = api_v2_functions, .allow_subpaths = 0 }, @@ -705,7 +128,7 @@ static struct web_api_command api_commands_v2[] = { .hash = 0, .acl = HTTP_ACL_ACLK | ACL_DEV_OPEN_ACCESS, .access = HTTP_ACCESS_SIGNED_ID | HTTP_ACCESS_SAME_SPACE, - .callback = web_client_api_request_v2_webrtc, + .callback = api_v2_webrtc, .allow_subpaths = 0 }, @@ -715,7 +138,7 @@ static struct web_api_command api_commands_v2[] = { .hash = 0, .acl = 
HTTP_ACL_NOCHECK, .access = HTTP_ACCESS_NONE, - .callback = web_client_api_request_v2_claim, + .callback = api_v2_claim, .allow_subpaths = 0 }, { @@ -731,17 +154,18 @@ static struct web_api_command api_commands_v2[] = { .hash = 0, .acl = HTTP_ACL_ACLK | ACL_DEV_OPEN_ACCESS, .access = HTTP_ACCESS_SIGNED_ID | HTTP_ACCESS_SAME_SPACE, - .callback = api_v2_bearer_token, + .callback = api_v2_bearer_get_token, .allow_subpaths = 0 }, +#endif // Netdata branding APIs { .api = "ilove.svg", .hash = 0, - .acl = HTTP_ACL_DASHBOARD, + .acl = HTTP_ACL_NOCHECK, .access = HTTP_ACCESS_ANONYMOUS_DATA, - .callback = web_client_api_request_v2_ilove, + .callback = api_v2_ilove, .allow_subpaths = 0 }, diff --git a/src/web/api/web_api_v3.c b/src/web/api/web_api_v3.c new file mode 100644 index 00000000000000..00b9e8015907e6 --- /dev/null +++ b/src/web/api/web_api_v3.c @@ -0,0 +1,263 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "web_api_v3.h" +#include "v1/api_v1_calls.h" +#include "v2/api_v2_calls.h" +#include "v3/api_v3_calls.h" + +static struct web_api_command api_commands_v3[] = { + // time-series multi-node multi-instance data APIs + { + .api = "data", + .hash = 0, + .acl = HTTP_ACL_METRICS, + .access = HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v2_data, + .allow_subpaths = 0 + }, + // badges can be fetched with both dashboard and badge ACL + { + .api = "badge.svg", + .hash = 0, + .acl = HTTP_ACL_BADGES, + .access = HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v1_badge, + .allow_subpaths = 0 + }, + // scoring engine + { + .api = "weights", + .hash = 0, + .acl = HTTP_ACL_METRICS, + .access = HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v2_weights, + .allow_subpaths = 0 + }, + // exporting API + { + .api = "allmetrics", + .hash = 0, + .acl = HTTP_ACL_METRICS, + .access = HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v1_allmetrics, + .allow_subpaths = 0 + }, + + // time-series multi-node multi-instance metadata APIs + { + .api = "context", + .hash = 0, + .acl = 
HTTP_ACL_METRICS, + .access = HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v1_context, + .allow_subpaths = 0 + }, + { + .api = "contexts", + .hash = 0, + .acl = HTTP_ACL_METRICS, + .access = HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v2_contexts, + .allow_subpaths = 0 + }, + + // fulltext search + { + .api = "q", + .hash = 0, + .acl = HTTP_ACL_METRICS, + .access = HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v2_q, + .allow_subpaths = 0 + }, + + // multi-node multi-instance alerts APIs + { + .api = "alerts", + .hash = 0, + .acl = HTTP_ACL_ALERTS, + .access = HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v2_alerts, + .allow_subpaths = 0 + }, + { + .api = "alert_transitions", + .hash = 0, + .acl = HTTP_ACL_ALERTS, + .access = HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v2_alert_transitions, + .allow_subpaths = 0 + }, + { + .api = "alert_config", + .hash = 0, + .acl = HTTP_ACL_ALERTS, + .access = HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v2_alert_config, + .allow_subpaths = 0 + }, + { + .api = "variable", + .hash = 0, + .acl = HTTP_ACL_ALERTS, + .access = HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v1_variable, + .allow_subpaths = 0 + }, + + // agent information APIs + { + .api = "info", + .hash = 0, + .acl = HTTP_ACL_NOCHECK, + .access = HTTP_ACCESS_NONE, + .callback = api_v2_info, + .allow_subpaths = 0 + }, + { + .api = "nodes", + .hash = 0, + .acl = HTTP_ACL_NODES, + .access = HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v2_nodes, + .allow_subpaths = 0 + }, + { + .api = "node_instances", + .hash = 0, + .acl = HTTP_ACL_NODES, + .access = HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v2_node_instances, + .allow_subpaths = 0 + }, + { + .api = "versions", + .hash = 0, + .acl = HTTP_ACL_NOCHECK, + .access = HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v2_versions, + .allow_subpaths = 0 + }, + { + .api = "progress", + .hash = 0, + .acl = HTTP_ACL_NOCHECK, + .access = HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v2_progress, + .allow_subpaths = 0 + }, + + 
// functions APIs + { + .api = "function", + .hash = 0, + .acl = HTTP_ACL_FUNCTIONS, + .access = HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v1_function, + .allow_subpaths = 0 + }, + { + .api = "functions", + .hash = 0, + .acl = HTTP_ACL_FUNCTIONS, + .access = HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v2_functions, + .allow_subpaths = 0 + }, + + // dyncfg APIs + { + .api = "config", + .hash = 0, + .acl = HTTP_ACL_DYNCFG, + .access = HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v1_config, + .allow_subpaths = 0 + }, + + // settings APIs + { + .api = "settings", + .hash = 0, + .acl = HTTP_ACL_NOCHECK, + .access = HTTP_ACCESS_ANONYMOUS_DATA, + .callback = api_v3_settings, + .allow_subpaths = 0 + }, + + // WebRTC APIs + { + .api = "rtc_offer", + .hash = 0, + .acl = HTTP_ACL_ACLK | ACL_DEV_OPEN_ACCESS, + .access = HTTP_ACCESS_SIGNED_ID | HTTP_ACCESS_SAME_SPACE, + .callback = api_v2_webrtc, + .allow_subpaths = 0 + }, + + // management APIs + { + .api = "claim", + .hash = 0, + .acl = HTTP_ACL_NOCHECK, + .access = HTTP_ACCESS_NONE, + .callback = api_v2_claim, + .allow_subpaths = 0 + }, + { + .api = "bearer_protection", + .hash = 0, + .acl = HTTP_ACL_ACLK | ACL_DEV_OPEN_ACCESS, + .access = HTTP_ACCESS_SIGNED_ID | HTTP_ACCESS_SAME_SPACE | HTTP_ACCESS_VIEW_AGENT_CONFIG | HTTP_ACCESS_EDIT_AGENT_CONFIG, + .callback = api_v2_bearer_protection, + .allow_subpaths = 0 + }, + { + .api = "bearer_get_token", + .hash = 0, + .acl = HTTP_ACL_ACLK | ACL_DEV_OPEN_ACCESS, + .access = HTTP_ACCESS_SIGNED_ID | HTTP_ACCESS_SAME_SPACE, + .callback = api_v2_bearer_get_token, + .allow_subpaths = 0 + }, + { + .api = "me", + .hash = 0, + .acl = HTTP_ACL_NOCHECK, + .access = HTTP_ACCESS_NONE, + .callback = api_v3_me, + .allow_subpaths = 0 + }, + + // Netdata branding APIs + { + .api = "ilove.svg", + .hash = 0, + .acl = HTTP_ACL_NOCHECK, + .access = HTTP_ACCESS_NONE, + .callback = api_v2_ilove, + .allow_subpaths = 0 + }, + + {// terminator + .api = NULL, + .hash = 0, + .acl = HTTP_ACL_NONE, + 
.access = HTTP_ACCESS_NONE, + .callback = NULL, + .allow_subpaths = 0 + }, +}; + +inline int web_client_api_request_v3(RRDHOST *host, struct web_client *w, char *url_path_endpoint) { + static int initialized = 0; + + if(unlikely(initialized == 0)) { + initialized = 1; + + for(int i = 0; api_commands_v3[i].api ; i++) + api_commands_v3[i].hash = simple_hash(api_commands_v3[i].api); + } + + return web_client_api_request_vX(host, w, url_path_endpoint, api_commands_v3); +} diff --git a/src/web/api/web_api_v3.h b/src/web/api/web_api_v3.h new file mode 100644 index 00000000000000..32fa4cd1dd84f1 --- /dev/null +++ b/src/web/api/web_api_v3.h @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_WEB_API_V3_H +#define NETDATA_WEB_API_V3_H + +#include "web_api.h" + +struct web_client; + +int web_client_api_request_v3(RRDHOST *host, struct web_client *w, char *url_path_endpoint); + +#endif //NETDATA_WEB_API_V3_H diff --git a/src/web/server/h2o/http_server.c b/src/web/server/h2o/http_server.c index a079c6afe1a517..2bc6a6c1082414 100644 --- a/src/web/server/h2o/http_server.c +++ b/src/web/server/h2o/http_server.c @@ -24,6 +24,7 @@ static h2o_accept_ctx_t accept_ctx; #define NBUF_INITIAL_SIZE_RESP (4096) #define API_V1_PREFIX "/api/v1/" #define API_V2_PREFIX "/api/v2/" +#define API_V3_PREFIX "/api/v3/" #define HOST_SELECT_PREFIX "/host/" #define HTTPD_CONFIG_SECTION "httpd" @@ -182,13 +183,17 @@ static inline int _netdata_uberhandler(h2o_req_t *req, RRDHOST **host) norm_path.len--; } - unsigned int api_version = 2; - size_t api_loc = h2o_strstr(norm_path.base, norm_path.len, H2O_STRLIT(API_V2_PREFIX)); + unsigned int api_version = 3; + size_t api_loc = h2o_strstr(norm_path.base, norm_path.len, H2O_STRLIT(API_V3_PREFIX)); if (api_loc == SIZE_MAX) { - api_version = 1; - api_loc = h2o_strstr(norm_path.base, norm_path.len, H2O_STRLIT(API_V1_PREFIX)); - if (api_loc == SIZE_MAX) - return 1; + api_version = 2; + api_loc = h2o_strstr(norm_path.base, 
norm_path.len, H2O_STRLIT(API_V2_PREFIX)); + if (api_loc == SIZE_MAX) { + api_version = 1; + api_loc = h2o_strstr(norm_path.base, norm_path.len, H2O_STRLIT(API_V1_PREFIX)); + if (api_loc == SIZE_MAX) + return 1; + } } // API_V1_PREFIX and API_V2_PREFIX are the same length @@ -235,7 +240,9 @@ static inline int _netdata_uberhandler(h2o_req_t *req, RRDHOST **host) } //inline int web_client_api_request_v2(RRDHOST *host, struct web_client *w, char *url_path_endpoint) { - if (api_version == 2) + if (api_version == 3) + web_client_api_request_v3(*host, &w, path_unescaped); + else if (api_version == 2) web_client_api_request_v2(*host, &w, path_unescaped); else web_client_api_request_v1(*host, &w, path_unescaped); @@ -314,7 +321,7 @@ static int hdl_netdata_conf(h2o_handler_t *self, h2o_req_t *req) return -1; BUFFER *buf = buffer_create(NBUF_INITIAL_SIZE_RESP, NULL); - config_generate(buf, 0); + netdata_conf_generate(buf, 0); void *managed = h2o_mem_alloc_shared(&req->pool, buf->len, NULL); memcpy(managed, buf->buffer, buf->len); diff --git a/src/web/server/static/static-threaded.c b/src/web/server/static/static-threaded.c index a4b24c9ac74035..c75498a917caee 100644 --- a/src/web/server/static/static-threaded.c +++ b/src/web/server/static/static-threaded.c @@ -211,7 +211,6 @@ static void *web_server_add_callback(POLLINFO *pi, short int *events, void *data web_client_set_conn_tcp(w); } -#ifdef ENABLE_HTTPS if ((web_client_check_conn_tcp(w)) && (netdata_ssl_web_server_ctx)) { sock_delnonblock(w->ifd); @@ -239,13 +238,10 @@ static void *web_server_add_callback(POLLINFO *pi, short int *events, void *data sock_setnonblock(w->ifd); } -#endif netdata_log_debug(D_WEB_CLIENT, "%llu: ADDED CLIENT FD %d", w->id, pi->fd); -#ifdef ENABLE_HTTPS cleanup: -#endif worker_is_idle(); return w; } @@ -503,14 +499,12 @@ void *socket_listen_main_static_threaded(void *ptr) { if(!api_sockets.opened) fatal("LISTENER: no listen sockets available."); -#ifdef ENABLE_HTTPS 
netdata_ssl_validate_certificate = !config_get_boolean(CONFIG_SECTION_WEB, "ssl skip certificate verification", !netdata_ssl_validate_certificate); if(!netdata_ssl_validate_certificate_sender) netdata_log_info("SSL: web server will skip SSL certificates verification."); netdata_ssl_initialize_ctx(NETDATA_SSL_WEB_SERVER_CTX); -#endif // 6 threads is the optimal value // since 6 are the parallel connections browsers will do @@ -526,13 +520,11 @@ void *socket_listen_main_static_threaded(void *ptr) { if (static_threaded_workers_count < 1) static_threaded_workers_count = 1; -#ifdef ENABLE_HTTPS // See https://github.com/netdata/netdata/issues/11081#issuecomment-831998240 for more details if (OPENSSL_VERSION_NUMBER < OPENSSL_VERSION_110) { static_threaded_workers_count = 1; netdata_log_info("You are running an OpenSSL older than 1.1.0, web server will not enable multithreading."); } -#endif size_t max_sockets = (size_t)config_get_number(CONFIG_SECTION_WEB, "web server max sockets", (long long int)(rlimit_nofile.rlim_cur / 4)); diff --git a/src/web/server/web_client.c b/src/web/server/web_client.c index ca1c28e7f9590a..8e5355eca4d21a 100644 --- a/src/web/server/web_client.c +++ b/src/web/server/web_client.c @@ -33,6 +33,7 @@ void web_client_set_conn_webrtc(struct web_client *w) { void web_client_reset_permissions(struct web_client *w) { web_client_flags_clear_auth(w); w->access = HTTP_ACCESS_NONE; + w->user_role = HTTP_USER_ROLE_NONE; } void web_client_set_permissions(struct web_client *w, HTTP_ACCESS access, HTTP_USER_ROLE role, WEB_CLIENT_FLAGS auth) { @@ -97,7 +98,6 @@ static inline int web_client_cork_socket(struct web_client *w __maybe_unused) { return 0; } -#ifdef ENABLE_HTTPS static inline void web_client_enable_wait_from_ssl(struct web_client *w) { if (w->ssl.ssl_errno == SSL_ERROR_WANT_READ) web_client_enable_ssl_wait_receive(w); @@ -108,7 +108,6 @@ static inline void web_client_enable_wait_from_ssl(struct web_client *w) { web_client_disable_ssl_wait_send(w); } } 
-#endif static inline int web_client_uncork_socket(struct web_client *w __maybe_unused) { #ifdef TCP_CORK @@ -211,6 +210,8 @@ static void web_client_reset_allocations(struct web_client *w, bool free_all) { } memset(w->transaction, 0, sizeof(w->transaction)); + memset(&w->auth, 0, sizeof(w->auth)); + web_client_reset_permissions(w); web_client_flag_clear(w, WEB_CLIENT_ENCODING_GZIP|WEB_CLIENT_ENCODING_DEFLATE); web_client_reset_path_flags(w); @@ -589,7 +590,9 @@ int web_client_api_request(RRDHOST *host, struct web_client *w, char *url_path_f // get the api version char *tok = strsep_skip_consecutive_separators(&url_path_fragment, "/"); if(tok && *tok) { - if(strcmp(tok, "v2") == 0) + if(strcmp(tok, "v3") == 0) + return web_client_api_request_v3(host, w, url_path_fragment); + else if(strcmp(tok, "v2") == 0) return web_client_api_request_v2(host, w, url_path_fragment); else if(strcmp(tok, "v1") == 0) return web_client_api_request_v1(host, w, url_path_fragment); @@ -644,7 +647,6 @@ static inline char *web_client_valid_method(struct web_client *w, char *s) { else if(!strncmp(s, "STREAM ", 7)) { s = &s[7]; -#ifdef ENABLE_HTTPS if (!SSL_connection(&w->ssl) && http_is_using_ssl_force(w)) { w->header_parse_tries = 0; w->header_parse_last_size = 0; @@ -672,7 +674,6 @@ static inline char *web_client_valid_method(struct web_client *w, char *s) { netdata_log_error("The server is configured to always use encrypted connections, please enable the SSL on child with hostname '%s'.",hostname); s = NULL; } -#endif w->mode = HTTP_REQUEST_MODE_STREAM; } @@ -787,7 +788,6 @@ HTTP_VALIDATION http_request_validate(struct web_client *w) { web_client_decode_path_and_query_string(w, encoded_url); *ue = c; -#ifdef ENABLE_HTTPS if ( (web_client_check_conn_tcp(w)) && (netdata_ssl_web_server_ctx) ) { if (!w->ssl.conn && (http_is_using_ssl_force(w) || http_is_using_ssl_default(w)) && (w->mode != HTTP_REQUEST_MODE_STREAM)) { w->header_parse_tries = 0; @@ -796,7 +796,6 @@ HTTP_VALIDATION 
http_request_validate(struct web_client *w) { return HTTP_VALIDATION_REDIRECT; } } -#endif w->header_parse_tries = 0; w->header_parse_last_size = 0; @@ -817,7 +816,6 @@ HTTP_VALIDATION http_request_validate(struct web_client *w) { static inline ssize_t web_client_send_data(struct web_client *w,const void *buf,size_t len, int flags) { ssize_t bytes; -#ifdef ENABLE_HTTPS if ((web_client_check_conn_tcp(w)) && (netdata_ssl_web_server_ctx)) { if (SSL_connection(&w->ssl)) { bytes = netdata_ssl_write(&w->ssl, buf, len) ; @@ -830,12 +828,6 @@ static inline ssize_t web_client_send_data(struct web_client *w,const void *buf, bytes = send(w->ofd,buf, len , flags); else bytes = -999; -#else - if(web_client_check_conn_tcp(w) || web_client_check_conn_unix(w)) - bytes = send(w->ofd, buf, len, flags); - else - bytes = -999; -#endif return bytes; } @@ -967,7 +959,7 @@ static inline void web_client_send_http_header(struct web_client *w) { size_t count = 0; ssize_t bytes; -#ifdef ENABLE_HTTPS + if ( (web_client_check_conn_tcp(w)) && (netdata_ssl_web_server_ctx) ) { if (SSL_connection(&w->ssl)) { bytes = netdata_ssl_write(&w->ssl, buffer_tostring(w->response.header_output), buffer_strlen(w->response.header_output)); @@ -996,20 +988,6 @@ static inline void web_client_send_http_header(struct web_client *w) { } else bytes = -999; -#else - if(web_client_check_conn_tcp(w) || web_client_check_conn_unix(w)) { - while ((bytes = send(w->ofd, buffer_tostring(w->response.header_output), buffer_strlen(w->response.header_output), 0)) == -1) { - count++; - - if (count > 100 || (errno != EAGAIN && errno != EWOULDBLOCK)) { - netdata_log_error("Cannot send HTTP headers to web client."); - break; - } - } - } - else - bytes = -999; -#endif if(bytes != (ssize_t) buffer_strlen(w->response.header_output)) { if(bytes > 0) @@ -1224,7 +1202,7 @@ static inline int web_client_process_url(RRDHOST *host, struct web_client *w, ch netdata_log_debug(D_WEB_CLIENT_ACCESS, "%llu: generating netdata.conf ...", w->id); 
w->response.data->content_type = CT_TEXT_PLAIN; buffer_flush(w->response.data); - config_generate(w->response.data, 0); + netdata_conf_generate(w->response.data, 0); return HTTP_RESP_OK; } #ifdef NETDATA_INTERNAL_CHECKS @@ -1310,11 +1288,7 @@ static bool web_server_log_transport(BUFFER *wb, void *ptr) { if(!w) return false; -#ifdef ENABLE_HTTPS buffer_strcat(wb, SSL_connection(&w->ssl) ? "https" : "http"); -#else - buffer_strcat(wb, "http"); -#endif return true; } @@ -1457,7 +1431,7 @@ void web_client_process_request_from_web_server(struct web_client *w) { return; } break; -#ifdef ENABLE_HTTPS + case HTTP_VALIDATION_REDIRECT: { buffer_flush(w->response.data); @@ -1473,7 +1447,7 @@ void web_client_process_request_from_web_server(struct web_client *w) { w->response.code = HTTP_RESP_HTTPS_UPGRADE; break; } -#endif + case HTTP_VALIDATION_MALFORMED_URL: netdata_log_debug(D_WEB_CLIENT_ACCESS, "%llu: Malformed URL '%s'.", w->id, w->response.data->buffer); @@ -1857,7 +1831,6 @@ ssize_t web_client_receive(struct web_client *w) errno_clear(); -#ifdef ENABLE_HTTPS if ( (web_client_check_conn_tcp(w)) && (netdata_ssl_web_server_ctx) ) { if (SSL_connection(&w->ssl)) { bytes = netdata_ssl_read(&w->ssl, &w->response.data->buffer[w->response.data->len], (size_t) (left - 1)); @@ -1872,12 +1845,6 @@ ssize_t web_client_receive(struct web_client *w) } else // other connection methods bytes = -1; -#else - if(web_client_check_conn_tcp(w) || web_client_check_conn_unix(w)) - bytes = recv(w->ifd, &w->response.data->buffer[w->response.data->len], (size_t) (left - 1), MSG_DONTWAIT); - else - bytes = -1; -#endif if(likely(bytes > 0)) { w->statistics.received_bytes += bytes; @@ -1960,9 +1927,7 @@ void web_client_reuse_from_cache(struct web_client *w) { BUFFER *b6 = w->url_query_string_decoded; BUFFER *b7 = w->payload; -#ifdef ENABLE_HTTPS NETDATA_SSL ssl = w->ssl; -#endif size_t use_count = w->use_count; size_t *statistics_memory_accounting = w->statistics.memory_accounting; @@ -1974,9 +1939,7 
@@ void web_client_reuse_from_cache(struct web_client *w) { w->statistics.memory_accounting = statistics_memory_accounting; w->use_count = use_count; -#ifdef ENABLE_HTTPS w->ssl = ssl; -#endif // restore the pointers of the buffers w->response.data = b1; @@ -1991,9 +1954,7 @@ void web_client_reuse_from_cache(struct web_client *w) { struct web_client *web_client_create(size_t *statistics_memory_accounting) { struct web_client *w = (struct web_client *)callocz(1, sizeof(struct web_client)); -#ifdef ENABLE_HTTPS w->ssl = NETDATA_SSL_UNSET_CONNECTION; -#endif w->use_count = 1; w->statistics.memory_accounting = statistics_memory_accounting; @@ -2011,9 +1972,7 @@ struct web_client *web_client_create(size_t *statistics_memory_accounting) { } void web_client_free(struct web_client *w) { -#ifdef ENABLE_HTTPS netdata_ssl_close(&w->ssl); -#endif web_client_reset_allocations(w, true); diff --git a/src/web/server/web_client.h b/src/web/server/web_client.h index 650ddb3eb55210..45ab888668e34c 100644 --- a/src/web/server/web_client.h +++ b/src/web/server/web_client.h @@ -21,9 +21,7 @@ typedef enum __attribute__((packed)) { HTTP_VALIDATION_EXCESS_REQUEST_DATA, HTTP_VALIDATION_MALFORMED_URL, HTTP_VALIDATION_INCOMPLETE, -#ifdef ENABLE_HTTPS HTTP_VALIDATION_REDIRECT -#endif } HTTP_VALIDATION; typedef enum __attribute__((packed)) { @@ -112,9 +110,9 @@ typedef enum __attribute__((packed)) { #define web_client_check_conn_tcp(w) web_client_flag_check(w, WEB_CLIENT_FLAG_CONN_TCP) #define web_client_check_conn_cloud(w) web_client_flag_check(w, WEB_CLIENT_FLAG_CONN_CLOUD) #define web_client_check_conn_webrtc(w) web_client_flag_check(w, WEB_CLIENT_FLAG_CONN_WEBRTC) - -#define WEB_CLIENT_FLAG_ALL_AUTHS (WEB_CLIENT_FLAG_AUTH_CLOUD | WEB_CLIENT_FLAG_AUTH_BEARER) #define web_client_flags_clear_conn(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_CONN_TCP | WEB_CLIENT_FLAG_CONN_UNIX | WEB_CLIENT_FLAG_CONN_CLOUD | WEB_CLIENT_FLAG_CONN_WEBRTC) + +#define WEB_CLIENT_FLAG_ALL_AUTHS 
(WEB_CLIENT_FLAG_AUTH_CLOUD | WEB_CLIENT_FLAG_AUTH_BEARER | WEB_CLIENT_FLAG_AUTH_GOD) #define web_client_flags_check_auth(w) web_client_flag_check(w, WEB_CLIENT_FLAG_ALL_AUTHS) #define web_client_flags_clear_auth(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_ALL_AUTHS) @@ -136,7 +134,7 @@ void web_client_set_conn_webrtc(struct web_client *w); #define NETDATA_WEB_REQUEST_MAX_SIZE 65536 #define NETDATA_WEB_DECODED_URL_INITIAL_SIZE 512 -#define CLOUD_USER_NAME_LENGTH 64 +#define CLOUD_CLIENT_NAME_LENGTH 64 struct response { BUFFER *header; // our response header @@ -202,14 +200,12 @@ struct web_client { size_t pollinfo_slot; // POLLINFO slot of the web client size_t pollinfo_filecopy_slot; // POLLINFO slot of the file read -#ifdef ENABLE_HTTPS NETDATA_SSL ssl; -#endif struct { nd_uuid_t bearer_token; nd_uuid_t cloud_account_id; - char client_name[CLOUD_USER_NAME_LENGTH]; + char client_name[CLOUD_CLIENT_NAME_LENGTH]; } auth; struct { // A callback to check if the query should be interrupted / stopped diff --git a/src/web/server/web_client_cache.c b/src/web/server/web_client_cache.c index 654577e8a82bc6..ebc428894670e5 100644 --- a/src/web/server/web_client_cache.c +++ b/src/web/server/web_client_cache.c @@ -119,15 +119,14 @@ struct web_client *web_client_get_from_cache(void) { w->mode = HTTP_REQUEST_MODE_GET; web_client_reset_permissions(w); memset(w->transaction, 0, sizeof(w->transaction)); + memset(&w->auth, 0, sizeof(w->auth)); return w; } void web_client_release_to_cache(struct web_client *w) { -#ifdef ENABLE_HTTPS netdata_ssl_close(&w->ssl); -#endif // unlink it from the used spinlock_lock(&web_clients_cache.used.spinlock); diff --git a/src/web/server/web_server.c b/src/web/server/web_server.c index 3497af13c004f3..f7d6ceca2e24cb 100644 --- a/src/web/server/web_server.c +++ b/src/web/server/web_server.c @@ -134,11 +134,7 @@ void web_client_update_acl_matches(struct web_client *w) { void web_server_log_connection(struct web_client *w, const char *msg) { ND_LOG_STACK 
lgs[] = { ND_LOG_FIELD_U64(NDF_CONNECTION_ID, w->id), -#ifdef ENABLE_HTTPS ND_LOG_FIELD_TXT(NDF_SRC_TRANSPORT, SSL_connection(&w->ssl) ? "https" : "http"), -#else - ND_LOG_FIELD_TXT(NDF_SRC_TRANSPORT, "http"), -#endif ND_LOG_FIELD_TXT(NDF_SRC_IP, w->client_ip), ND_LOG_FIELD_TXT(NDF_SRC_PORT, w->client_port), ND_LOG_FIELD_TXT(NDF_SRC_FORWARDED_HOST, w->forwarded_host),