From 46420eccd3c22a1fbb63ab3a8c69f020e2def91c Mon Sep 17 00:00:00 2001
From: "Everton H. Taques" <97463920+etaques@users.noreply.github.com>
Date: Tue, 10 Oct 2023 14:42:33 -0300
Subject: [PATCH] Revert "add changes to production (#2732)"

This reverts commit ca4cb9d353b0f95d7202269c42ec80c12f301066.
---
 .github/workflows/go-develop.yml | 1 -
 Makefile | 4 +-
 VERSION | 2 +-
 cmd/maestro/main.go | 29 +-
 cmd/sinker/main.go | 24 +-
 cmd/sinks/main.go | 36 +-
 docker/Dockerfile | 2 +-
 fleet/api/http/logging.go | 42 +-
 kind/Chart.lock | 6 +
 kind/Chart.yaml | 2 +-
 kind/README.md | 58 +-
 kind/adminer/deployment.yaml | 21 -
 kind/redis-commander/deployment.yaml | 59 -
 kind/values.yaml | 7 +
 maestro/config/authentication_builder.go | 39 +-
 maestro/config/config_builder.go | 24 +-
 maestro/config/config_builder_test.go | 29 +-
 maestro/config/service.go | 31 -
 maestro/deployment/model.go | 81 -
 maestro/deployment/repository.go | 200 -
 maestro/deployment/repository_test.go | 217 -
 maestro/deployment/service.go | 283 -
 maestro/deployment/setup_test.go | 70 -
 maestro/errors/maestro_errors.go | 5 -
 maestro/kubecontrol/kubecontrol.go | 85 +-
 maestro/monitor/monitor.go | 91 +-
 maestro/password/password.go | 93 -
 maestro/password/password_test.go | 59 -
 maestro/postgres/init.go | 61 -
 maestro/redis/consumer/hashset.go | 234 +
 maestro/redis/consumer/sinker.go | 151 -
 maestro/redis/consumer/sinks.go | 137 -
 maestro/redis/consumer/streams.go | 254 +
 maestro/redis/events.go | 43 +-
 maestro/redis/events_test.go | 50 -
 maestro/redis/producer/sink_status.go | 68 -
 maestro/service.go | 157 +-
 maestro/service/deploy_service.go | 198 -
 maestro/service/handle_sinker_test.go | 145 -
 maestro/service/handle_sinks_test.go | 223 -
 maestro/service/kubectr_test.go | 24 -
 maestro/service/metrics_middleware.go | 85 -
 maestro/service/pbmock_test.go | 26 -
 maestro/service/producer_test.go | 19 -
 maestro/service/repository_test.go | 79 -
 pkg/config/config.go | 1 -
 policies/api/http/logging.go | 46 +-
 sinker/backend/backend.go | 38 +
 sinker/backend/pktvisor/pktvisor.go | 474 ++
 sinker/backend/pktvisor/pktvisor_test.go | 5302 +++++++++++++++++
 sinker/backend/pktvisor/promwrapper.go | 98 +
 sinker/backend/pktvisor/types.go | 253 +
 sinker/config_state_check.go | 63 +
 sinker/message_handler.go | 306 +
 sinker/otel/bridgeservice/bridge.go | 89 +-
 .../kafkafanoutexporter/kafka_exporter.go | 6 +-
 sinker/otel/orbreceiver/logs.go | 11 +-
 sinker/otel/orbreceiver/metrics.go | 12 +-
 sinker/otel/orbreceiver/traces.go | 10 +-
 sinker/prometheus/client.go | 301 +
 sinker/redis/consumer/docs.go | 1 +
 sinker/redis/consumer/events.go | 22 +
 sinker/redis/consumer/sink_key_expire.go | 73 -
 sinker/redis/consumer/streams.go | 222 +
 sinker/redis/producer/docs.go | 1 +
 sinker/redis/producer/events.go | 37 +
 sinker/redis/producer/sink_ttl.go | 86 -
 sinker/redis/producer/sinker_activity.go | 66 -
 sinker/redis/producer/sinker_idle.go | 58 -
 sinker/redis/producer/streams.go | 160 +
 sinker/redis/setup_test.go | 37 +-
 sinker/redis/sinker.go | 185 +
 sinker/redis/sinker_test.go | 228 +-
 sinker/service.go | 75 +-
 sinks/api/http/logging.go | 27 +-
 sinks/api/http/metrics.go | 3 +-
 sinks/postgres/init.go | 13 -
 sinks/postgres/sinks_test.go | 8 -
 sinks/redis/consumer/events.go | 21 +
 sinks/redis/consumer/sink_status_listener.go | 110 -
 sinks/redis/consumer/streams.go | 24 +-
 sinks/redis/events.go | 64 -
 sinks/redis/producer/events.go | 4 -
 sinks/redis/producer/streams.go | 55 +-
 sinks/sinks.go | 23 +-
 sinks/sinks_service.go | 6 +-
 ui/README.md | 16 +-
 ui/docker/Dockerfile | 2 +-
 ui/docker/Dockerfile.buildyarn | 5 +-
 ui/package.json | 3 +-
 ui/src/app/@core/core.module.ts | 6 +-
 ui/src/app/@theme/styles/_overrides.scss | 65 +-
 .../app/auth/components/auth.component.scss | 14 +-
 .../app/auth/pages/login/login.component.scss | 2 +-
 .../auth/pages/register/register.component.ts | 4 +-
 .../common/interfaces/orb/sink.interface.ts | 5 +-
 .../orb/sink/config/otlp.config.interface.ts | 8 +-
 .../common/services/code.editor.service.ts | 6 +-
 .../dataset/dataset.policies.service.ts | 2 +-
 ui/src/app/common/services/filter.service.ts | 2 +-
 ui/src/app/common/services/orb.service.ts | 13 +-
 .../pages/dashboard/dashboard.component.scss | 2 +-
 .../dataset-from/dataset-from.component.html | 57 +-
 .../dataset-from/dataset-from.component.scss | 80 +-
 .../dataset-from/dataset-from.component.ts | 29 +-
 .../delete/dataset.delete.component.scss | 6 +-
 .../add/agent.policy.add.component.html | 6 +-
 .../add/agent.policy.add.component.scss | 29 +-
 .../add/agent.policy.add.component.ts | 46 +-
 .../delete/agent.policy.delete.component.scss | 5 -
 .../agent.policy.duplicate.confirmation.scss | 9 +-
 .../agent.policy.duplicate.confirmation.ts | 4 +-
 .../list/agent.policy.list.component.scss | 4 +-
 .../list/agent.policy.list.component.ts | 21 +-
 .../view/agent.policy.view.component.html | 63 +-
 .../view/agent.policy.view.component.scss | 8 -
 .../view/agent.policy.view.component.ts | 88 +-
 .../fleet/agents/add/agent.add.component.scss | 2 +-
 .../agents/delete/agent.delete.component.scss | 5 -
 .../fleet/agents/key/agent.key.component.scss | 4 +-
 .../fleet/agents/key/agent.key.component.ts | 5 +-
 .../fleet/agents/list/agent.list.component.ts | 40 +-
 .../agents/match/agent.match.component.ts | 28 +-
 .../agents/reset/agent.reset.component.html | 20 +-
 .../agents/reset/agent.reset.component.scss | 43 +-
 .../agents/reset/agent.reset.component.ts | 6 +-
 .../agents/view/agent.view.component.html | 114 +-
 .../agents/view/agent.view.component.scss | 35 +-
 .../fleet/agents/view/agent.view.component.ts | 3 -
 .../groups/add/agent.group.add.component.scss | 4 +-
 .../delete/agent.group.delete.component.scss | 6 +-
 .../groups/list/agent.group.list.component.ts | 21 +-
 ui/src/app/pages/pages-menu.ts | 2 +-
 .../app/pages/profile/profile.component.scss | 40 +-
 ui/src/app/pages/profile/profile.component.ts | 16 +-
 .../pages/sinks/add/sink-add.component.html | 4 +-
 .../pages/sinks/add/sink-add.component.scss | 6 +-
 .../app/pages/sinks/add/sink-add.component.ts | 22 +-
 .../sinks/delete/sink.delete.component.scss | 5 -
 .../sinks/details/sink.details.component.html | 2 +-
 .../sinks/details/sink.details.component.ts | 4 +-
 .../pages/sinks/list/sink.list.component.scss | 11 +-
 .../pages/sinks/list/sink.list.component.ts | 13 +-
 .../pages/sinks/view/sink.view.component.html | 90 +-
 .../pages/sinks/view/sink.view.component.scss | 35 +-
 .../sinks/view/sink.view.component.spec.ts | 2 +-
 .../pages/sinks/view/sink.view.component.ts | 52 +-
 .../delete/delete.selected.component.html | 39 +-
 .../delete/delete.selected.component.scss | 66 +-
 .../delete/delete.selected.component.ts | 13 +-
 .../components/filter/filter.component.ts | 8 +-
 .../agent-backends.component.html | 2 +-
 .../agent-backends.component.scss | 10 +-
 .../agent-backends.component.ts | 6 +-
 .../agent-capabilities.component.scss | 3 -
 .../agent-groups/agent-groups.component.scss | 4 -
 .../agent-information.component.scss | 1 -
 .../agent-information.component.ts | 17 +-
 .../agent-policies-datasets.component.html | 8 +-
 .../agent-policies-datasets.component.scss | 5 +-
 .../agent-policies-datasets.component.ts | 14 +-
 .../agent-provisioning.component.html | 121 +-
 .../agent-provisioning.component.scss | 36 +-
 .../agent-provisioning.component.ts | 52 +-
 .../policy-datasets.component.html | 7 +-
 .../policy-datasets.component.scss | 12 +-
 .../policy-datasets.component.ts | 19 +-
 .../policy-details.component.html | 6 +-
 .../policy-details.component.scss | 10 +-
 .../policy-details.component.ts | 3 +-
 .../policy-groups.component.html | 2 +-
 .../policy-groups.component.scss | 4 +-
 .../policy-groups/policy-groups.component.ts | 3 -
 .../policy-interface.component.html | 28 +-
 .../policy-interface.component.scss | 36 +-
 .../policy-interface.component.ts | 44 +-
 .../sink-control/sink-control.component.scss | 2 +-
 .../sink-config/sink-config.component.html | 5 +-
 .../sink-config/sink-config.component.scss | 19 +-
 .../sink-config/sink-config.component.spec.ts | 2 +-
 .../sink/sink-config/sink-config.component.ts | 61 +-
 .../sink-details/sink-details.component.html | 10 +-
 .../sink-details/sink-details.component.scss | 10 +-
 .../sink-details.component.spec.ts | 2 +-
 .../sink-details/sink-details.component.ts | 27 +-
 .../tag-control/tag-control.component.scss | 2 +-
 ui/tslint.json | 5 +-
 187 files changed, 9415 insertions(+), 4732 deletions(-)
 create mode 100644 kind/Chart.lock
 delete mode 100644 kind/adminer/deployment.yaml
 delete mode 100644 kind/redis-commander/deployment.yaml
 delete mode 100644 maestro/config/service.go
 delete mode 100644 maestro/deployment/model.go
 delete mode 100644 maestro/deployment/repository.go
 delete mode 100644 maestro/deployment/repository_test.go
 delete mode 100644 maestro/deployment/service.go
 delete mode 100644 maestro/deployment/setup_test.go
 delete mode 100644 maestro/errors/maestro_errors.go
 delete mode 100644 maestro/password/password.go
 delete mode 100644 maestro/password/password_test.go
 delete mode 100644 maestro/postgres/init.go
 create mode 100644 maestro/redis/consumer/hashset.go
 delete mode 100644 maestro/redis/consumer/sinker.go
 delete mode 100644 maestro/redis/consumer/sinks.go
 create mode 100644 maestro/redis/consumer/streams.go
 delete mode 100644 maestro/redis/events_test.go
 delete mode 100644 maestro/redis/producer/sink_status.go
 delete mode 100644 maestro/service/deploy_service.go
 delete mode 100644 maestro/service/handle_sinker_test.go
 delete mode 100644 maestro/service/handle_sinks_test.go
 delete mode 100644 maestro/service/kubectr_test.go
 delete mode 100644 maestro/service/metrics_middleware.go
 delete mode 100644 maestro/service/pbmock_test.go
 delete mode 100644 maestro/service/producer_test.go
 delete mode 100644 maestro/service/repository_test.go
 create mode 100644 sinker/backend/backend.go
 create mode 100644 sinker/backend/pktvisor/pktvisor.go
 create mode 100644 sinker/backend/pktvisor/pktvisor_test.go
 create mode 100644 sinker/backend/pktvisor/promwrapper.go
 create mode 100644 sinker/backend/pktvisor/types.go
 create mode 100644 sinker/config_state_check.go
 create mode 100644 sinker/message_handler.go
 create mode 100644 sinker/prometheus/client.go
 create mode 100644 sinker/redis/consumer/docs.go
 create mode 100644 sinker/redis/consumer/events.go
 delete mode 100644 sinker/redis/consumer/sink_key_expire.go
 create mode 100644 sinker/redis/consumer/streams.go
 create mode 100644 sinker/redis/producer/docs.go
 create mode 100644 sinker/redis/producer/events.go
 delete mode 100644 sinker/redis/producer/sink_ttl.go
 delete mode 100644 sinker/redis/producer/sinker_activity.go
 delete mode 100644 sinker/redis/producer/sinker_idle.go
 create mode 100644 sinker/redis/producer/streams.go
 create mode 100644 sinker/redis/sinker.go
 create mode 100644 sinks/redis/consumer/events.go
 delete mode 100644 sinks/redis/consumer/sink_status_listener.go
 delete mode 100644 sinks/redis/events.go

diff --git a/.github/workflows/go-develop.yml b/.github/workflows/go-develop.yml
index b7d9260f0..984b20d0c 100644
--- a/.github/workflows/go-develop.yml
+++ b/.github/workflows/go-develop.yml
@@ -643,7 +643,6 @@ jobs:
       - package-policies
       - package-sinker
       - package-sinks
-      - package-maestro
       - package-ui
 
     runs-on: ubuntu-latest
diff --git a/Makefile b/Makefile
index ed86abff5..746989174 100644
--- a/Makefile
+++ b/Makefile
@@ -153,7 +153,7 @@ install-helm:
 
 install-kubectl:
 	cd /tmp && \
-	curl -LO "https://dl.k8s.io/release/v1.27.4/bin/linux/amd64/kubectl" && \
+	curl -LO "https://dl.k8s.io/release/v1.22.1/bin/linux/amd64/kubectl" && \
 	chmod a+x ./kubectl && \
 	sudo mv ./kubectl /usr/local/bin/kubectl
 
@@ -182,7 +182,7 @@ kind-create-all: kind-create-cluster kind-install-orb
 kind-upgrade-all: kind-load-images kind-upgrade-orb
 
 kind-create-cluster:
-	kind create cluster --image kindest/node:v1.24.0 --config=./kind/config.yaml
+	kind create cluster --image kindest/node:v1.22.15 --config=./kind/config.yaml
 
 kind-delete-cluster:
 	kind delete cluster
diff --git a/VERSION b/VERSION
index 697f087f3..1b58cc101 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-0.28.0
+0.27.0
diff --git a/cmd/maestro/main.go b/cmd/maestro/main.go
index fc7ed5027..97f3617c6 100644
--- a/cmd/maestro/main.go
+++ b/cmd/maestro/main.go
@@ -11,8 +11,6 @@ package main
 import (
 	"context"
 	"fmt"
-	"github.com/jmoiron/sqlx"
-	"github.com/orb-community/orb/maestro/postgres"
 	"io"
 	"os"
 	"os/signal"
@@ -38,10 +36,9 @@ import (
 )
 
 const (
-	svcName    = "maestro"
-	envPrefix  = "orb_maestro"
-	sinkPrefix = "orb_sinks"
-	httpPort   = "8500"
+	svcName   = "maestro"
+	envPrefix = "orb_maestro"
+	httpPort  = "8500"
 )
 
 func main() {
@@ -51,9 +48,6 @@ func main() {
 	svcCfg := config.LoadBaseServiceConfig(envPrefix, httpPort)
 	jCfg := config.LoadJaegerConfig(envPrefix)
 	sinksGRPCCfg := config.LoadGRPCConfig("orb", "sinks")
-	dbCfg := config.LoadPostgresConfig(envPrefix, svcName)
-	encryptionKey := config.LoadEncryptionKey(sinkPrefix)
-	svcCfg.EncryptionKey = encryptionKey.Key
 
 	// logger
 	var logger *zap.Logger
@@ -116,10 +110,8 @@ func main() {
 	}
 	sinksGRPCClient := sinksgrpc.NewClient(tracer, sinksGRPCConn, sinksGRPCTimeout, logger)
 	otelCfg := config.LoadOtelConfig(envPrefix)
-	db := connectToDB(dbCfg, logger)
-	defer db.Close()
 
-	svc := maestro.NewMaestroService(logger, streamEsClient, sinkerEsClient, sinksGRPCClient, otelCfg, db, svcCfg)
+	svc := maestro.NewMaestroService(logger, streamEsClient, sinkerEsClient, sinksGRPCClient, streamEsCfg, otelCfg)
 	errs := make(chan error, 2)
 
 	mainContext, mainCancelFunction := context.WithCancel(context.Background())
@@ -140,15 +132,6 @@ func main() {
 	logger.Error(fmt.Sprintf("Maestro service terminated: %s", err))
 }
 
-func connectToDB(cfg config.PostgresConfig, logger *zap.Logger) *sqlx.DB {
-	db, err := postgres.Connect(cfg)
-	if err != nil {
-		logger.Error("Failed to connect to postgres", zap.Error(err))
-		os.Exit(1)
-	}
-	return db
-}
-
 func connectToGRPC(cfg config.GRPCConfig, logger *zap.Logger) *grpc.ClientConn {
 	var opts []grpc.DialOption
 	tls, err := strconv.ParseBool(cfg.ClientTLS)
@@ -228,7 +211,7 @@ func loadStreamEsConfig(prefix string) config.EsConfig {
 	cfg.AllowEmptyEnv(true)
 	cfg.AutomaticEnv()
 	var esC config.EsConfig
-	_ = cfg.Unmarshal(&esC)
+	cfg.Unmarshal(&esC)
 	return esC
 }
 
@@ -243,6 +226,6 @@ func loadSinkerEsConfig(prefix string) config.EsConfig {
 	cfg.AllowEmptyEnv(true)
 	cfg.AutomaticEnv()
 	var esC config.EsConfig
-	_ = cfg.Unmarshal(&esC)
+	cfg.Unmarshal(&esC)
 	return esC
 }
diff --git a/cmd/sinker/main.go b/cmd/sinker/main.go
index 0e437f50c..e1ebf79cf 100644
--- a/cmd/sinker/main.go
+++ b/cmd/sinker/main.go
@@ -9,6 +9,7 @@
 package main
 
 import (
+	"context"
 	"fmt"
 	kitprometheus "github.com/go-kit/kit/metrics/prometheus"
 	"github.com/go-redis/redis/v8"
@@ -19,6 +20,10 @@ import (
 	"github.com/orb-community/orb/pkg/config"
 	policiesgrpc "github.com/orb-community/orb/policies/api/grpc"
 	"github.com/orb-community/orb/sinker"
+	sinkconfig "github.com/orb-community/orb/sinker/config"
+	cacheconfig "github.com/orb-community/orb/sinker/redis"
+	"github.com/orb-community/orb/sinker/redis/consumer"
+	"github.com/orb-community/orb/sinker/redis/producer"
 	sinksgrpc "github.com/orb-community/orb/sinks/api/grpc"
 	stdprometheus "github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
@@ -97,12 +102,6 @@ func main() {
 	}
 
 	cacheClient := connectToRedis(cacheCfg.URL, cacheCfg.Pass, cacheCfg.DB, logger)
-	defer func(client *redis.Client) {
-		err := client.Close()
-		if err != nil {
-			log.Fatalf(err.Error())
-		}
-	}(cacheClient)
 
 	esClient := connectToRedis(esCfg.URL, esCfg.Pass, esCfg.DB, logger)
 	defer func(esClient *redis.Client) {
@@ -169,6 +168,8 @@ func main() {
 	}
 	sinksGRPCClient := sinksgrpc.NewClient(tracer, sinksGRPCConn, sinksGRPCTimeout, logger)
 
+	configRepo := cacheconfig.NewSinkerCache(cacheClient, logger)
+	configRepo = producer.NewEventStoreMiddleware(configRepo, esClient, logger)
 	gauge := kitprometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
 		Namespace: "sinker",
 		Subsystem: "sink",
@@ -191,7 +192,7 @@ func main() {
 	otelEnabled := otelCfg.Enable == "true"
 	otelKafkaUrl := otelCfg.KafkaUrl
 
-	svc := sinker.New(logger, pubSub, esClient, cacheClient, policiesGRPCClient, fleetGRPCClient, sinksGRPCClient,
+	svc := sinker.New(logger, pubSub, esClient, configRepo, policiesGRPCClient, fleetGRPCClient, sinksGRPCClient,
 		otelKafkaUrl, otelEnabled, gauge, counter, inputCounter, inMemoryCacheConfig.DefaultExpiration)
 	defer func(svc sinker.Service) {
 		err := svc.Stop()
@@ -203,6 +204,7 @@ func main() {
 	errs := make(chan error, 2)
 
 	go startHTTPServer(svcCfg, errs, logger)
+	go subscribeToSinksES(svc, configRepo, esClient, esCfg, logger)
 
 	err = svc.Start()
 	if err != nil {
@@ -305,3 +307,11 @@ func initJaeger(svcName, url string, logger *zap.Logger) (opentracing.Tracer, io
 
 	return tracer, closer
 }
+
+func subscribeToSinksES(svc sinker.Service, configRepo sinkconfig.ConfigRepo, client *redis.Client, cfg config.EsConfig, logger *zap.Logger) {
+	eventStore := consumer.NewEventStore(svc, configRepo, client, cfg.Consumer, logger)
+	logger.Info("Subscribed to Redis Event Store for sinks")
+	if err := eventStore.Subscribe(context.Background()); err != nil {
+		logger.Error("Bootstrap service failed to subscribe to event sourcing", zap.Error(err))
+	}
+}
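The restored `subscribeToSinksES` helper above blocks on a Redis Streams consumer and dispatches sink events to the service. As an illustration of that consume loop only (a sketch; the real logic lives in `sinker/redis/consumer`, and the stream, group, and consumer names here are hypothetical):

```go
package example

import (
	"context"

	"github.com/go-redis/redis/v8"
	"go.uber.org/zap"
)

// subscribeLoop sketches the blocking consume pattern behind eventStore.Subscribe.
// Stream/group/consumer names are illustrative, not the project's actual keys.
func subscribeLoop(ctx context.Context, client *redis.Client, logger *zap.Logger) error {
	const stream, group, consumer = "orb.sinks", "sinker", "sinker-1"
	// Create the consumer group if missing; a BUSYGROUP reply means it already exists.
	if err := client.XGroupCreateMkStream(ctx, stream, group, "$").Err(); err != nil &&
		err.Error() != "BUSYGROUP Consumer Group name already exists" {
		return err
	}
	for {
		streams, err := client.XReadGroup(ctx, &redis.XReadGroupArgs{
			Group:    group,
			Consumer: consumer,
			Streams:  []string{stream, ">"}, // ">" = only messages never delivered to this group
			Count:    100,
		}).Result()
		if err != nil {
			return err // context cancellation also surfaces here
		}
		for _, s := range streams {
			for _, msg := range s.Messages {
				logger.Info("event received", zap.String("id", msg.ID))
				client.XAck(ctx, stream, group, msg.ID) // acknowledge after handling
			}
		}
	}
}
```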
diff --git a/cmd/sinks/main.go b/cmd/sinks/main.go
index bcce2bc0e..be4dffe70 100644
--- a/cmd/sinks/main.go
+++ b/cmd/sinks/main.go
@@ -11,18 +11,6 @@ package main
 import (
 	"context"
 	"fmt"
-	"io"
-	"io/ioutil"
-	"log"
-	"net"
-	"net/http"
-	"os"
-	"os/signal"
-	"strconv"
-	"strings"
-	"syscall"
-	"time"
-
 	authapi "github.com/mainflux/mainflux/auth/api/grpc"
 	mfsdk "github.com/mainflux/mainflux/pkg/sdk/go"
 	opentracing "github.com/opentracing/opentracing-go"
@@ -39,6 +27,17 @@ import (
 	"go.uber.org/zap"
 	"go.uber.org/zap/zapcore"
 	"google.golang.org/grpc/reflection"
+	"io"
+	"io/ioutil"
+	"log"
+	"net"
+	"net/http"
+	"os"
+	"os/signal"
+	"strconv"
+	"strings"
+	"syscall"
+	"time"
 
 	kitprometheus "github.com/go-kit/kit/metrics/prometheus"
 	r "github.com/go-redis/redis/v8"
@@ -127,7 +126,6 @@ func main() {
 	go startHTTPServer(tracer, svc, svcCfg, logger, errs)
 	go startGRPCServer(svc, tracer, sinksGRPCCfg, logger, errs)
 	go subscribeToSinkerES(svc, esClient, esCfg, logger)
-	go subscribeToMaestroStatusES(svc, esClient, esCfg, logger)
 
 	go func() {
 		c := make(chan os.Signal)
@@ -195,7 +193,7 @@ func newSinkService(auth mainflux.AuthServiceClient, logger *zap.Logger, esClien
 	mfsdk := mfsdk.NewSDK(config)
 
 	svc := sinks.NewSinkService(logger, auth, repoSink, mfsdk, passwordService)
-	svc = redisprod.NewSinkStreamProducerMiddleware(svc, esClient)
+	svc = redisprod.NewEventStoreMiddleware(svc, esClient)
 	svc = sinkshttp.NewLoggingMiddleware(svc, logger)
 	svc = sinkshttp.MetricsMiddleware(
 		auth,
@@ -288,14 +286,6 @@ func subscribeToSinkerES(svc sinks.SinkService, client *r.Client, cfg config.EsC
 	eventStore := rediscons.NewEventStore(svc, client, cfg.Consumer, logger)
 	logger.Info("Subscribed to Redis Event Store for sinker")
 	if err := eventStore.Subscribe(context.Background()); err != nil {
-		logger.Error("Bootstrap service failed to subscribe to sinker event sourcing", zap.Error(err))
-	}
-}
-
-func subscribeToMaestroStatusES(svc sinks.SinkService, client *r.Client, cfg config.EsConfig, logger *zap.Logger) {
-	eventStore := rediscons.NewSinkStatusListener(logger, client, svc)
-	logger.Info("Subscribed to Redis Event Store for maestro")
-	if err := eventStore.SubscribeToMaestroSinkStatus(context.Background()); err != nil {
-		logger.Error("Bootstrap service failed to subscribe to maestro event sourcing", zap.Error(err))
+		logger.Error("Bootstrap service failed to subscribe to event sourcing", zap.Error(err))
 	}
 }
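Note how `newSinkService` assembles the service as a chain of wrappers: the core service is decorated with an event-store producer, then logging, then metrics. A minimal sketch of that decorator pattern, with a hypothetical one-method interface standing in for `sinks.SinkService`:

```go
package example

import "go.uber.org/zap"

// Service stands in for sinks.SinkService; one method keeps the sketch short.
type Service interface {
	CreateSink(name string) error
}

type coreService struct{}

func (coreService) CreateSink(name string) error { return nil }

// loggingMiddleware decorates any Service, adding logging without touching the core.
type loggingMiddleware struct {
	next   Service
	logger *zap.Logger
}

func (l loggingMiddleware) CreateSink(name string) error {
	err := l.next.CreateSink(name)
	l.logger.Info("method call: create_sink", zap.String("name", name), zap.Error(err))
	return err
}

// New assembles the chain innermost-first, mirroring the wiring in newSinkService.
func New(logger *zap.Logger) Service {
	var svc Service = coreService{}
	svc = loggingMiddleware{next: svc, logger: logger}
	return svc
}
```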
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 9274f6067..3a740b963 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -13,7 +13,7 @@ RUN make $SVC \
 FROM alpine:latest
 ARG SVC
-RUN if [[ "maestro" == "$SVC" ]]; then apk update && apk add --no-cache docker-cli bash curl && curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.27.4/bin/linux/amd64/kubectl && chmod +x ./kubectl && mv ./kubectl /usr/local/bin/kubectl; fi
+RUN if [[ "maestro" == "$SVC" ]]; then apk update && apk add --no-cache docker-cli bash curl && curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl && chmod +x ./kubectl && mv ./kubectl /usr/local/bin/kubectl; fi
 
 # Certificates are needed so that mailing util can work.
 COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
 COPY --from=builder /exe /
diff --git a/fleet/api/http/logging.go b/fleet/api/http/logging.go
index d578f615d..f1493a395 100644
--- a/fleet/api/http/logging.go
+++ b/fleet/api/http/logging.go
@@ -25,7 +25,7 @@ func (l loggingMiddleware) ViewAgentMatchingGroupsByIDInternal(ctx context.Conte
 				zap.Error(err),
 				zap.Duration("duration", time.Since(begin)))
 		} else {
-			l.logger.Debug("method call: view_agent_matching_groups_by_idinternal",
+			l.logger.Info("method call: view_agent_matching_groups_by_idinternal",
 				zap.Duration("duration", time.Since(begin)))
 		}
 	}(time.Now())
@@ -39,7 +39,7 @@ func (l loggingMiddleware) ResetAgent(ct context.Context, token string, agentID
 				zap.Error(err),
 				zap.Duration("duration", time.Since(begin)))
 		} else {
-			l.logger.Debug("method call: reset_agent",
+			l.logger.Info("method call: reset_agent",
 				zap.Duration("duration", time.Since(begin)))
 		}
 	}(time.Now())
@@ -53,7 +53,7 @@ func (l loggingMiddleware) ViewAgentInfoByChannelIDInternal(ctx context.Context,
 				zap.Error(err),
 				zap.Duration("duration", time.Since(begin)))
 		} else {
-			l.logger.Debug("method call: view_owner_by_channel_id",
+			l.logger.Info("method call: view_owner_by_channel_id",
 				zap.Duration("duration", time.Since(begin)))
 		}
 	}(time.Now())
@@ -67,7 +67,7 @@ func (l loggingMiddleware) ViewAgentBackend(ctx context.Context, token string, n
 				zap.Error(err),
 				zap.Duration("duration", time.Since(begin)))
 		} else {
-			l.logger.Debug("method call: view_agent_backend",
+			l.logger.Info("method call: view_agent_backend",
 				zap.Duration("duration", time.Since(begin)))
 		}
 	}(time.Now())
@@ -81,7 +81,7 @@ func (l loggingMiddleware) ListAgentBackends(ctx context.Context, token string)
 				zap.Error(err),
 				zap.Duration("duration", time.Since(begin)))
 		} else {
-			l.logger.Debug("method call: list_agent_backends",
+			l.logger.Info("method call: list_agent_backends",
 				zap.Duration("duration", time.Since(begin)))
 		}
 	}(time.Now())
@@ -95,7 +95,7 @@ func (l loggingMiddleware) ViewAgentByIDInternal(ctx context.Context, ownerID st
 				zap.Error(err),
 				zap.Duration("duration", time.Since(begin)))
 		} else {
-			l.logger.Debug("method call: view_agent_by_id_internal",
+			l.logger.Info("method call: view_agent_by_id_internal",
 				zap.Duration("duration", time.Since(begin)))
 		}
 	}(time.Now())
@@ -109,7 +109,7 @@ func (l loggingMiddleware) ViewAgentByID(ctx context.Context, token string, thin
 				zap.Error(err),
 				zap.Duration("duration", time.Since(begin)))
 		} else {
-			l.logger.Debug("method call: view_agent_by_id",
+			l.logger.Info("method call: view_agent_by_id",
 				zap.Duration("duration", time.Since(begin)))
 		}
 	}(time.Now())
@@ -123,7 +123,7 @@ func (l loggingMiddleware) ViewAgentMatchingGroupsByID(ctx context.Context, toke
 				zap.Error(err),
 				zap.Duration("duration", time.Since(begin)))
 		} else {
-			l.logger.Debug("method call: view_agent_matching_groups_by_id",
+			l.logger.Info("method call: view_agent_matching_groups_by_id",
 				zap.Duration("duration", time.Since(begin)))
 		}
 	}(time.Now())
@@ -137,7 +137,7 @@ func (l loggingMiddleware) EditAgent(ctx context.Context, token string, agent fl
 				zap.Error(err),
 				zap.Duration("duration", time.Since(begin)))
 		} else {
-			l.logger.Debug("method call: edit_agent_by_id",
+			l.logger.Info("method call: edit_agent_by_id",
 				zap.Duration("duration", time.Since(begin)))
 		}
 	}(time.Now())
@@ -151,7 +151,7 @@ func (l loggingMiddleware) ViewAgentGroupByIDInternal(ctx context.Context, group
 				zap.Error(err),
 				zap.Duration("duration", time.Since(begin)))
 		} else {
-			l.logger.Debug("method call: view_agent_group_by_id_internal",
+			l.logger.Info("method call: view_agent_group_by_id_internal",
 				zap.Duration("duration", time.Since(begin)))
 		}
 	}(time.Now())
@@ -165,7 +165,7 @@ func (l loggingMiddleware) ViewAgentGroupByID(ctx context.Context, groupID strin
 				zap.Error(err),
 				zap.Duration("duration", time.Since(begin)))
 		} else {
-			l.logger.Debug("method call: view_agent_group_by_id",
+			l.logger.Info("method call: view_agent_group_by_id",
 				zap.Duration("duration", time.Since(begin)))
 		}
 	}(time.Now())
@@ -179,7 +179,7 @@ func (l loggingMiddleware) ListAgentGroups(ctx context.Context, token string, pm
 				zap.Error(err),
 				zap.Duration("duration", time.Since(begin)))
 		} else {
-			l.logger.Debug("method call: list_agent_groups",
+			l.logger.Info("method call: list_agent_groups",
 				zap.Duration("duration", time.Since(begin)))
 		}
 	}(time.Now())
@@ -193,7 +193,7 @@ func (l loggingMiddleware) EditAgentGroup(ctx context.Context, token string, ag
 				zap.Error(err),
 				zap.Duration("duration", time.Since(begin)))
 		} else {
-			l.logger.Debug("method call: edit_agent_groups",
+			l.logger.Info("method call: edit_agent_groups",
 				zap.Duration("duration", time.Since(begin)))
 		}
 	}(time.Now())
@@ -207,7 +207,7 @@ func (l loggingMiddleware) ListAgents(ctx context.Context, token string, pm flee
 				zap.Error(err),
 				zap.Duration("duration", time.Since(begin)))
 		} else {
-			l.logger.Debug("method call: list_agents",
+			l.logger.Info("method call: list_agents",
 				zap.Duration("duration", time.Since(begin)))
 		}
 	}(time.Now())
@@ -222,7 +222,7 @@ func (l loggingMiddleware) CreateAgent(ctx context.Context, token string, a flee
 				zap.Error(err),
 				zap.Duration("duration", time.Since(begin)))
 		} else {
-			l.logger.Debug("method call: create_agent",
+			l.logger.Info("method call: create_agent",
 				zap.String("name", a.Name.String()),
 				zap.Duration("duration", time.Since(begin)))
 		}
@@ -238,7 +238,7 @@ func (l loggingMiddleware) CreateAgentGroup(ctx context.Context, token string, s
 				zap.Error(err),
 				zap.Duration("duration", time.Since(begin)))
 		} else {
-			l.logger.Debug("method call: create_agent_group",
+			l.logger.Info("method call: create_agent_group",
 				zap.String("name", s.Name.String()),
 				zap.Duration("duration", time.Since(begin)))
 		}
@@ -253,7 +253,7 @@ func (l loggingMiddleware) RemoveAgentGroup(ctx context.Context, token, groupID
 				zap.Error(err),
 				zap.Duration("duration", time.Since(begin)))
 		} else {
-			l.logger.Debug("method call: delete_agent_groups",
+			l.logger.Info("method call: delete_agent_groups",
 				zap.Duration("duration", time.Since(begin)))
 		}
 	}(time.Now())
@@ -268,7 +268,7 @@ func (l loggingMiddleware) ValidateAgentGroup(ctx context.Context, token string,
 				zap.Error(err),
 				zap.Duration("duration", time.Since(begin)))
 		} else {
-			l.logger.Debug("method call: validate_agent_group",
+			l.logger.Info("method call: validate_agent_group",
 				zap.String("name", s.Name.String()),
 				zap.Duration("duration", time.Since(begin)))
 		}
@@ -284,7 +284,7 @@ func (l loggingMiddleware) ValidateAgent(ctx context.Context, token string, a fl
 				zap.Error(err),
 				zap.Duration("duration", time.Since(begin)))
 		} else {
-			l.logger.Debug("method call: validate_agent",
+			l.logger.Info("method call: validate_agent",
 				zap.String("name", a.Name.String()),
 				zap.Duration("duration", time.Since(begin)))
 		}
@@ -299,7 +299,7 @@ func (l loggingMiddleware) RemoveAgent(ctx context.Context, token, thingID strin
 				zap.Error(err),
 				zap.Duration("duration", time.Since(begin)))
 		} else {
-			l.logger.Debug("method call: delete_agent",
+			l.logger.Info("method call: delete_agent",
 				zap.Duration("duration", time.Since(begin)))
 		}
 	}(time.Now())
@@ -314,7 +314,7 @@ func (l loggingMiddleware) GetPolicyState(ctx context.Context, agent fleet.Agent
 				zap.Error(err),
 				zap.Duration("duration", time.Since(begin)))
 		} else {
-			l.logger.Debug("method call: get_policy_state",
+			l.logger.Info("method call: get_policy_state",
 				zap.Duration("duration", time.Since(begin)))
 		}
 	}(time.Now())
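Every hunk in this file is the same one-level change: the success branch of the middleware's timing log moves from `Debug` to `Info`. The underlying idiom passes `time.Now()` as an argument to a deferred closure, so the start time is captured at entry while the log fires at return. A self-contained sketch of just that idiom (not the project's middleware itself):

```go
package example

import (
	"time"

	"go.uber.org/zap"
)

// timeCall logs the duration and outcome of fn using the same deferred pattern
// as the fleet logging middleware; the named return lets the closure observe err.
func timeCall(logger *zap.Logger, fn func() error) (err error) {
	defer func(begin time.Time) { // time.Now() below is evaluated immediately, at entry
		if err != nil {
			logger.Warn("method call failed",
				zap.Error(err),
				zap.Duration("duration", time.Since(begin)))
		} else {
			logger.Info("method call",
				zap.Duration("duration", time.Since(begin)))
		}
	}(time.Now())
	return fn()
}
```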
diff --git a/kind/Chart.lock b/kind/Chart.lock
new file mode 100644
index 000000000..53732b11b
--- /dev/null
+++ b/kind/Chart.lock
@@ -0,0 +1,6 @@
+dependencies:
+- name: orb
+  repository: https://orb-community.github.io/orb-helm/
+  version: 1.0.44
+digest: sha256:054a0e4810a7d857f4c0b156bb92e909f485096242098f62ab5b558140e48a22
+generated: "2023-02-13T13:18:58.67925487-03:00"
diff --git a/kind/Chart.yaml b/kind/Chart.yaml
index dcbbcf6a7..267789e22 100644
--- a/kind/Chart.yaml
+++ b/kind/Chart.yaml
@@ -17,5 +17,5 @@ appVersion: "1.0.0"
 
 dependencies:
   - name: orb
-    version: "1.0.53"
+    version: "1.0.44"
     repository: "@orb-community"
diff --git a/kind/README.md b/kind/README.md
index 5ef3ed777..ea6487b08 100644
--- a/kind/README.md
+++ b/kind/README.md
@@ -2,6 +2,7 @@
 
 The following steps must be performed at the **root of the Orb project** to set up a local k8s cluster and deploy Orb.
 
+
 ## 🧱 Requirements
 
 - [Docker Environment](#docker)
@@ -11,150 +12,118 @@
 
 > **💡 Note:** If you have those installed, please skip to [Deploy Orb on Kind](#deploy-orb-kind).
 
-> ⚠️ You may need to permit ports 80 and 443 (_ingress_) because of [kubernetes](https://kubernetes.io/docs/concepts/services-networking/ingress/).
+> ⚠️ You may need to permit ports 80 and 443 (*ingress*) because of [kubernetes](https://kubernetes.io/docs/concepts/services-networking/ingress/).
 
-
 ## 🐳 Docker Environment (Requirement)
 
 Quick install a **Docker** executing:
-
 ```shell
 make install-docker
 ```
 
 Check if you have a **Docker** running by executing:
-
 ```shell
 docker version
 ```
-
 If you need help to set up a **Docker Environment**, follow the [steps from here](https://docs.docker.com/engine/install/debian/).
 
-
 ## ⚓ Helm 3 (Requirement)
 
 [Helm](https://helm.sh/) is a package manager for Kubernetes. A Helm Chart is a package that allows you to customize your deployment on Kubernetes.
 
 Quick install a **Helm 3** executing:
-
 ```shell
 make install-helm
 ```
 
 Check if you have **Helm 3** installed by executing:
-
 ```shell
 helm version
 ```
-
 If you need help to install **Helm 3**, follow the [steps from here](https://helm.sh/docs/intro/install/).
 
 > 🚨 **Warning:** Make sure you have version 3 installed, orb helm charts don't officially support helm 2.
 
-
 ## 🐋 Kubectl (Requirement)
 
 Quick install a **Kubectl** executing:
-
 ```shell
 make install-kubectl
 ```
 
 Check if you have **Kubectl** cmd installed by executing:
-
 ```shell
 kubectl version --client
 ```
-
 If you need help to install **Kubectl**, follow the [steps from here](https://kubernetes.io/docs/tasks/tools/).
 
-
 ## 🚢 Install Kind (Requirement)
 
 Kind is a tool for running local k8s clusters using docker container as nodes.
 
 Quick install a **Kind** on Linux executing:
-
 ```shell
 make install-kind
 ```
 
 If you have `go 1.17 or later` installed:
-
 ```shell
 go install sigs.k8s.io/kind@v0.14.0
 ```
 
 macOS users can also use `brew`:
-
 ```shell
 brew install kind
 ```
 
 > 🚨 **Windows WSL users**: WSL is also supported, but for some reason the Orb stack messes up the WSL internal DNS.
 > You can fix that by editing your `/etc/wsl.conf` and adding the following:
->
 > ```shell
 > [network]
 > generateResolvConf = false
 > ```
->
 > Restart WSL by executing the following on CMD:
->
 > ```shell
 > wsl --shutdown
 > ```
->
 > Open WSL terminal again and remove the symbolic link from `/etc/resolv.conf`:
->
 > ```shell
 > sudo unlink /etc/resolv.conf
 > ```
->
 > Create a new `/etc/resolv.conf` file and add the following:
->
 > ```shell
 > nameserver 8.8.8.8
 > ```
->
 > Save the file and you are done.
 
-
 ## 🐋 k9s (Optional)
 
 Quick install a **k9s** to manage your cluster executing:
-
 ```shell
 make install-k9s
 ```
 
-
-## 🚀 Deploy Orb on Kind
+## 🚀 Deploy Orb on Kind
 
 Add `kubernetes.docker.internal` host as `127.0.0.1` address in your hosts file:
-
 ```shell
 echo "127.0.0.1 kubernetes.docker.internal" | sudo tee -a /etc/hosts
 ```
-
 > **💡 Note:** This is needed just once
 
 Setup **Orb Charts** dependencies repositories:
-
 ```shell
 make prepare-helm
 ```
-
 > **💡 Note:** You only need to run the steps up to here once, even if you delete the cluster afterwards.
 
 Use the following command to create the cluster and deploy **Orb**:
-
 ```shell
 make run
 ```
@@ -165,31 +134,28 @@
 E-mail | Password | Role
 admin@kind.com | pass123456 | Admin
 
 Have fun! 🎉 When you are done, you can delete the cluster by running:
-
 ```shell
 make kind-delete-cluster
 ```
 
 ## Development flow with Kind
 
-Use the following command to create the empty cluster:
+Use the following command to create the empty cluster:
 ```shell
 make kind-create-cluster
 ```
-
 > **💡 Note:** Now you have an empty kind cluster with the minimum necessary to spin up pods
 
-Let's add helm charts for orb:
+Let's add helm charts for orb:
 ```shell
 make prepare-helm
 ```
-
 > **💡 Note:** Now your dependencies are configured
 
-Building all orb images:
+Building all orb images:
 ```shell
 make dockers
 ```
@@ -197,7 +163,6 @@ make dockers
 > **💡 Note:** This can take some time
 
 Loading all images into the kind cluster:
-
 ```shell
 make kind-load-images
 ```
@@ -205,15 +170,14 @@ make kind-load-images
 > **💡 Note:** You are loading from your local docker registry to the kind cluster registry
 
 Load just one image to the kind cluster
-
 ```shell
 kind load docker-image orbcommunity/orb-maestro:0.22.0-088bee14
 ```
 
 > **💡 Note:** Do not forget to change **kind/values.yaml** manifest to use your image tag
 
-Install orb application:
+Install orb application:
 ```shell
 make kind-install-orb
 ```
@@ -221,29 +185,23 @@ make kind-install-orb
 > **💡 Note:** Now orb is installed properly
 
 If you have any problems loading your new deployment use:
-
 ```shell
 kubectl rollout restart deployment -n orb
 ```
 
-
 ## Updating inflight service with recent development
+
 If you want to change a service, let's say you added some logs to the fleet service, before committing the changes, add this
-
 ```shell
 SERVICE=fleet make build_docker
 ```
-
 This will build only the docker image of the new service. After changing you can simply execute
-
 ```shell
 make kind-upgrade-all
 ```
 
-Also you can load image using kind command individually, and upgrade your deployment with helm command. You can use redis-commander and adminer to interact with databases on kind environment
-
 ❌ Is it not working correctly? Found a bug? Come talk to us [live on Slack](https://netdev.chat/) in the `#orb` channel, or [file a GitHub issue here](https://github.com/orb-community/orb/issues/new/choose).
diff --git a/kind/adminer/deployment.yaml b/kind/adminer/deployment.yaml
deleted file mode 100644
index 0ab940ec0..000000000
--- a/kind/adminer/deployment.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: adminer
-  labels:
-    app: adminer
-spec:
-  selector:
-    matchLabels:
-      app: adminer
-  template:
-    metadata:
-      labels:
-        app: adminer
-    spec:
-      containers:
-        - name: adminer
-          image: adminer:latest
-          ports:
-            - containerPort: 8080
\ No newline at end of file
diff --git a/kind/redis-commander/deployment.yaml b/kind/redis-commander/deployment.yaml
deleted file mode 100644
index 8c9c6274d..000000000
--- a/kind/redis-commander/deployment.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: redis-commander
-  annotations:
-    # Tell Kubernetes to apply the AppArmor or SecComp profile "runtime/default". (whatever is used)
-    # Note that this is ignored if the Kubernetes node is not running version 1.4 or greater.
-    # and fails if AppArmor enabled but profile not found (may happens on borked k8s installs only)
-    # set to "unconfined" to disable AppArmor (first annotation) or SecComp (second annotation)
-    container.apparmor.security.beta.kubernetes.io/redis-commander: runtime/default
-    container.security.alpha.kubernetes.io/redis-commander: runtime/default
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: redis-commander
-  template:
-    metadata:
-      labels:
-        app: redis-commander
-        tier: backend
-    spec:
-      automountServiceAccountToken: false
-      containers:
-      - name: redis-commander
-        image: rediscommander/redis-commander
-        imagePullPolicy: Always
-        env:
-        - name: REDIS_HOSTS
-          value: "stream-redis:kind-orb-redis-streams-master.orb.svc.cluster.local:6379,sinker-redis:kind-orb-redis-sinker-master.orb.svc.cluster.local:6379:1"
-        - name: K8S_SIGTERM
-          value: "1"
-        - name: HTTP_USER
-          value: "admin"
-        - name: HTTP_PASSWORD
-          value: "admin"
-        ports:
-        - name: redis-commander
-          containerPort: 8081
-        livenessProbe:
-          httpGet:
-            path: /favicon.png
-            port: 8081
-          initialDelaySeconds: 10
-          timeoutSeconds: 5
-        # adapt to your needs base on data stored inside redis (number of keys and size of biggest keys)
-        # or comment out for less secure installation
-        resources:
-          limits:
-            cpu: "500m"
-            memory: "512M"
-        securityContext:
-          runAsNonRoot: true
-          readOnlyRootFilesystem: false
-          allowPrivilegeEscalation: false
-          capabilities:
-            drop:
-            - ALL
\ No newline at end of file
diff --git a/kind/values.yaml b/kind/values.yaml
index 250e31f7d..40662d11e 100644
--- a/kind/values.yaml
+++ b/kind/values.yaml
@@ -30,6 +30,13 @@ orb:
       repository: "orbcommunity"
       tag: "develop"
 
+  sinker:
+    image:
+      name: "orb-sinker"
+      pullPolicy: "IfNotPresent"
+      repository: "orbcommunity"
+      tag: "develop"
+
   sinkerOtel:
     image:
       name: "orb-sinker"
diff --git a/maestro/config/authentication_builder.go b/maestro/config/authentication_builder.go
index 6555193a1..1a49f5b2e 100644
--- a/maestro/config/authentication_builder.go
+++ b/maestro/config/authentication_builder.go
@@ -1,7 +1,6 @@
 package config
 
 import (
-	"github.com/orb-community/orb/maestro/password"
 	"github.com/orb-community/orb/pkg/types"
 	"github.com/orb-community/orb/sinks/authentication_type/basicauth"
 )
@@ -10,26 +9,22 @@ const AuthenticationKey = "authentication"
 
 type AuthBuilderService interface {
 	GetExtensionsFromMetadata(config types.Metadata) (Extensions, string)
-	DecodeAuth(config types.Metadata) (types.Metadata, error)
-	EncodeAuth(config types.Metadata) (types.Metadata, error)
 }
 
-func GetAuthService(authType string, service password.EncryptionService) AuthBuilderService {
+func GetAuthService(authType string) AuthBuilderService {
 	switch authType {
 	case basicauth.AuthType:
-		return &BasicAuthBuilder{
-			encryptionService: service,
-		}
+		return &BasicAuthBuilder{}
 	}
 	return nil
 }
 
 type BasicAuthBuilder struct {
-	encryptionService password.EncryptionService
 }
 
-func (b *BasicAuthBuilder) GetExtensionsFromMetadata(c types.Metadata) (Extensions, string) {
-	authcfg := c.GetSubMetadata(AuthenticationKey)
+func (b *BasicAuthBuilder) GetExtensionsFromMetadata(config types.Metadata) (Extensions, string) {
+
+	authcfg := config.GetSubMetadata(AuthenticationKey)
 	username := authcfg["username"].(string)
 	password := authcfg["password"].(string)
 	return Extensions{
@@ -41,27 +36,3 @@ func (b *BasicAuthBuilder) GetExtensionsFromMetadata(c types.Metadata) (Extensio
 		},
 	}, "basicauth/exporter"
 }
-
-func (b *BasicAuthBuilder) DecodeAuth(config types.Metadata) (types.Metadata, error) {
-	authCfg := config.GetSubMetadata(AuthenticationKey)
-	password := authCfg["password"].(string)
-	decodedPassword, err := b.encryptionService.DecodePassword(password)
-	if err != nil {
-		return nil, err
-	}
-	authCfg["password"] = decodedPassword
-	config[AuthenticationKey] = authCfg
-	return config, nil
-}
-
-func (b *BasicAuthBuilder) EncodeAuth(config types.Metadata) (types.Metadata, error) {
-	authcfg := config.GetSubMetadata(AuthenticationKey)
-	password := authcfg["password"].(string)
-	encodedPassword, err := b.encryptionService.EncodePassword(password)
-	if err != nil {
-		return nil, err
-	}
-	authcfg["password"] = encodedPassword
-	config[AuthenticationKey] = authcfg
-	return config, nil
-}
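One sharp edge worth noting in `GetExtensionsFromMetadata`: `authcfg["username"].(string)` is an unchecked type assertion and will panic if the sink metadata is malformed. A defensive variant of the same lookup using comma-ok assertions (a sketch for illustration, not the project's code):

```go
package example

import (
	"errors"

	"github.com/orb-community/orb/pkg/types"
)

const AuthenticationKey = "authentication"

// credentialsFromMetadata mirrors the lookup in GetExtensionsFromMetadata but
// returns an error instead of panicking on malformed metadata.
func credentialsFromMetadata(config types.Metadata) (username, password string, err error) {
	authcfg := config.GetSubMetadata(AuthenticationKey)
	if authcfg == nil {
		return "", "", errors.New("missing authentication metadata")
	}
	username, ok := authcfg["username"].(string)
	if !ok {
		return "", "", errors.New("username is not a string")
	}
	password, ok = authcfg["password"].(string)
	if !ok {
		return "", "", errors.New("password is not a string")
	}
	return username, password, nil
}
```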
diff --git a/maestro/config/config_builder.go b/maestro/config/config_builder.go
index 007c6320a..ea5cb79d9 100644
--- a/maestro/config/config_builder.go
+++ b/maestro/config/config_builder.go
@@ -353,36 +353,34 @@ var JsonDeployment = `
 }
 `
 
-func (c *configBuilder) BuildDeploymentConfig(deployment *DeploymentRequest) (string, error) {
+func GetDeploymentJson(kafkaUrl string, sink SinkData) (string, error) {
 	// prepare manifest
-	manifest := strings.Replace(k8sOtelCollector, "SINK_ID", deployment.SinkID, -1)
-	ctx := context.WithValue(context.Background(), "sink_id", deployment.SinkID)
-	config, err := c.ReturnConfigYamlFromSink(ctx, c.kafkaUrl, deployment)
+	manifest := strings.Replace(k8sOtelCollector, "SINK_ID", sink.SinkID, -1)
+	config, err := ReturnConfigYamlFromSink(context.Background(), kafkaUrl, sink)
 	if err != nil {
-		return "", errors.Wrap(errors.New(fmt.Sprintf("failed to build YAML, sink: %s", deployment.SinkID)), err)
+		return "", errors.Wrap(errors.New(fmt.Sprintf("failed to build YAML, sink: %s", sink.SinkID)), err)
 	}
 	manifest = strings.Replace(manifest, "SINK_CONFIG", config, -1)
 	return manifest, nil
 }
 
 // ReturnConfigYamlFromSink this is the main method, which will generate the YAML file from the
-func (c *configBuilder) ReturnConfigYamlFromSink(_ context.Context, kafkaUrlConfig string, deployment *DeploymentRequest) (string, error) {
-	authType := deployment.Config.GetSubMetadata(AuthenticationKey)["type"]
+func ReturnConfigYamlFromSink(_ context.Context, kafkaUrlConfig string, sink SinkData) (string, error) {
+	authType := sink.Config.GetSubMetadata(AuthenticationKey)["type"]
 	authTypeStr, ok := authType.(string)
 	if !ok {
 		return "", errors.New("failed to create config invalid authentication type")
 	}
-	// TODO move this into somewhere else
-	authBuilder := GetAuthService(authTypeStr, c.encryptionService)
+	authBuilder := GetAuthService(authTypeStr)
 	if authBuilder == nil {
 		return "", errors.New("invalid authentication type")
 	}
-	exporterBuilder := FromStrategy(deployment.Backend)
+	exporterBuilder := FromStrategy(sink.Backend)
 	if exporterBuilder == nil {
 		return "", errors.New("invalid backend")
 	}
-	extensions, extensionName := authBuilder.GetExtensionsFromMetadata(deployment.Config)
-	exporters, exporterName := exporterBuilder.GetExportersFromMetadata(deployment.Config, extensionName)
+	extensions, extensionName := authBuilder.GetExtensionsFromMetadata(sink.Config)
+	exporters, exporterName := exporterBuilder.GetExportersFromMetadata(sink.Config, extensionName)
 	if exporterName == "" {
 		return "", errors.New("failed to build exporter")
 	}
@@ -414,7 +412,7 @@ func (c *configBuilder) ReturnConfigYamlFromSink(_ context.Context, kafkaUrlConf
 		Receivers: Receivers{
 			Kafka: KafkaReceiver{
 				Brokers:         []string{kafkaUrlConfig},
-				Topic:           fmt.Sprintf("otlp_metrics-%s", deployment.SinkID),
+				Topic:           fmt.Sprintf("otlp_metrics-%s", sink.SinkID),
 				ProtocolVersion: "2.0.0",
 			},
 		},
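`GetDeploymentJson` works by plain token replacement: every `SINK_ID` placeholder in the stored collector manifest is swapped for the sink's ID, and the generated collector YAML is spliced in as `SINK_CONFIG`. A reduced sketch of that mechanism, with a shortened stand-in template (the real template is `k8sOtelCollector`):

```go
package example

import "strings"

// manifestTemplate is a shortened stand-in for the k8sOtelCollector manifest.
const manifestTemplate = `{"name": "otel-SINK_ID", "config": "SINK_CONFIG"}`

// renderManifest mirrors GetDeploymentJson's replace-then-splice approach.
func renderManifest(sinkID, collectorYaml string) string {
	manifest := strings.Replace(manifestTemplate, "SINK_ID", sinkID, -1)
	return strings.Replace(manifest, "SINK_CONFIG", collectorYaml, -1)
}
```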
diff --git a/maestro/config/config_builder_test.go b/maestro/config/config_builder_test.go
index 8981220a6..9e7cd2d04 100644
--- a/maestro/config/config_builder_test.go
+++ b/maestro/config/config_builder_test.go
@@ -3,18 +3,16 @@ package config
 import (
 	"context"
 	"fmt"
-	"github.com/orb-community/orb/maestro/password"
 	"github.com/orb-community/orb/pkg/types"
-	"go.uber.org/zap"
 	"testing"
+	"time"
 )
 
 func TestReturnConfigYamlFromSink(t *testing.T) {
 	type args struct {
 		in0            context.Context
 		kafkaUrlConfig string
-		sink           *DeploymentRequest
-		key            string
+		sink           SinkData
 	}
 	tests := []struct {
 		name    string
@@ -27,7 +25,7 @@ func TestReturnConfigYamlFromSink(t *testing.T) {
 			args: args{
 				in0:            context.Background(),
 				kafkaUrlConfig: "kafka:9092",
-				sink: &DeploymentRequest{
+				sink: SinkData{
 					SinkID:  "sink-id-11",
 					OwnerID: "11",
 					Backend: "prometheus",
@@ -41,6 +39,9 @@ func TestReturnConfigYamlFromSink(t *testing.T) {
 							"password": "dbpass",
 						},
 					},
+					State:           0,
+					Msg:             "",
+					LastRemoteWrite: time.Time{},
 				},
 			},
 			want: `---\nreceivers:\n kafka:\n brokers:\n - kafka:9092\n topic: otlp_metrics-sink-id-11\n protocol_version: 2.0.0\nextensions:\n pprof:\n endpoint: 0.0.0.0:1888\n basicauth/exporter:\n client_auth:\n username: prom-user\n password: dbpass\nexporters:\n prometheusremotewrite:\n endpoint: https://acme.com/prom/push\n auth:\n authenticator: basicauth/exporter\nservice:\n extensions:\n - pprof\n - basicauth/exporter\n pipelines:\n metrics:\n receivers:\n - kafka\n exporters:\n - prometheusremotewrite\n`,
@@ -51,7 +52,7 @@ func TestReturnConfigYamlFromSink(t *testing.T) {
 			args: args{
 				in0:            context.Background(),
 				kafkaUrlConfig: "kafka:9092",
-				sink: &DeploymentRequest{
+				sink: SinkData{
 					SinkID:  "sink-id-11",
 					OwnerID: "11",
 					Backend: "prometheus",
@@ -68,6 +69,9 @@ func TestReturnConfigYamlFromSink(t *testing.T) {
 							"password": "dbpass",
 						},
 					},
+					State:           0,
+					Msg:             "",
+					LastRemoteWrite: time.Time{},
 				},
 			},
 			want: `---\nreceivers:\n kafka:\n brokers:\n - kafka:9092\n topic: otlp_metrics-sink-id-11\n protocol_version: 2.0.0\nextensions:\n pprof:\n endpoint: 0.0.0.0:1888\n basicauth/exporter:\n client_auth:\n username: prom-user\n password: dbpass\nexporters:\n prometheusremotewrite:\n endpoint: https://acme.com/prom/push\n headers:\n X-Scope-OrgID: TENANT_1\n auth:\n authenticator: basicauth/exporter\nservice:\n extensions:\n - pprof\n - basicauth/exporter\n pipelines:\n metrics:\n receivers:\n - kafka\n exporters:\n - prometheusremotewrite\n`,
@@ -78,7 +82,7 @@ func TestReturnConfigYamlFromSink(t *testing.T) {
 			args: args{
 				in0:            context.Background(),
 				kafkaUrlConfig: "kafka:9092",
-				sink: &DeploymentRequest{
+				sink: SinkData{
 					SinkID:  "sink-id-22",
 					OwnerID: "22",
 					Backend: "otlphttp",
@@ -92,6 +96,9 @@ func TestReturnConfigYamlFromSink(t *testing.T) {
 							"password": "dbpass",
 						},
 					},
+					State:           0,
+					Msg:             "",
+					LastRemoteWrite: time.Time{},
 				},
 			},
 			want: `---\nreceivers:\n kafka:\n brokers:\n - kafka:9092\n topic: otlp_metrics-sink-id-22\n protocol_version: 2.0.0\nextensions:\n pprof:\n endpoint: 0.0.0.0:1888\n basicauth/exporter:\n client_auth:\n username: otlp-user\n password: dbpass\nexporters:\n otlphttp:\n endpoint: https://acme.com/otlphttp/push\n auth:\n authenticator: basicauth/exporter\nservice:\n extensions:\n - pprof\n - basicauth/exporter\n pipelines:\n metrics:\n receivers:\n - kafka\n exporters:\n - otlphttp\n`,
@@ -99,14 +106,8 @@ func TestReturnConfigYamlFromSink(t *testing.T) {
 		},
 	}
 	for _, tt := range tests {
-		logger := zap.NewNop()
-		c := configBuilder{
-			logger:            logger,
-			kafkaUrl:          tt.args.kafkaUrlConfig,
-			encryptionService: password.NewEncryptionService(logger, tt.args.key),
-		}
 		t.Run(tt.name, func(t *testing.T) {
-			got, err := c.ReturnConfigYamlFromSink(tt.args.in0, tt.args.kafkaUrlConfig, tt.args.sink)
+			got, err := ReturnConfigYamlFromSink(tt.args.in0, tt.args.kafkaUrlConfig, tt.args.sink)
 			if (err != nil) != tt.wantErr {
 				t.Errorf("ReturnConfigYamlFromSink() error = %v, wantErr %v", err, tt.wantErr)
 				return
diff --git a/maestro/config/service.go b/maestro/config/service.go
deleted file mode 100644
index 52fceb2f8..000000000
--- a/maestro/config/service.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package config
-
-import (
-	"github.com/orb-community/orb/maestro/password"
-	"github.com/orb-community/orb/pkg/types"
-	"go.uber.org/zap"
-)
-
-type ConfigBuilder interface {
-	BuildDeploymentConfig(deployment *DeploymentRequest) (string, error)
-}
-
-type DeploymentRequest struct {
-	OwnerID string
-	SinkID  string
-	Config  types.Metadata
-	Backend string
-	Status  string
-}
-
-type configBuilder struct {
-	logger            *zap.Logger
-	kafkaUrl          string
-	encryptionService password.EncryptionService
-}
-
-var _ ConfigBuilder = (*configBuilder)(nil)
-
-func NewConfigBuilder(logger *zap.Logger, kafkaUrl string, encryptionService password.EncryptionService) ConfigBuilder {
-	return &configBuilder{logger: logger, kafkaUrl: kafkaUrl, encryptionService: encryptionService}
-}
diff --git a/maestro/deployment/model.go b/maestro/deployment/model.go
deleted file mode 100644
index 15a0f7894..000000000
--- a/maestro/deployment/model.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package deployment
-
-import (
-	"encoding/json"
-	"time"
-
-	"github.com/orb-community/orb/pkg/types"
-)
-
-type Deployment struct {
-	Id                       string     `db:"id" json:"id,omitempty"`
-	OwnerID                  string     `db:"owner_id" json:"ownerID,omitempty"`
-	SinkID                   string     `db:"sink_id" json:"sinkID,omitempty"`
-	Backend                  string     `db:"backend" json:"backend,omitempty"`
-	Config                   []byte     `db:"config" json:"config,omitempty"`
-	LastStatus               string     `db:"last_status" json:"lastStatus,omitempty"`
-	LastStatusUpdate         *time.Time `db:"last_status_update" json:"lastStatusUpdate"`
-	LastErrorMessage         string     `db:"last_error_message" json:"lastErrorMessage,omitempty"`
-	LastErrorTime            *time.Time `db:"last_error_time" json:"lastErrorTime"`
-	CollectorName            string     `db:"collector_name" json:"collectorName,omitempty"`
-	LastCollectorDeployTime  *time.Time `db:"last_collector_deploy_time" json:"lastCollectorDeployTime"`
-	LastCollectorStopTime    *time.Time `db:"last_collector_stop_time" json:"lastCollectorStopTime"`
-}
-
-func NewDeployment(ownerID string, sinkID string, config types.Metadata, backend string) Deployment {
-	now := time.Now()
-	deploymentName := "otel-" + sinkID
-	configAsByte := toByte(config)
-	return Deployment{
-		OwnerID:          ownerID,
-		SinkID:           sinkID,
-		Backend:          backend,
-		Config:           configAsByte,
-		LastStatus:       "unknown",
-		LastStatusUpdate: &now,
-		CollectorName:    deploymentName,
-	}
-}
-
-func (d *Deployment) Merge(other Deployment) error {
-	if other.Id != "" {
-		d.Id = other.Id
-	}
-	if other.LastErrorMessage != d.LastErrorMessage {
-		d.LastErrorMessage = other.LastErrorMessage
-		d.LastErrorTime = other.LastErrorTime
-	}
-	if other.CollectorName != "" {
-		d.CollectorName = other.CollectorName
-		d.LastCollectorDeployTime = other.LastCollectorDeployTime
-		d.LastCollectorStopTime = other.LastCollectorStopTime
-	}
-	if other.LastStatus != d.LastStatus {
-		d.LastStatus = other.LastStatus
-		d.LastStatusUpdate = other.LastStatusUpdate
-	}
-	return nil
-}
-
-func (d *Deployment) GetConfig() types.Metadata {
-	var config types.Metadata
-	err := json.Unmarshal(d.Config, &config)
-	if err != nil {
-		return nil
-	}
-	return config
-}
-
-func (d *Deployment) SetConfig(config types.Metadata) error {
-	configAsByte := toByte(config)
-	d.Config = configAsByte
-	return nil
-}
-
-func toByte(config types.Metadata) []byte {
-	configAsByte, err := json.Marshal(config)
-	if err != nil {
-		return nil
-	}
-	return configAsByte
-}
diff --git a/maestro/deployment/repository.go b/maestro/deployment/repository.go
deleted file mode 100644
index f3fc0fc48..000000000
--- a/maestro/deployment/repository.go
+++ /dev/null
@@ -1,200 +0,0 @@
-package deployment
-
-import (
-	"context"
-	"fmt"
-	"time"
-
-	"github.com/jmoiron/sqlx"
-	_ "github.com/lib/pq" // required for SQL access
-	maestroerrors "github.com/orb-community/orb/maestro/errors"
-	"github.com/orb-community/orb/pkg/errors"
-	"go.uber.org/zap"
-)
-
-type Repository interface {
-	FetchAll(ctx context.Context) ([]Deployment, error)
-	Add(ctx context.Context, deployment *Deployment) (*Deployment, error)
-	Update(ctx context.Context, deployment *Deployment) (*Deployment, error)
-	UpdateStatus(ctx context.Context, ownerID string, sinkId string, status string, errorMessage string) error
-	Remove(ctx context.Context, ownerId string, sinkId string) error
-	FindByOwnerAndSink(ctx context.Context, ownerId string, sinkId string) (*Deployment, error)
-	FindByCollectorName(ctx context.Context, collectorName string) (*Deployment, error)
-}
-
-var _ Repository = (*repositoryService)(nil)
-
-func NewRepositoryService(db *sqlx.DB, logger *zap.Logger) Repository {
-	namedLogger := logger.Named("deployment-repository")
-	return &repositoryService{db: db, logger: namedLogger}
-}
-
-type repositoryService struct {
-	logger *zap.Logger
-	db     *sqlx.DB
-}
-
-func (r *repositoryService) FetchAll(ctx context.Context) ([]Deployment, error) {
-	tx := r.db.MustBeginTx(ctx, nil)
-	var deployments []Deployment
-	query := `
-		SELECT id,
-		       owner_id,
-		       sink_id,
-		       backend,
-		       config,
-		       last_status,
-		       last_status_update,
-		       last_error_message,
-		       last_error_time,
-		       collector_name,
-		       last_collector_deploy_time,
-		       last_collector_stop_time
-		FROM deployments`
-	err := tx.SelectContext(ctx, &deployments, query, nil)
-	if err != nil {
-		_ = tx.Rollback()
-		return nil, err
-	}
-	err = tx.Commit()
-	if err != nil {
-		_ = tx.Rollback()
-		return nil, err
-	}
-	r.logger.Debug("fetched all deployments", zap.Int("count", len(deployments)))
-	return deployments, nil
-}
-
-func (r *repositoryService) Add(ctx context.Context, deployment *Deployment) (*Deployment, error) {
-	tx := r.db.MustBeginTx(ctx, nil)
-	_, err := tx.NamedExecContext(ctx,
-		`INSERT INTO deployments (owner_id, sink_id, backend, config, last_status, last_status_update, last_error_message,
-			last_error_time, collector_name, last_collector_deploy_time, last_collector_stop_time)
-		 VALUES (:owner_id, :sink_id, :backend, :config, :last_status, :last_status_update, :last_error_message,
-			:last_error_time, :collector_name, :last_collector_deploy_time, :last_collector_stop_time)`,
-		deployment)
-	if err != nil {
-		_ = tx.Rollback()
-		return nil, err
-	}
-
-	r.logger.Debug("added deployment", zap.String("owner-id", deployment.OwnerID), zap.String("sink-id", deployment.SinkID))
-	err = tx.Commit()
-	if err != nil {
-		return nil, err
-	}
-	got, err := r.FindByOwnerAndSink(ctx, deployment.OwnerID, deployment.SinkID)
-	if err != nil {
-		return nil, err
-	}
-	deployment.Id = got.Id
-	return deployment, nil
-}
-
-func (r *repositoryService) Update(ctx context.Context, deployment *Deployment) (*Deployment, error) {
-	tx := r.db.MustBeginTx(ctx, nil)
-	_, err := tx.NamedExecContext(ctx,
-		`UPDATE deployments
-		 SET
-			owner_id = :owner_id,
-			sink_id = :sink_id,
-			backend = :backend,
-			config = :config,
-			last_status = :last_status,
-			last_status_update = :last_status_update,
-			last_error_message = :last_error_message,
-			last_error_time = :last_error_time,
-			collector_name = :collector_name,
-			last_collector_deploy_time = :last_collector_deploy_time,
-			last_collector_stop_time = :last_collector_stop_time
-		 WHERE id = :id`,
-		deployment)
-	if err != nil {
-		_ = tx.Rollback()
-		return nil, err
-	}
-	r.logger.Info("update deployment", zap.String("owner-id", deployment.OwnerID), zap.String("sink-id", deployment.SinkID))
-	return deployment, tx.Commit()
-}
-
-func (r *repositoryService) UpdateStatus(ctx context.Context, ownerID string, sinkId string, status string, errorMessage string) error {
-	tx := r.db.MustBeginTx(ctx, nil)
-	now := time.Now()
-	fields := map[string]interface{}{
-		"last_status":        status,
-		"last_status_update": now,
-		"last_error_message": errorMessage,
-		"last_error_time":    now,
-		"owner_id":           ownerID,
-		"sink_id":            sinkId,
-	}
-	_, err := tx.ExecContext(ctx,
-		`UPDATE deployments
-		 SET
-			last_status = :last_status,
-			last_status_update = :last_status_update,
-			last_error_message = :last_error_message,
-			last_error_time = :last_error_time
-		 WHERE owner_id = :owner_id AND sink_id = :sink_id`,
-		fields)
-	if err != nil {
-		_ = tx.Rollback()
-		return err
-	}
-	r.logger.Debug("update deployment", zap.String("owner-id", ownerID), zap.String("sink-id", sinkId))
-	return tx.Commit()
-}
-
-func (r *repositoryService) Remove(ctx context.Context, ownerId string, sinkId string) error {
-	tx := r.db.MustBeginTx(ctx, nil)
-	tx.MustExecContext(ctx, "DELETE FROM deployments WHERE owner_id = $1 AND sink_id = $2", ownerId, sinkId)
-	err := tx.Commit()
-	if err != nil {
-		_ = tx.Rollback()
-		return err
-	}
-	return nil
-}
-
-func (r *repositoryService) FindByOwnerAndSink(ctx context.Context, ownerId string, sinkId string) (*Deployment, error) {
-	tx := r.db.MustBeginTx(ctx, nil)
-	var rows []Deployment
-	query := `SELECT * FROM deployments WHERE owner_id = $1 AND sink_id = $2`
-	err := tx.SelectContext(ctx, &rows, query, ownerId, sinkId)
-	if err != nil {
-		_ = tx.Rollback()
-		return nil, err
-	}
-	err = tx.Commit()
-	if err != nil {
-		return nil, err
-	}
-	if len(rows) == 0 {
-		return nil, maestroerrors.NotFound
-	}
-	deployment := &rows[0]
-
-	return deployment, nil
-}
-
-func (r *repositoryService) FindByCollectorName(ctx context.Context, collectorName string) (*Deployment, error) {
-	tx := r.db.MustBeginTx(ctx, nil)
-	var rows []Deployment
-	err := tx.SelectContext(ctx, &rows, "SELECT * FROM deployments WHERE collector_name = :collector_name",
-		map[string]interface{}{"collector_name": collectorName})
-	if err != nil {
-		_ = tx.Rollback()
-		return nil, err
-	}
-	err = tx.Commit()
-	if err != nil {
-		_ = tx.Rollback()
-		return nil, err
-	}
-	if len(rows) == 0 {
-		return nil, errors.New(fmt.Sprintf("not found deployment for collector name: %s", collectorName))
-	}
-	deployment := &rows[0]
-
-	return deployment, nil
-}
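The deleted repository leans on sqlx named queries: `NamedExecContext` binds the `db` struct tags of `Deployment` to `:named` placeholders in the SQL. A standalone sketch of that binding mechanism, against a hypothetical table (not part of the project's schema):

```go
package example

import (
	"context"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq" // Postgres driver, as in the deleted repository
)

type row struct {
	OwnerID string `db:"owner_id"`
	SinkID  string `db:"sink_id"`
}

// insertRow shows how sqlx maps db struct tags onto :named parameters, the
// same mechanism repositoryService.Add relies on. The table name is made up.
func insertRow(ctx context.Context, db *sqlx.DB, r row) error {
	_, err := db.NamedExecContext(ctx,
		`INSERT INTO example_rows (owner_id, sink_id) VALUES (:owner_id, :sink_id)`, r)
	return err
}
```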
tests := []struct { - name string - args args - want *Deployment - wantErr bool - }{ - { - name: "update_success", - args: args{ - create: &Deployment{ - OwnerID: "owner-10", - SinkID: "sink-10", - Backend: "prometheus", - Config: []byte(`{ - "authentication": { - "username": "user", - "password": "pass" - }, - "exporter" : { - "remote_host": "http://localhost:9090" - } - }`), - LastStatus: "pending", - LastStatusUpdate: &now, - LastErrorMessage: "", - LastErrorTime: &now, - CollectorName: "", - LastCollectorDeployTime: &now, - LastCollectorStopTime: &now, - }, - update: &Deployment{ - OwnerID: "owner-10", - SinkID: "sink-10", - Backend: "prometheus", - Config: []byte(`{ - "authentication": { - "username": "user2", - "password": "pass2" - }, - "exporter" : { - "remote_host": "http://localhost:9090" - } - }`), - LastStatus: "pending", - LastStatusUpdate: &now, - LastErrorMessage: "", - LastErrorTime: &now, - CollectorName: "", - LastCollectorDeployTime: &now, - LastCollectorStopTime: &now, - }, - }, - want: &Deployment{ - OwnerID: "owner-10", - SinkID: "sink-10", - Backend: "prometheus", - Config: []byte(`{ - "authentication": { - "username": "user2", - "password": "pass2" - }, - "exporter" : { - "remote_host": "http://localhost:9090" - } - }`), - LastStatus: "pending", - LastStatusUpdate: &now, - LastErrorMessage: "", - LastErrorTime: &now, - CollectorName: "", - LastCollectorDeployTime: &now, - LastCollectorStopTime: &now, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctx := context.WithValue(context.Background(), "test", tt.name) - r := &repositoryService{ - logger: logger, - db: pg, - } - got, err := r.Add(ctx, tt.args.create) - if (err != nil) != tt.wantErr { - t.Errorf("Add() error = %v, wantErr %v", err, tt.wantErr) - return - } - require.NotEmptyf(t, got.Id, "id should not be empty") - var gotInterface map[string]interface{} - var wantInterface map[string]interface{} - - tt.args.update.Id = got.Id - - got, err = r.Update(ctx, tt.args.update) - if (err != nil) != tt.wantErr { - t.Errorf("Add() error = %v, wantErr %v", err, tt.wantErr) - return - } - require.Equal(t, tt.want.SinkID, got.SinkID) - require.Equal(t, tt.want.OwnerID, got.OwnerID) - require.Equal(t, tt.want.Backend, got.Backend) - err = json.Unmarshal(got.Config, &gotInterface) - require.NoError(t, err) - err = json.Unmarshal(tt.want.Config, &wantInterface) - require.NoError(t, err) - require.Equal(t, wantInterface, gotInterface) - - if err := r.Remove(ctx, tt.want.OwnerID, tt.want.SinkID); (err != nil) != tt.wantErr { - t.Errorf("UpdateStatus() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} diff --git a/maestro/deployment/service.go b/maestro/deployment/service.go deleted file mode 100644 index f188d1166..000000000 --- a/maestro/deployment/service.go +++ /dev/null @@ -1,283 +0,0 @@ -package deployment - -import ( - "context" - "errors" - "fmt" - "time" - - "github.com/orb-community/orb/maestro/config" - "github.com/orb-community/orb/maestro/kubecontrol" - "github.com/orb-community/orb/maestro/password" - "github.com/orb-community/orb/maestro/redis/producer" - "github.com/orb-community/orb/pkg/types" - "go.uber.org/zap" -) - -const AuthenticationKey = "authentication" - -type Service interface { - // CreateDeployment to be used to create the deployment when there is a sink.create - CreateDeployment(ctx context.Context, deployment *Deployment) error - // GetDeployment to be used to get the deployment information for creating the collector or monitoring the collector - 
GetDeployment(ctx context.Context, ownerID string, sinkId string) (*Deployment, string, error) - // UpdateDeployment to be used to update the deployment when there is a sink.update - UpdateDeployment(ctx context.Context, deployment *Deployment) error - // UpdateStatus to be used to update the status of the sink, when there is an error or when the sink is running - UpdateStatus(ctx context.Context, ownerID string, sinkId string, status string, errorMessage string) error - // RemoveDeployment to be used to remove the deployment when there is a sink.delete - RemoveDeployment(ctx context.Context, ownerID string, sinkId string) error - // GetDeploymentByCollectorName to be used to get the deployment information for creating the collector or monitoring the collector - GetDeploymentByCollectorName(ctx context.Context, collectorName string) (*Deployment, error) - // NotifyCollector add collector information to deployment - NotifyCollector(ctx context.Context, ownerID string, sinkId string, operation string, status string, errorMessage string) (string, error) -} - -type deploymentService struct { - dbRepository Repository - logger *zap.Logger - kafkaUrl string - maestroProducer producer.Producer - kubecontrol kubecontrol.Service - configBuilder config.ConfigBuilder - encryptionService password.EncryptionService -} - -var _ Service = (*deploymentService)(nil) - -func NewDeploymentService(logger *zap.Logger, repository Repository, kafkaUrl string, encryptionKey string, - maestroProducer producer.Producer, kubecontrol kubecontrol.Service) Service { - namedLogger := logger.Named("deployment-service") - es := password.NewEncryptionService(logger, encryptionKey) - cb := config.NewConfigBuilder(namedLogger, kafkaUrl, es) - return &deploymentService{logger: namedLogger, - dbRepository: repository, - configBuilder: cb, - encryptionService: es, - maestroProducer: maestroProducer, - kubecontrol: kubecontrol, - } -} - -func (d *deploymentService) CreateDeployment(ctx context.Context, deployment *Deployment) error { - if deployment == nil { - return errors.New("deployment is nil") - } - codedConfig, err := d.encodeConfig(deployment) - if err != nil { - return err - } - err = deployment.SetConfig(codedConfig) - if err != nil { - return err - } - // store with config encrypted - added, err := d.dbRepository.Add(ctx, deployment) - if err != nil { - return err - } - d.logger.Info("added deployment", zap.String("id", added.Id), - zap.String("ownerID", added.OwnerID), zap.String("sinkID", added.SinkID)) - err = d.maestroProducer.PublishSinkStatus(ctx, added.OwnerID, added.SinkID, "unknown", "") - if err != nil { - return err - } - return nil -} - -func (d *deploymentService) getAuthBuilder(authType string) config.AuthBuilderService { - return config.GetAuthService(authType, d.encryptionService) -} - -func (d *deploymentService) encodeConfig(deployment *Deployment) (types.Metadata, error) { - authType := deployment.GetConfig() - if authType == nil { - return nil, errors.New("deployment do not have authentication information") - } - value := authType.GetSubMetadata(AuthenticationKey)["type"].(string) - authBuilder := d.getAuthBuilder(value) - if authBuilder == nil { - return nil, errors.New("deployment do not have authentication information") - } - return authBuilder.EncodeAuth(deployment.GetConfig()) -} - -func (d *deploymentService) GetDeployment(ctx context.Context, ownerID string, sinkId string) (*Deployment, string, error) { - deployment, err := d.dbRepository.FindByOwnerAndSink(ctx, ownerID, sinkId) - if err != 
nil { - return nil, "", err - } - authType := deployment.GetConfig() - if authType == nil { - return nil, "", errors.New("deployment do not have authentication information") - } - value := authType.GetSubMetadata(AuthenticationKey)["type"].(string) - authBuilder := d.getAuthBuilder(value) - decodedDeployment, err := authBuilder.DecodeAuth(deployment.GetConfig()) - if err != nil { - return nil, "", err - } - err = deployment.SetConfig(decodedDeployment) - if err != nil { - return nil, "", err - } - deployReq := &config.DeploymentRequest{ - OwnerID: ownerID, - SinkID: sinkId, - Config: deployment.GetConfig(), - Backend: deployment.Backend, - Status: deployment.LastStatus, - } - manifest, err := d.configBuilder.BuildDeploymentConfig(deployReq) - if err != nil { - return nil, "", err - } - return deployment, manifest, nil -} - -// UpdateDeployment will stop the running collector if any, and change the deployment, it will not spin the collector back up, -// it will wait for the next sink.activity -func (d *deploymentService) UpdateDeployment(ctx context.Context, deployment *Deployment) error { - now := time.Now() - got, _, err := d.GetDeployment(ctx, deployment.OwnerID, deployment.SinkID) - if err != nil { - return errors.New("could not find deployment to update") - } - // Spin down the collector if it is running - err = d.kubecontrol.KillOtelCollector(ctx, got.CollectorName, got.SinkID) - if err != nil { - d.logger.Warn("could not stop running collector, will try to update anyway", zap.Error(err)) - } - err = got.Merge(*deployment) - if err != nil { - d.logger.Error("error during merge of deployments", zap.Error(err)) - return err - } - got.LastCollectorStopTime = &now - codedConfig, err := d.encodeConfig(deployment) - if err != nil { - return err - } - err = got.SetConfig(codedConfig) - if err != nil { - return err - } - updated, err := d.dbRepository.Update(ctx, got) - if err != nil { - return err - } - err = d.maestroProducer.PublishSinkStatus(ctx, updated.OwnerID, updated.SinkID, "unknown", "") - if err != nil { - return err - } - d.logger.Info("updated deployment", zap.String("ownerID", updated.OwnerID), - zap.String("sinkID", updated.SinkID)) - return nil -} - -func (d *deploymentService) NotifyCollector(ctx context.Context, ownerID string, sinkId string, operation string, - status string, errorMessage string) (string, error) { - got, manifest, err := d.GetDeployment(ctx, ownerID, sinkId) - if err != nil { - return "", errors.New("could not find deployment to update") - } - now := time.Now() - if operation == "delete" { - got.LastCollectorStopTime = &now - err = d.kubecontrol.KillOtelCollector(ctx, got.CollectorName, got.SinkID) - if err != nil { - d.logger.Warn("could not stop running collector, will try to update anyway", zap.Error(err)) - } - } else if operation == "deploy" { - // Spin up the collector - if got.LastCollectorDeployTime == nil || got.LastCollectorDeployTime.Before(now) { - if got.LastCollectorStopTime == nil || got.LastCollectorStopTime.Before(now) { - d.logger.Debug("collector is not running deploying") - got.CollectorName, err = d.kubecontrol.CreateOtelCollector(ctx, got.OwnerID, got.SinkID, manifest) - got.LastCollectorDeployTime = &now - } else { - d.logger.Info("collector is already running") - } - } - - } - if status != "" { - got.LastStatus = status - got.LastStatusUpdate = &now - } - if errorMessage != "" { - got.LastErrorMessage = errorMessage - got.LastErrorTime = &now - } - codedConfig, err := d.encodeConfig(got) - if err != nil { - return "", err - } - err = 
got.SetConfig(codedConfig) - if err != nil { - return "", err - } - updated, err := d.dbRepository.Update(ctx, got) - if err != nil { - return "", err - } - d.logger.Info("updated deployment information for collector and status or error", - zap.String("ownerID", updated.OwnerID), zap.String("sinkID", updated.SinkID), - zap.String("collectorName", updated.CollectorName), - zap.String("status", updated.LastStatus), zap.String("errorMessage", updated.LastErrorMessage)) - return updated.CollectorName, nil -} - -// UpdateStatus this will change the status in postgres and notify sinks service to show new status to user -func (d *deploymentService) UpdateStatus(ctx context.Context, ownerID string, sinkId string, status string, errorMessage string) error { - got, _, err := d.GetDeployment(ctx, ownerID, sinkId) - if err != nil { - return fmt.Errorf("could not find deployment to update status: %w", err) - } - now := time.Now() - if status != "" { - got.LastStatus = status - got.LastStatusUpdate = &now - got.LastErrorMessage = errorMessage - got.LastErrorTime = &now - } - - codedConfig, err := d.encodeConfig(got) - if err != nil { - return err - } - err = got.SetConfig(codedConfig) - if err != nil { - return err - } - updated, err := d.dbRepository.Update(ctx, got) - if err != nil { - return err - } - d.logger.Info("updated deployment status", - zap.String("ownerID", updated.OwnerID), zap.String("sinkID", updated.SinkID), - zap.String("status", updated.LastStatus), zap.String("errorMessage", updated.LastErrorMessage)) - err = d.maestroProducer.PublishSinkStatus(ctx, updated.OwnerID, updated.SinkID, updated.LastStatus, errorMessage) - if err != nil { - return err - } - return nil -} - -// RemoveDeployment this will remove the deployment from postgres and redis -func (d *deploymentService) RemoveDeployment(ctx context.Context, ownerID string, sinkId string) error { - err := d.dbRepository.Remove(ctx, ownerID, sinkId) - if err != nil { - return err - } - d.logger.Info("removed deployment", zap.String("ownerID", ownerID), zap.String("sinkID", sinkId)) - return nil -} - -func (d *deploymentService) GetDeploymentByCollectorName(ctx context.Context, collectorName string) (*Deployment, error) { - deployment, err := d.dbRepository.FindByCollectorName(ctx, collectorName) - if err != nil { - return nil, err - } - return deployment, nil -} diff --git a/maestro/deployment/setup_test.go b/maestro/deployment/setup_test.go deleted file mode 100644 index 905b277c7..000000000 --- a/maestro/deployment/setup_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package deployment - -import ( - "github.com/jmoiron/sqlx" - "github.com/orb-community/orb/maestro/postgres" - "github.com/orb-community/orb/pkg/config" - "github.com/ory/dockertest/v3" - "github.com/ory/dockertest/v3/docker" - "go.uber.org/zap" - "os" - "testing" -) - -var logger *zap.Logger -var pg *sqlx.DB - -func TestMain(m *testing.M) { - logger, _ = zap.NewProduction() - pool, err := dockertest.NewPool("") - if err != nil { - logger.Fatal("could not connect to docker:", zap.Error(err)) - } - - // Pull the PostgreSQL Docker image - postgresImage := "postgres:latest" - err = pool.Client.PullImage(docker.PullImageOptions{ - Repository: postgresImage, - Tag: "latest", - }, docker.AuthConfiguration{}) - if err != nil { - logger.Fatal("Could not pull Docker image:", zap.Error(err)) - } - - // Create a PostgreSQL container - resource, err := pool.Run("postgres", "latest", []string{ - "POSTGRES_USER=postgres", - "POSTGRES_PASSWORD=secret", - "POSTGRES_DB=testdb", - }) - if err 
!= nil { - logger.Fatal("Could not start PostgreSQL container", zap.Error(err)) - } - - retryF := func() error { - localTest := config.PostgresConfig{ - Host: "localhost", - Port: resource.GetPort("5432/tcp"), - User: "postgres", - Pass: "secret", - DB: "testdb", - SSLMode: "disable", - } - pg, err = postgres.Connect(localTest) - if err != nil { - return err - } - - return pg.Ping() - } - if err := pool.Retry(retryF); err != nil { - logger.Fatal("could not connect to docker: %s", zap.Error(err)) - } - code := m.Run() - - if err := pool.Purge(resource); err != nil { - logger.Fatal("could not purge container: %s", zap.Error(err)) - } - - os.Exit(code) -} diff --git a/maestro/errors/maestro_errors.go b/maestro/errors/maestro_errors.go deleted file mode 100644 index 017e7f303..000000000 --- a/maestro/errors/maestro_errors.go +++ /dev/null @@ -1,5 +0,0 @@ -package errors - -import "github.com/orb-community/orb/pkg/errors" - -var NotFound = errors.New("not found") diff --git a/maestro/kubecontrol/kubecontrol.go b/maestro/kubecontrol/kubecontrol.go index 0aa7004ad..edf83ab7c 100644 --- a/maestro/kubecontrol/kubecontrol.go +++ b/maestro/kubecontrol/kubecontrol.go @@ -4,10 +4,6 @@ import ( "bufio" "context" "fmt" - "os" - "os/exec" - "strings" - _ "github.com/orb-community/orb/maestro/config" "github.com/orb-community/orb/pkg/errors" "go.uber.org/zap" @@ -15,6 +11,10 @@ import ( k8smetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" + "os" + "os/exec" + "strings" + "time" ) const namespace = "otelcollectors" @@ -26,22 +26,6 @@ type deployService struct { clientSet *kubernetes.Clientset } -const OperationDeploy CollectorOperation = iota -const OperationDelete = 1 - -type CollectorOperation int - -func (o CollectorOperation) Name() string { - switch o { - case OperationDeploy: - return "deploy" - case OperationDelete: - return "delete" - default: - return "unknown" - } -} - func NewService(logger *zap.Logger) Service { clusterConfig, err := rest.InClusterConfig() if err != nil { @@ -58,13 +42,19 @@ func NewService(logger *zap.Logger) Service { type Service interface { // CreateOtelCollector - create an existing collector by id - CreateOtelCollector(ctx context.Context, ownerID, sinkID, deploymentEntry string) (string, error) + CreateOtelCollector(ctx context.Context, ownerID, sinkID, deploymentEntry string) error + + // DeleteOtelCollector - delete an existing collector by id + DeleteOtelCollector(ctx context.Context, ownerID, sinkID, deploymentEntry string) error + + // UpdateOtelCollector - update an existing collector by id + UpdateOtelCollector(ctx context.Context, ownerID, sinkID, deploymentEntry string) error // KillOtelCollector - kill an existing collector by id, terminating by the ownerID, sinkID without the file - KillOtelCollector(ctx context.Context, deploymentName, sinkID string) error + KillOtelCollector(ctx context.Context, ownerID, sinkID string) error } -func (svc *deployService) collectorDeploy(ctx context.Context, operation, ownerID, sinkId, manifest string) (string, error) { +func (svc *deployService) collectorDeploy(ctx context.Context, operation, ownerID, sinkId, manifest string) error { _, status, err := svc.getDeploymentState(ctx, ownerID, sinkId) fileContent := []byte(manifest) tmp := strings.Split(string(fileContent), "\n") @@ -77,7 +67,7 @@ func (svc *deployService) collectorDeploy(ctx context.Context, operation, ownerI err = os.WriteFile("/tmp/otel-collector-"+sinkId+".json", []byte(newContent), 0644) if err != nil { 
svc.logger.Error("failed to write file content", zap.Error(err)) - return "", err + return err } stdOutListenFunction := func(out *bufio.Scanner, err *bufio.Scanner) { for out.Scan() { @@ -95,12 +85,7 @@ func (svc *deployService) collectorDeploy(ctx context.Context, operation, ownerI svc.logger.Info(fmt.Sprintf("successfully %s the otel-collector for sink-id: %s", operation, sinkId)) } - // delete temporary file - os.Remove("/tmp/otel-collector-"+sinkId+".json") - - // TODO this will be retrieved once we move to K8s SDK - collectorName := fmt.Sprintf("otel-%s", sinkId) - return collectorName, nil + return nil } func execCmd(_ context.Context, cmd *exec.Cmd, logger *zap.Logger, stdOutFunc func(stdOut *bufio.Scanner, stdErr *bufio.Scanner)) (*bufio.Scanner, *bufio.Scanner, error) { @@ -142,16 +127,38 @@ func (svc *deployService) getDeploymentState(ctx context.Context, _, sinkId stri } } status = "deleted" - return "", status, nil + return "", "deleted", nil +} + +func (svc *deployService) CreateOtelCollector(ctx context.Context, ownerID, sinkID, deploymentEntry string) error { + err := svc.collectorDeploy(ctx, "apply", ownerID, sinkID, deploymentEntry) + if err != nil { + return err + } + + return nil } -func (svc *deployService) CreateOtelCollector(ctx context.Context, ownerID, sinkID, deploymentEntry string) (string, error) { - col, err := svc.collectorDeploy(ctx, "apply", ownerID, sinkID, deploymentEntry) +func (svc *deployService) UpdateOtelCollector(ctx context.Context, ownerID, sinkID, deploymentEntry string) error { + err := svc.DeleteOtelCollector(ctx, ownerID, sinkID, deploymentEntry) if err != nil { - return "", err + return err } + // Time to wait until K8s completely removes before re-creating + time.Sleep(3 * time.Second) + err = svc.CreateOtelCollector(ctx, ownerID, sinkID, deploymentEntry) + if err != nil { + return err + } + return nil +} - return col, nil +func (svc *deployService) DeleteOtelCollector(ctx context.Context, ownerID, sinkID, deploymentEntry string) error { + err := svc.collectorDeploy(ctx, "delete", ownerID, sinkID, deploymentEntry) + if err != nil { + return err + } + return nil } func (svc *deployService) KillOtelCollector(ctx context.Context, deploymentName string, sinkId string) error { @@ -165,12 +172,8 @@ func (svc *deployService) KillOtelCollector(ctx context.Context, deploymentName } // execute action - cmdDeploy := exec.Command("kubectl", "delete", "deploy", deploymentName, "-n", namespace) - _, _, err := execCmd(ctx, cmdDeploy, svc.logger, stdOutListenFunction) - cmdService := exec.Command("kubectl", "delete", "service", deploymentName, "-n", namespace) - _, _, err = execCmd(ctx, cmdService, svc.logger, stdOutListenFunction) - cmdConfigMap := exec.Command("kubectl", "delete", "configmap", "otel-collector-config-"+sinkId, "-n", namespace) - _, _, err = execCmd(ctx, cmdConfigMap, svc.logger, stdOutListenFunction) + cmd := exec.Command("kubectl", "delete", "deploy", deploymentName, "-n", namespace) + _, _, err := execCmd(ctx, cmd, svc.logger, stdOutListenFunction) if err == nil { svc.logger.Info(fmt.Sprintf("successfully killed the otel-collector for sink-id: %s", sinkId)) } diff --git a/maestro/monitor/monitor.go b/maestro/monitor/monitor.go index aba190351..5ac8a7ccd 100644 --- a/maestro/monitor/monitor.go +++ b/maestro/monitor/monitor.go @@ -9,11 +9,10 @@ import ( "strings" "time" - "github.com/orb-community/orb/maestro/deployment" - "github.com/orb-community/orb/maestro/redis/producer" + "github.com/orb-community/orb/maestro/kubecontrol" + rediscons1 
"github.com/orb-community/orb/maestro/redis/consumer" maestroconfig "github.com/orb-community/orb/maestro/config" - "github.com/orb-community/orb/maestro/kubecontrol" sinkspb "github.com/orb-community/orb/sinks/pb" "go.uber.org/zap" k8scorev1 "k8s.io/api/core/v1" @@ -28,13 +27,12 @@ const ( namespace = "otelcollectors" ) -func NewMonitorService(logger *zap.Logger, sinksClient *sinkspb.SinkServiceClient, mp producer.Producer, kubecontrol *kubecontrol.Service, deploySvc deployment.Service) Service { +func NewMonitorService(logger *zap.Logger, sinksClient *sinkspb.SinkServiceClient, eventStore rediscons1.Subscriber, kubecontrol *kubecontrol.Service) Service { return &monitorService{ - logger: logger, - sinksClient: *sinksClient, - maestroProducer: mp, - kubecontrol: *kubecontrol, - deploymentSvc: deploySvc, + logger: logger, + sinksClient: *sinksClient, + eventStore: eventStore, + kubecontrol: *kubecontrol, } } @@ -44,11 +42,10 @@ type Service interface { } type monitorService struct { - logger *zap.Logger - sinksClient sinkspb.SinkServiceClient - maestroProducer producer.Producer - deploymentSvc deployment.Service - kubecontrol kubecontrol.Service + logger *zap.Logger + sinksClient sinkspb.SinkServiceClient + eventStore rediscons1.Subscriber + kubecontrol kubecontrol.Service } func (svc *monitorService) Start(ctx context.Context, cancelFunc context.CancelFunc) error { @@ -168,12 +165,19 @@ func (svc *monitorService) monitorSinks(ctx context.Context) { } } if sink == nil { - svc.logger.Warn("sink not found for collector, depleting collector", zap.String("collector name", collector.Name)) + svc.logger.Warn("collector not found for sink, depleting collector", zap.String("collector name", collector.Name)) sinkId := collector.Name[5:41] - deploymentName := "otel-" + sinkId - svc.logger.Debug("compare deploymentName with collector name", zap.String("deploy name", deploymentName), - zap.String("collector name", collector.Name)) - err = svc.kubecontrol.KillOtelCollector(ctx, deploymentName, sinkId) + deploymentEntry, err := svc.eventStore.GetDeploymentEntryFromSinkId(ctx, sinkId) + if err != nil { + svc.logger.Error("did not find collector entry for sink", zap.String("sink-id", sinkId)) + deploymentName := "otel-" + sinkId + err = svc.kubecontrol.KillOtelCollector(ctx, deploymentName, sinkId) + if err != nil { + svc.logger.Error("error removing otel collector, manual intervention required", zap.Error(err)) + } + continue + } + err = svc.kubecontrol.DeleteOtelCollector(ctx, "", sinkId, deploymentEntry) if err != nil { svc.logger.Error("error removing otel collector", zap.Error(err)) } @@ -193,37 +197,44 @@ func (svc *monitorService) monitorSinks(ctx context.Context) { svc.logger.Error("error on getting logs, skipping", zap.Error(err)) continue } - var logErrMsg string status, logsErr = svc.analyzeLogs(logs) if status == "fail" { svc.logger.Error("error during analyze logs", zap.Error(logsErr)) continue } - if logsErr != nil { - logErrMsg = logsErr.Error() + lastActivity, activityErr := svc.eventStore.GetActivity(sink.Id) + // if logs reported 'active' status + // here we should check if LastActivity is up-to-date, otherwise we need to set sink as idle + idleLimit := time.Now().Unix() - idleTimeSeconds // within 10 minutes + if idleLimit >= lastActivity { + //changing state on sinks + svc.eventStore.PublishSinkStateChange(sink, "idle", logsErr, err) + //changing state on redis sinker + data.State.SetFromString("idle") + svc.eventStore.UpdateSinkStateCache(ctx, data) + deploymentEntry, errDeploy := 
svc.eventStore.GetDeploymentEntryFromSinkId(ctx, sink.Id) + if errDeploy != nil { + svc.logger.Error("Remove collector: error on getting collector deployment from redis", zap.Error(activityErr)) + continue + } + err = svc.kubecontrol.DeleteOtelCollector(ctx, sink.OwnerID, sink.Id, deploymentEntry) + if err != nil { + svc.logger.Error("error removing otel collector", zap.Error(err)) + } + continue } - //set the new sink status if changed during checks if sink.GetState() != status && status != "" { - svc.logger.Info("changing sink status", - zap.Any("before", sink.GetState()), - zap.String("new status", status), - zap.String("SinkID", sink.Id), - zap.String("ownerID", sink.OwnerID)) + svc.logger.Info("changing sink status", zap.Any("before", sink.GetState()), zap.String("new status", status), zap.String("SinkID", sink.Id), zap.String("ownerID", sink.OwnerID)) if err != nil { - svc.logger.Error("error updating status", - zap.Any("before", sink.GetState()), - zap.String("new status", status), - zap.String("error_message (opt)", err.Error()), - zap.String("SinkID", sink.Id), - zap.String("ownerID", sink.OwnerID)) + svc.logger.Error("error updating status", zap.Any("before", sink.GetState()), zap.String("new status", status), zap.String("error_message (opt)", err.Error()), zap.String("SinkID", sink.Id), zap.String("ownerID", sink.OwnerID)) } else { - svc.logger.Info("updating status", - zap.Any("before", sink.GetState()), - zap.String("new status", status), - zap.String("SinkID", sink.Id), - zap.String("ownerID", sink.OwnerID)) - err = svc.deploymentSvc.UpdateStatus(ctx, sink.OwnerID, sink.Id, status, logErrMsg) + svc.logger.Info("updating status", zap.Any("before", sink.GetState()), zap.String("new status", status), zap.String("SinkID", sink.Id), zap.String("ownerID", sink.OwnerID)) + // changing state on sinks + svc.eventStore.PublishSinkStateChange(sink, status, logsErr, err) + // changing state on redis sinker + data.State.SetFromString(status) + svc.eventStore.UpdateSinkStateCache(ctx, data) } } } diff --git a/maestro/password/password.go b/maestro/password/password.go deleted file mode 100644 index 5f713f2ed..000000000 --- a/maestro/password/password.go +++ /dev/null @@ -1,93 +0,0 @@ -package password - -import ( - "crypto/aes" - "crypto/cipher" - "crypto/rand" - "crypto/sha256" - "encoding/hex" - "go.uber.org/zap" - "io" -) - -type EncryptionService interface { - EncodePassword(plainText string) (string, error) - DecodePassword(cipheredText string) (string, error) -} - -func NewEncryptionService(logger *zap.Logger, key string) EncryptionService { - ps := &encryptionService{ - logger: logger, - key: key, - } - return ps -} - -var _ EncryptionService = (*encryptionService)(nil) - -type encryptionService struct { - key string - logger *zap.Logger -} - -func (ps *encryptionService) EncodePassword(plainText string) (string, error) { - cipherText, err := encrypt([]byte(plainText), ps.key) - if err != nil { - ps.logger.Error("failed to encrypt password", zap.Error(err)) - return "", err - } - return cipherText, nil -} - -func (ps *encryptionService) DecodePassword(cipheredText string) (string, error) { - hexedByte, err := hex.DecodeString(cipheredText) - if err != nil { - ps.logger.Error("failed to decode password", zap.Error(err)) - return "", err - } - plainByte, err := decrypt(hexedByte, ps.key) - if err != nil { - ps.logger.Error("failed to decrypt password", zap.Error(err)) - return "", err - } - - return string(plainByte), nil -} - -func encrypt(data []byte, passphrase string) (string, 
error) { - block, _ := aes.NewCipher(createHash(passphrase)) - gcm, err := cipher.NewGCM(block) - if err != nil { - return "", err - } - nonce := make([]byte, gcm.NonceSize()) - if _, err = io.ReadFull(rand.Reader, nonce); err != nil { - return "", err - } - ciphertext := gcm.Seal(nonce, nonce, data, nil) - return hex.EncodeToString(ciphertext), nil -} - -func decrypt(data []byte, passphrase string) ([]byte, error) { - key := createHash(passphrase) - block, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - gcm, err := cipher.NewGCM(block) - if err != nil { - return nil, err - } - nonceSize := gcm.NonceSize() - nonce, ciphertext := data[:nonceSize], data[nonceSize:] - plaintext, err := gcm.Open(nil, nonce, ciphertext, nil) - if err != nil { - return nil, err - } - return plaintext, nil -} - -func createHash(key string) []byte { - hasher := sha256.Sum256([]byte(key)) - return hasher[:] -} diff --git a/maestro/password/password_test.go b/maestro/password/password_test.go deleted file mode 100644 index e9db130a4..000000000 --- a/maestro/password/password_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package password - -import ( - "github.com/stretchr/testify/assert" - "go.uber.org/zap" - "testing" -) - -func Test_passwordService_EncodePassword(t *testing.T) { - logger, _ := zap.NewDevelopment() - - tests := []struct { - name string - key string - plainText string - encodedString string - }{ - { - name: "with 32 char key", - key: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", - plainText: "test", - encodedString: "bbf4b204e5daea6e7cb4cb8dec2011c91de502db08c1fc37f4e1ba8b8da60cf0", - }, - { - name: "with smaller key", - key: "testing", - plainText: "test", - encodedString: "c8dd6f7f76d1b988574559959c68615ae72487b13bef2f7c4afbce204cc11864", - }, - { - name: "with uuid-key", - key: "eb1bc7f4-2031-41c4-85fa-2ddce3abfc3b", - plainText: "test", - encodedString: "1f1114dd9e7953585a768d280a3d0f8592647e0761d085bfa83b9b57c2110a5c", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ps := NewEncryptionService(logger, tt.key) - got, err := ps.EncodePassword(tt.plainText) - if err != nil { - t.Fatalf("received error on encoding password: %e", err) - } - t.Logf("storing %s", got) - password, err := ps.DecodePassword(got) - if err != nil { - t.Fatalf("received error on decoding password: %e", err) - } - t.Logf("retrieving %s", password) - assert.Equalf(t, tt.plainText, password, "Got Decoded Password %s", password) - getPassword, err := ps.DecodePassword(tt.encodedString) - if err != nil { - t.Fatalf("received error on decoding stored password: %e", err) - } - t.Logf("retrieving %s", getPassword) - assert.Equalf(t, getPassword, password, "Stored coded password is %s", getPassword) - }) - } -} diff --git a/maestro/postgres/init.go b/maestro/postgres/init.go deleted file mode 100644 index 83077a742..000000000 --- a/maestro/postgres/init.go +++ /dev/null @@ -1,61 +0,0 @@ -package postgres - -import ( - "fmt" - - "github.com/jmoiron/sqlx" - _ "github.com/lib/pq" // required for SQL access - "github.com/orb-community/orb/pkg/config" - migrate "github.com/rubenv/sql-migrate" -) - -// Connect creates a connection to the PostgreSQL instance and applies any -// unapplied database migrations. A non-nil error is returned to indicate -// failure. 
-func Connect(cfg config.PostgresConfig) (*sqlx.DB, error) {
-	url := fmt.Sprintf("host=%s port=%s user=%s dbname=%s password=%s sslmode=%s sslcert=%s sslkey=%s sslrootcert=%s", cfg.Host, cfg.Port, cfg.User, cfg.DB, cfg.Pass, cfg.SSLMode, cfg.SSLCert, cfg.SSLKey, cfg.SSLRootCert)
-
-	db, err := sqlx.Open("postgres", url)
-	if err != nil {
-		return nil, err
-	}
-
-	if err := migrateDB(db); err != nil {
-		return nil, err
-	}
-
-	return db, nil
-}
-
-func migrateDB(db *sqlx.DB) error {
-	migrations := &migrate.MemoryMigrationSource{
-		Migrations: []*migrate.Migration{
-			{
-				Id: "1",
-				Up: []string{
-					`CREATE TABLE IF NOT EXISTS deployments (
-						id UUID NOT NULL DEFAULT gen_random_uuid(),
-						owner_id VARCHAR(255) NOT NULL,
-						sink_id VARCHAR(255) NOT NULL,
-						backend VARCHAR(255),
-						config JSONB,
-						last_status VARCHAR(255),
-						last_status_update TIMESTAMP,
-						last_error_message VARCHAR(255),
-						last_error_time TIMESTAMP,
-						collector_name VARCHAR(255),
-						last_collector_deploy_time TIMESTAMP,
-						last_collector_stop_time TIMESTAMP
-					);`,
-					`ALTER TABLE "deployments" ADD CONSTRAINT "deployments_owner_id_sink_id" UNIQUE ("owner_id", "sink_id");`,
-				},
-				Down: []string{
-					"DROP TABLE deployments",
-				},
-			},
-		},
-	}
-	_, err := migrate.Exec(db.DB, "postgres", migrations, migrate.Up)
-
-	return err
-}
diff --git a/maestro/redis/consumer/hashset.go b/maestro/redis/consumer/hashset.go
new file mode 100644
index 000000000..75b895e84
--- /dev/null
+++ b/maestro/redis/consumer/hashset.go
@@ -0,0 +1,234 @@
+package consumer
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strconv"
+	"time"
+
+	redis2 "github.com/go-redis/redis/v8"
+
+	"github.com/orb-community/orb/maestro/config"
+	"github.com/orb-community/orb/maestro/redis"
+	"github.com/orb-community/orb/pkg/types"
+	sinkspb "github.com/orb-community/orb/sinks/pb"
+	"go.uber.org/zap"
+)
+
+const (
+	deploymentKey  = "orb.sinks.deployment"
+	activityPrefix = "sinker_activity"
+	streamLen      = 1000
+)
+
+func (es eventStore) GetDeploymentEntryFromSinkId(ctx context.Context, sinkId string) (string, error) {
+	cmd := es.sinkerKeyRedisClient.HGet(ctx, deploymentKey, sinkId)
+	if err := cmd.Err(); err != nil {
+		es.logger.Error("error during redis reading of SinkId", zap.String("sink-id", sinkId), zap.Error(err))
+		return "", err
+	}
+	return cmd.Val(), nil
+}
+
+// handleSinksDeleteCollector will delete Deployment Entry and force delete otel collector
+func (es eventStore) handleSinksDeleteCollector(ctx context.Context, event redis.SinksUpdateEvent) error {
+	es.logger.Info("Received maestro DELETE event from sinks ID", zap.String("sinkID", event.SinkID), zap.String("owner", event.Owner))
+	err := es.RemoveSinkActivity(ctx, event.SinkID)
+	if err != nil {
+		return err
+	}
+	deploymentEntry, err := es.GetDeploymentEntryFromSinkId(ctx, event.SinkID)
+	if err != nil {
+		es.logger.Error("did not find collector entry for sink", zap.String("sink-id", event.SinkID))
+		return err
+	}
+	err = es.sinkerKeyRedisClient.HDel(ctx, deploymentKey, event.SinkID).Err()
+	if err != nil {
+		return err
+	}
+	err = es.kubecontrol.DeleteOtelCollector(ctx, event.Owner, event.SinkID, deploymentEntry)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// handleSinksCreateCollector will create Deployment Entry in Redis
+func (es eventStore) handleSinksCreateCollector(ctx context.Context, event redis.SinksUpdateEvent) error {
+	es.logger.Info("Received event to Create DeploymentEntry from sinks ID", zap.String("sinkID", event.SinkID), zap.String("owner", event.Owner))
+	sinkData, err := es.sinksClient.RetrieveSink(ctx, &sinkspb.SinkByIDReq{
+		SinkID:  event.SinkID,
+		OwnerID: event.Owner,
+	})
+	if err != nil || (sinkData != nil && sinkData.Config == nil) {
+		es.logger.Error("could not fetch info for sink", zap.String("sink-id", event.SinkID), zap.Error(err))
+		return err
+	}
+	var metadata types.Metadata
+	if err := json.Unmarshal(sinkData.Config, &metadata); err != nil {
+		return err
+	}
+	data := config.SinkData{
+		SinkID:  sinkData.Id,
+		OwnerID: sinkData.OwnerID,
+		Backend: sinkData.Backend,
+		Config:  metadata,
+	}
+	err2 := es.CreateDeploymentEntry(ctx, data)
+	if err2 != nil {
+		return err2
+	}
+
+	return nil
+}
+
+func (es eventStore) CreateDeploymentEntry(ctx context.Context, sink config.SinkData) error {
+	deploy, err := config.GetDeploymentJson(es.kafkaUrl, sink)
+	if err != nil {
+		es.logger.Error("error trying to get deployment json for sink ID", zap.String("sinkId", sink.SinkID), zap.Error(err))
+		return err
+	}
+
+	return es.sinkerKeyRedisClient.HSet(ctx, deploymentKey, sink.SinkID, deploy).Err()
+}
+
+// handleSinksUpdateCollector will update Deployment Entry in Redis and force update otel collector
+func (es eventStore) handleSinksUpdateCollector(ctx context.Context, event redis.SinksUpdateEvent) error {
+	es.logger.Info("Received event to Update DeploymentEntry from sinks ID", zap.String("sinkID", event.SinkID), zap.String("owner", event.Owner))
+	sinkData, err := es.sinksClient.RetrieveSink(ctx, &sinkspb.SinkByIDReq{
+		SinkID:  event.SinkID,
+		OwnerID: event.Owner,
+	})
+	if err != nil {
+		es.logger.Error("could not fetch info for sink", zap.String("sink-id", event.SinkID), zap.Error(err))
+		return err
+	}
+	var metadata types.Metadata
+	if err := json.Unmarshal(sinkData.Config, &metadata); err != nil {
+		return err
+	}
+	data := config.SinkData{
+		SinkID:  sinkData.Id,
+		OwnerID: sinkData.OwnerID,
+		Backend: sinkData.Backend,
+		Config:  metadata,
+	}
+	_ = data.State.SetFromString(sinkData.State)
+
+	deploy, err := config.GetDeploymentJson(es.kafkaUrl, data)
+	if err != nil {
+		es.logger.Error("error trying to get deployment json for sink ID", zap.String("sinkId", event.SinkID), zap.Error(err))
+		return err
+	}
+	err = es.sinkerKeyRedisClient.HSet(ctx, deploymentKey, event.SinkID, deploy).Err()
+	if err != nil {
+		es.logger.Error("error trying to update deployment json for sink ID", zap.String("sinkId", event.SinkID), zap.Error(err))
+		return err
+	}
+	err = es.kubecontrol.UpdateOtelCollector(ctx, event.Owner, event.SinkID, deploy)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (es eventStore) UpdateSinkCache(ctx context.Context, data config.SinkData) (err error) {
+	keyPrefix := "sinker_key"
+	skey := fmt.Sprintf("%s-%s:%s", keyPrefix, data.OwnerID, data.SinkID)
+	bytes, err := json.Marshal(data)
+	if err != nil {
+		return err
+	}
+	if err = es.sinkerKeyRedisClient.Set(ctx, skey, bytes, 0).Err(); err != nil {
+		es.logger.Error("failed to update sink cache", zap.Error(err))
+		return err
+	}
+	return
+}
+
+func (es eventStore) UpdateSinkStateCache(ctx context.Context, data config.SinkData) (err error) {
+	keyPrefix := "sinker_key"
+	skey := fmt.Sprintf("%s-%s:%s", keyPrefix, data.OwnerID, data.SinkID)
+	bytes, err := json.Marshal(data)
+	if err != nil {
+		es.logger.Error("error updating sink cache state", zap.Error(err))
+		return err
+	}
+	if err = es.sinkerKeyRedisClient.Set(ctx, skey, bytes, 0).Err(); err != nil {
+		return err
+	}
+	return
+}
+
+// GetActivity returns the last recorded collector activity (unix seconds) for a sink
+func (es eventStore) GetActivity(sinkID string) (int64, error) {
+	if sinkID == "" {
+		return 0, errors.New("invalid parameters")
+	}
+	skey := fmt.Sprintf("%s:%s", activityPrefix, sinkID)
+	secs, err := es.sinkerKeyRedisClient.Get(context.Background(), skey).Result()
+	if err != nil {
+		return 0, err
+	}
+	lastActivity, err := strconv.ParseInt(secs, 10, 64)
+	if err != nil {
+		return 0, err
+	}
+	return lastActivity, nil
+}
+
+func (es eventStore) RemoveSinkActivity(ctx context.Context, sinkId string) error {
+	skey := fmt.Sprintf("%s:%s", activityPrefix, sinkId)
+	cmd := es.sinkerKeyRedisClient.Del(ctx, skey, sinkId)
+	if err := cmd.Err(); err != nil {
+		es.logger.Error("error during redis deletion of sink activity key", zap.String("sink-id", sinkId), zap.Error(err))
+		return err
+	}
+	return nil
+}
+
+func (es eventStore) PublishSinkStateChange(sink *sinkspb.SinkRes, status string, logsErr error, err error) {
+	streamID := "orb.sinker"
+	logMessage := ""
+	if logsErr != nil {
+		logMessage = logsErr.Error()
+	}
+	event := redis.SinkerUpdateEvent{
+		SinkID:    sink.Id,
+		Owner:     sink.OwnerID,
+		State:     status,
+		Msg:       logMessage,
+		Timestamp: time.Now(),
+	}
+
+	record := &redis2.XAddArgs{
+		Stream: streamID,
+		Values: event.Encode(),
+		MaxLen: streamLen,
+		Approx: true,
+	}
+	err = es.streamRedisClient.XAdd(context.Background(), record).Err()
+	if err != nil {
+		es.logger.Error("error sending event to event store", zap.Error(err))
+	}
+	es.logger.Info("Maestro notified change of status for sink", zap.String("newState", status), zap.String("sink-id", sink.Id))
+}
+
+func decodeSinksEvent(event map[string]interface{}, operation string) (redis.SinksUpdateEvent, error) {
+	val := redis.SinksUpdateEvent{
+		SinkID:    read(event, "sink_id", ""),
+		Owner:     read(event, "owner", ""),
+		Config:    readMetadata(event, "config"),
+		Timestamp: time.Now(),
+	}
+	if operation != sinksDelete {
+		var metadata types.Metadata
+		if err := json.Unmarshal([]byte(read(event, "config", "")), &metadata); err != nil {
+			return redis.SinksUpdateEvent{}, err
+		}
+		val.Config = metadata
+		return val, nil
+	}
+
+	return val, nil
+}
diff --git a/maestro/redis/consumer/sinker.go b/maestro/redis/consumer/sinker.go
deleted file mode 100644
index de431d1d0..000000000
--- a/maestro/redis/consumer/sinker.go
+++ /dev/null
@@ -1,151 +0,0 @@
-package consumer
-
-import (
-	"context"
-
-	"github.com/go-redis/redis/v8"
-	maestroredis "github.com/orb-community/orb/maestro/redis"
-	"github.com/orb-community/orb/maestro/service"
-	"go.uber.org/zap"
-)
-
-type SinkerActivityListener interface {
-	// SubscribeSinksEvents - listen to sink_activity, sink_idle because of state management and deployments start or stop
-	SubscribeSinkerIdleEvents(ctx context.Context) error
-
-	// SubscribeSinksEvents - listen to sink_activity
-	SubscribeSinkerActivityEvents(ctx context.Context) error
-}
-
-type sinkerActivityListenerService struct {
-	logger       *zap.Logger
-	redisClient  *redis.Client
-	eventService service.EventService
-}
-
-const (
-	idleStream     = "orb.sink_idle"
-	activityStream = "orb.sink_activity"
-)
-
-func NewSinkerActivityListener(l *zap.Logger, eventService service.EventService, redisClient *redis.Client) SinkerActivityListener {
-	logger := l.Named("sinker-activity-listener")
-	return &sinkerActivityListenerService{
-		logger:       logger,
-		redisClient:  redisClient,
-		eventService: eventService,
-	}
-}
-
-func (s *sinkerActivityListenerService) SubscribeSinksActivity(ctx context.Context) error {
-	err := s.redisClient.XGroupCreateMkStream(ctx, activityStream, maestroredis.GroupMaestro, "$").Err()
-	if err != nil && err.Error() != maestroredis.Exists { - 
return err - } - s.logger.Debug("Reading Sinker Activity Events", zap.String("stream", activityStream)) - for { - select { - case <-ctx.Done(): - s.logger.Info("closing sinker_activity_listener routine") - return nil - default: - streams, err := s.redisClient.XReadGroup(ctx, &redis.XReadGroupArgs{ - Group: maestroredis.GroupMaestro, - Consumer: "orb_maestro-es-consumer", - Streams: []string{activityStream, ">"}, - Count: 1000, - }).Result() - if err != nil || len(streams) == 0 { - if err != nil { - s.logger.Error("error reading activity stream", zap.Error(err)) - } - continue - } - for _, msg := range streams[0].Messages { - event := maestroredis.SinkerUpdateEvent{} - event.Decode(msg.Values) - s.logger.Debug("Reading message from activity stream", - zap.String("message_id", msg.ID), - zap.String("sink_id", event.SinkID), - zap.String("owner_id", event.OwnerID)) - go func() { - err := s.eventService.HandleSinkActivity(ctx, event) - if err != nil { - s.logger.Error("Failed to handle sinks event", zap.Error(err)) - } else { - s.redisClient.XAck(ctx, activityStream, maestroredis.GroupMaestro, msg.ID) - } - }() - if err != nil { - s.logger.Error("error receiving message", zap.Error(err)) - return err - } - } - } - } -} - -func (s *sinkerActivityListenerService) SubscribeSinksIdle(ctx context.Context) error { - err := s.redisClient.XGroupCreateMkStream(ctx, idleStream, maestroredis.GroupMaestro, "$").Err() - if err != nil && err.Error() != maestroredis.Exists { - return err - } - s.logger.Debug("Reading Sinker Idle Events", zap.String("stream", idleStream)) - for { - select { - case <-ctx.Done(): - s.logger.Info("closing sinker_idle_listener routine") - return nil - default: - streams, err := s.redisClient.XReadGroup(ctx, &redis.XReadGroupArgs{ - Group: maestroredis.GroupMaestro, - Consumer: "orb_maestro-es-consumer", - Streams: []string{idleStream, ">"}, - }).Result() - if err != nil || len(streams) == 0 { - if err != nil { - s.logger.Error("error reading idle stream", zap.Error(err)) - } - continue - } - for _, msg := range streams[0].Messages { - event := maestroredis.SinkerUpdateEvent{} - event.Decode(msg.Values) - s.logger.Debug("Reading message from idle stream", - zap.String("message_id", msg.ID), - zap.String("sink_id", event.SinkID), - zap.String("owner_id", event.OwnerID)) - go func() { - err := s.eventService.HandleSinkIdle(ctx, event) - if err != nil { - s.logger.Error("Failed to handle sinks event", zap.Error(err)) - } else { - s.redisClient.XAck(ctx, idleStream, maestroredis.GroupMaestro, msg.ID) - } - }() - if err != nil { - s.logger.Error("error receiving message", zap.Error(err)) - return err - } - } - } - } -} - -func (s *sinkerActivityListenerService) SubscribeSinkerActivityEvents(ctx context.Context) error { - err := s.SubscribeSinksActivity(ctx) - if err != nil { - s.logger.Error("error reading activity stream", zap.Error(err)) - return err - } - return nil -} - -func (s *sinkerActivityListenerService) SubscribeSinkerIdleEvents(ctx context.Context) error { - err := s.SubscribeSinksIdle(ctx) - if err != nil { - s.logger.Error("error reading idle stream", zap.Error(err)) - return err - } - return nil -} diff --git a/maestro/redis/consumer/sinks.go b/maestro/redis/consumer/sinks.go deleted file mode 100644 index 5a0e486db..000000000 --- a/maestro/redis/consumer/sinks.go +++ /dev/null @@ -1,137 +0,0 @@ -package consumer - -import ( - "context" - "errors" - "github.com/go-redis/redis/v8" - maestroredis "github.com/orb-community/orb/maestro/redis" - 
"github.com/orb-community/orb/maestro/service" - sinkspb "github.com/orb-community/orb/sinks/pb" - redis2 "github.com/orb-community/orb/sinks/redis" - "go.uber.org/zap" -) - -type SinksListener interface { - // SubscribeSinksEvents - listen to sinks.create, sinks.update, sinks.delete to handle the deployment creation - SubscribeSinksEvents(context context.Context) error -} - -type sinksListenerService struct { - logger *zap.Logger - deploymentService service.EventService - redisClient *redis.Client - sinksClient sinkspb.SinkServiceClient -} - -func NewSinksListenerController(l *zap.Logger, eventService service.EventService, redisClient *redis.Client, - sinksClient sinkspb.SinkServiceClient) SinksListener { - logger := l.Named("sinks_listener") - return &sinksListenerService{ - logger: logger, - deploymentService: eventService, - redisClient: redisClient, - sinksClient: sinksClient, - } -} - -// SubscribeSinksEvents Subscribe to listen events from sinks to maestro -func (ls *sinksListenerService) SubscribeSinksEvents(ctx context.Context) error { - //listening sinker events - err := ls.redisClient.XGroupCreateMkStream(ctx, redis2.StreamSinks, redis2.GroupMaestro, "$").Err() - if err != nil && err.Error() != redis2.Exists { - return err - } - ls.logger.Debug("Reading Sinks Events", zap.String("stream", redis2.StreamSinks)) - for { - streams, err := ls.redisClient.XReadGroup(ctx, &redis.XReadGroupArgs{ - Group: redis2.GroupMaestro, - Consumer: "orb_maestro-es-consumer", - Streams: []string{redis2.StreamSinks, ">"}, - Count: 100, - }).Result() - if err != nil || len(streams) == 0 { - continue - } - for _, msg := range streams[0].Messages { - err := ls.ReceiveMessage(ctx, msg) - if err != nil { - return err - } - } - } -} - -func (ls *sinksListenerService) ReceiveMessage(ctx context.Context, msg redis.XMessage) error { - logger := ls.logger.Named("sinks_listener:" + msg.ID) - event := msg.Values - rte, err := redis2.DecodeSinksEvent(event, event["operation"].(string)) - if err != nil { - logger.Error("Failed to handle sinks event", zap.Any("operation", event["operation"]), zap.Error(err)) - return err - } - logger.Info("received message in sinks event bus", zap.Any("operation", event["operation"])) - switch event["operation"] { - case redis2.SinkCreate: - go func() { - err = ls.handleSinksCreate(ctx, rte) //should create deployment - if err != nil { - logger.Error("Failed to handle sinks event", zap.Any("operation", event["operation"]), zap.Error(err)) - } else { - ls.redisClient.XAck(ctx, redis2.StreamSinks, redis2.GroupMaestro, msg.ID) - } - }() - case redis2.SinkUpdate: - go func() { - err = ls.handleSinksUpdate(ctx, rte) //should create collector - if err != nil { - logger.Error("Failed to handle sinks event", zap.Any("operation", event["operation"]), zap.Error(err)) - } else { - ls.redisClient.XAck(ctx, redis2.StreamSinks, redis2.GroupMaestro, msg.ID) - } - }() - case redis2.SinkDelete: - go func() { - err = ls.handleSinksDelete(ctx, rte) //should delete collector - if err != nil { - logger.Error("Failed to handle sinks event", zap.Any("operation", event["operation"]), zap.Error(err)) - } else { - ls.redisClient.XAck(ctx, redis2.StreamSinks, redis2.GroupMaestro, msg.ID) - } - }() - case <-ctx.Done(): - return errors.New("stopped listening to sinks, due to context cancellation") - } - return nil -} - -// handleSinksUpdate logic moved to deployment.EventService -func (ls *sinksListenerService) handleSinksUpdate(ctx context.Context, event maestroredis.SinksUpdateEvent) error { - 
ls.logger.Debug("Received sinks UPDATE event from sinks ID", zap.String("sinkID", event.SinkID), zap.String("owner", event.Owner)) - err := ls.deploymentService.HandleSinkUpdate(ctx, event) - if err != nil { - return err - } - - return nil -} - -// handleSinksDelete logic moved to deployment.EventService -func (ls *sinksListenerService) handleSinksDelete(ctx context.Context, event maestroredis.SinksUpdateEvent) error { - ls.logger.Debug("Received sinks DELETE event from sinks ID", zap.String("sinkID", event.SinkID), zap.String("owner", event.Owner)) - err := ls.deploymentService.HandleSinkDelete(ctx, event) - if err != nil { - return err - } - return nil -} - -// handleSinksCreate logic moved to deployment.EventService -func (ls *sinksListenerService) handleSinksCreate(ctx context.Context, event maestroredis.SinksUpdateEvent) error { - ls.logger.Debug("Received sinks to CREATE event from sinks ID", zap.String("sinkID", event.SinkID), zap.String("owner", event.Owner)) - err := ls.deploymentService.HandleSinkCreate(ctx, event) - if err != nil { - return err - } - - return nil -} diff --git a/maestro/redis/consumer/streams.go b/maestro/redis/consumer/streams.go new file mode 100644 index 000000000..c5eb0634f --- /dev/null +++ b/maestro/redis/consumer/streams.go @@ -0,0 +1,254 @@ +package consumer + +import ( + "context" + "encoding/json" + "time" + + "github.com/orb-community/orb/maestro/config" + "github.com/orb-community/orb/pkg/errors" + + "github.com/orb-community/orb/maestro/kubecontrol" + maestroredis "github.com/orb-community/orb/maestro/redis" + "github.com/orb-community/orb/pkg/types" + sinkspb "github.com/orb-community/orb/sinks/pb" + + "github.com/go-redis/redis/v8" + "go.uber.org/zap" +) + +const ( + streamSinks = "orb.sinks" + streamSinker = "orb.sinker" + groupMaestro = "orb.maestro" + + sinkerPrefix = "sinker." + sinkerUpdate = sinkerPrefix + "update" + + sinksPrefix = "sinks." 
+	sinksUpdate = sinksPrefix + "update"
+	sinksCreate = sinksPrefix + "create"
+	sinksDelete = sinksPrefix + "remove"
+
+	exists = "BUSYGROUP Consumer Group name already exists"
+)
+
+type Subscriber interface {
+	CreateDeploymentEntry(ctx context.Context, sink config.SinkData) error
+	GetDeploymentEntryFromSinkId(ctx context.Context, sinkId string) (string, error)
+
+	UpdateSinkCache(ctx context.Context, data config.SinkData) (err error)
+	UpdateSinkStateCache(ctx context.Context, data config.SinkData) (err error)
+	PublishSinkStateChange(sink *sinkspb.SinkRes, status string, logsErr error, err error)
+
+	GetActivity(sinkID string) (int64, error)
+	RemoveSinkActivity(ctx context.Context, sinkId string) error
+
+	SubscribeSinksEvents(context context.Context) error
+	SubscribeSinkerEvents(context context.Context) error
+}
+
+type eventStore struct {
+	kafkaUrl             string
+	kubecontrol          kubecontrol.Service
+	sinksClient          sinkspb.SinkServiceClient
+	streamRedisClient    *redis.Client
+	sinkerKeyRedisClient *redis.Client
+	esconsumer           string
+	logger               *zap.Logger
+}
+
+func NewEventStore(streamRedisClient, sinkerKeyRedisClient *redis.Client, kafkaUrl string, kubecontrol kubecontrol.Service, esconsumer string, sinksClient sinkspb.SinkServiceClient, logger *zap.Logger) Subscriber {
+	return eventStore{
+		kafkaUrl:             kafkaUrl,
+		kubecontrol:          kubecontrol,
+		streamRedisClient:    streamRedisClient,
+		sinkerKeyRedisClient: sinkerKeyRedisClient,
+		sinksClient:          sinksClient,
+		esconsumer:           esconsumer,
+		logger:               logger,
+	}
+}
+
+// SubscribeSinkerEvents Subscribe to listen events from sinker to maestro
+func (es eventStore) SubscribeSinkerEvents(ctx context.Context) error {
+	err := es.streamRedisClient.XGroupCreateMkStream(ctx, streamSinker, groupMaestro, "$").Err()
+	if err != nil && err.Error() != exists {
+		return err
+	}
+
+	for {
+		select {
+		case <-ctx.Done():
+			return errors.New("stopped listening to sinker, due to context cancellation")
+		default:
+		}
+		streams, err := es.streamRedisClient.XReadGroup(ctx, &redis.XReadGroupArgs{
+			Group:    groupMaestro,
+			Consumer: "orb_maestro-es-consumer",
+			Streams:  []string{streamSinker, ">"},
+			Count:    100,
+		}).Result()
+		if err != nil || len(streams) == 0 {
+			continue
+		}
+		for _, msg := range streams[0].Messages {
+			event := msg.Values
+			rte := decodeSinkerStateUpdate(event)
+			// here we should listen just to events coming from sinker, not our own "publishState" events
+			if rte.State == "active" {
+				es.logger.Info("received message in sinker event bus", zap.Any("operation", event["operation"]))
+				switch event["operation"] {
+				case sinkerUpdate:
+					go func() {
+						err = es.handleSinkerCreateCollector(ctx, rte) //sinker request to create collector
+						if err != nil {
+							es.logger.Error("Failed to handle sinker event", zap.Any("operation", event["operation"]), zap.Error(err))
+						} else {
+							es.streamRedisClient.XAck(ctx, streamSinker, groupMaestro, msg.ID)
+						}
+					}()
+				}
+			}
+		}
+	}
+}
+
+// SubscribeSinksEvents Subscribe to listen events from sinks to maestro
+func (es eventStore) SubscribeSinksEvents(ctx context.Context) error {
+	//listening sinker events
+	err := es.streamRedisClient.XGroupCreateMkStream(ctx, streamSinks, groupMaestro, "$").Err()
+	if err != nil && err.Error() != exists {
+		return err
+	}
+
+	for {
+		select {
+		case <-ctx.Done():
+			return errors.New("stopped listening to sinks, due to context cancellation")
+		default:
+		}
+		streams, err := es.streamRedisClient.XReadGroup(ctx, &redis.XReadGroupArgs{
+			Group:    groupMaestro,
+			Consumer: "orb_maestro-es-consumer",
+			Streams:  []string{streamSinks, ">"},
+			Count:    100,
+		}).Result()
+		if err != nil || len(streams) == 0 {
+			continue
+		}
+		for _, msg := range streams[0].Messages {
+			event := msg.Values
+			rte, err := decodeSinksEvent(event, event["operation"].(string))
+			if err != nil {
+				es.logger.Error("Failed to handle sinks event", zap.Any("operation", event["operation"]), zap.Error(err))
+				break
+			}
+			es.logger.Info("received message in sinks event bus", zap.Any("operation", event["operation"]))
+			switch event["operation"] {
+			case sinksCreate:
+				go func() {
+					err = es.handleSinksCreateCollector(ctx, rte) //should create collector
+					if err != nil {
+						es.logger.Error("Failed to handle sinks event", zap.Any("operation", event["operation"]), zap.Error(err))
+					} else {
+						es.streamRedisClient.XAck(ctx, streamSinks, groupMaestro, msg.ID)
+					}
+				}()
+			case sinksUpdate:
+				go func() {
+					err = es.handleSinksUpdateCollector(ctx, rte) //should update collector
+					if err != nil {
+						es.logger.Error("Failed to handle sinks event", zap.Any("operation", event["operation"]), zap.Error(err))
+					} else {
+						es.streamRedisClient.XAck(ctx, streamSinks, groupMaestro, msg.ID)
+					}
+				}()
+			case sinksDelete:
+				go func() {
+					err = es.handleSinksDeleteCollector(ctx, rte) //should delete collector
+					if err != nil {
+						es.logger.Error("Failed to handle sinks event", zap.Any("operation", event["operation"]), zap.Error(err))
+					} else {
+						es.streamRedisClient.XAck(ctx, streamSinks, groupMaestro, msg.ID)
+					}
+				}()
+			}
+		}
+	}
+}
+
+// handleSinkerDeleteCollector Delete collector
+func (es eventStore) handleSinkerDeleteCollector(ctx context.Context, event maestroredis.SinkerUpdateEvent) error {
+	es.logger.Info("Received maestro DELETE event from sinker, sink state", zap.String("state", event.State), zap.String("sinkID", event.SinkID), zap.String("ownerID", event.Owner))
+	deploymentEntry, err := es.GetDeploymentEntryFromSinkId(ctx, event.SinkID)
+	if err != nil {
+		return err
+	}
+	err = es.kubecontrol.DeleteOtelCollector(ctx, event.Owner, event.SinkID, deploymentEntry)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// handleSinkerCreateCollector Create collector
+func (es eventStore) handleSinkerCreateCollector(ctx context.Context, event maestroredis.SinkerUpdateEvent) error {
+	es.logger.Info("Received maestro CREATE event from sinker, sink state", zap.String("state", event.State), zap.String("sinkID", event.SinkID), zap.String("ownerID", event.Owner))
+	deploymentEntry, err := es.GetDeploymentEntryFromSinkId(ctx, event.SinkID)
+	if err != nil {
+		sink, err := es.sinksClient.RetrieveSink(ctx, &sinkspb.SinkByIDReq{
+			SinkID:  event.SinkID,
+			OwnerID: event.Owner,
+		})
+		if err != nil {
+			es.logger.Error("could not find deployment entry from sink-id", zap.String("sinkID", event.SinkID), zap.Error(err))
+			return err
+		}
+		var metadata types.Metadata
+		if err := json.Unmarshal(sink.Config, &metadata); err != nil {
+			return err
+		}
+		sinkData := config.SinkData{
+			SinkID:  sink.Id,
+			OwnerID: sink.OwnerID,
+			Backend: sink.Backend,
+			Config:  metadata,
+		}
+		err = es.CreateDeploymentEntry(ctx, sinkData)
+		if err != nil {
+			es.logger.Error("could not create deployment entry from sink", zap.String("sinkID", event.SinkID), zap.Error(err))
+			return err
+		}
+		deploymentEntry, err = es.GetDeploymentEntryFromSinkId(ctx, event.SinkID)
+		if err != nil {
+			es.logger.Error("could not fetch deployment entry after creating it", zap.String("sinkID", event.SinkID), zap.Error(err))
+			return err
+		}
+	}
+	err = es.kubecontrol.CreateOtelCollector(ctx, event.Owner, event.SinkID, deploymentEntry)
+	if err != nil {
+		es.logger.Error("could not create otel collector for sink", zap.String("sinkID", event.SinkID), zap.Error(err))
+		return err
+	}
+	return nil
+}
+
+func decodeSinkerStateUpdate(event map[string]interface{}) 
maestroredis.SinkerUpdateEvent { + val := maestroredis.SinkerUpdateEvent{ + Owner: read(event, "owner", ""), + SinkID: read(event, "sink_id", ""), + State: read(event, "state", ""), + Timestamp: time.Time{}, + } + + return val +} + +func read(event map[string]interface{}, key, def string) string { + val, ok := event[key].(string) + if !ok { + return def + } + + return val +} + +func readMetadata(event map[string]interface{}, key string) types.Metadata { + val, ok := event[key].(types.Metadata) + if !ok { + return types.Metadata{} + } + + return val +} diff --git a/maestro/redis/events.go b/maestro/redis/events.go index 56d315e2b..738903e9f 100644 --- a/maestro/redis/events.go +++ b/maestro/redis/events.go @@ -1,3 +1,10 @@ +// Copyright (c) Mainflux +// SPDX-License-Identifier: Apache-2.0 + +// Adapted for Orb project, modifications licensed under MPL v. 2.0: +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ package redis import ( @@ -8,55 +15,29 @@ import ( const ( SinkerPrefix = "sinker." SinkerUpdate = SinkerPrefix + "update" - GroupMaestro = "orb.maestro" - Exists = "BUSYGROUP Consumer Group name already exists" ) type SinksUpdateEvent struct { SinkID string Owner string Config types.Metadata - Backend string Timestamp time.Time } type SinkerUpdateEvent struct { - OwnerID string SinkID string + Owner string State string - Size string + Msg string Timestamp time.Time } -func (sue *SinksUpdateEvent) Decode(values map[string]interface{}) { - sue.SinkID = values["sink_id"].(string) - sue.Owner = values["owner"].(string) - sue.Config = types.FromMap(values["config"].(map[string]interface{})) - sue.Backend = values["backend"].(string) - var err error - sue.Timestamp, err = time.Parse(time.RFC3339, values["timestamp"].(string)) - if err != nil { - sue.Timestamp = time.Now() - } -} - -func (cse *SinkerUpdateEvent) Decode(values map[string]interface{}) { - cse.OwnerID = values["owner_id"].(string) - cse.SinkID = values["sink_id"].(string) - cse.State = values["state"].(string) - cse.Size = values["size"].(string) - var err error - cse.Timestamp, err = time.Parse(time.RFC3339, values["timestamp"].(string)) - if err != nil { - cse.Timestamp = time.Now() - } -} - -func (cse *SinkerUpdateEvent) Encode() map[string]interface{} { +func (cse SinkerUpdateEvent) Encode() map[string]interface{} { return map[string]interface{}{ "sink_id": cse.SinkID, - "owner": cse.OwnerID, + "owner": cse.Owner, "state": cse.State, + "msg": cse.Msg, "timestamp": cse.Timestamp.Unix(), "operation": SinkerUpdate, } diff --git a/maestro/redis/events_test.go b/maestro/redis/events_test.go deleted file mode 100644 index 869b2f247..000000000 --- a/maestro/redis/events_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package redis - -import ( - "github.com/stretchr/testify/assert" - "testing" - "time" -) - -func TestSinkerUpdateEvent_Decode(t *testing.T) { - type fields struct { - OwnerID string - SinkID string - State string - Size string - } - type args struct { - values map[string]interface{} - } - tests := []struct { - name string - fields fields - args args - }{ - {name: "test_decode_allfields", fields: fields{ - OwnerID: "owner-1", - SinkID: "sink-1", - State: "active", - Size: "111", - }, args: args{ - values: map[string]interface{}{ - "owner_id": "owner-1", - "sink_id": "sink-1", - "state": "active", - "size": "111", - "timestamp": time.Now().Format(time.RFC3339), - }, - }, - 
}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cse := SinkerUpdateEvent{} - cse.Decode(tt.args.values) - assert.Equal(t, tt.fields.OwnerID, cse.OwnerID) - assert.Equal(t, tt.fields.SinkID, cse.SinkID) - assert.Equal(t, tt.fields.State, cse.State) - assert.Equal(t, tt.fields.Size, cse.Size) - }) - } -} diff --git a/maestro/redis/producer/sink_status.go b/maestro/redis/producer/sink_status.go deleted file mode 100644 index b2e4b8c78..000000000 --- a/maestro/redis/producer/sink_status.go +++ /dev/null @@ -1,68 +0,0 @@ -package producer - -import ( - "context" - "time" - - "github.com/go-redis/redis/v8" - "go.uber.org/zap" -) - -const ( - streamID = "orb.maestro.sink_status" - streamLen = 1000 -) - -type SinkStatusEvent struct { - ownerId string - sinkId string - status string - errorMessage string -} - -func (e SinkStatusEvent) Encode() map[string]interface{} { - return map[string]interface{}{ - "owner_id": e.ownerId, - "sink_id": e.sinkId, - "status": e.status, - "error_message": e.errorMessage, - "timestamp": time.Now().Format(time.RFC3339), - } -} - -type Producer interface { - // PublishSinkStatus to be used to publish the sink activity to the sinker - PublishSinkStatus(ctx context.Context, ownerId string, sinkId string, status string, errorMessage string) error -} - -type maestroProducer struct { - logger *zap.Logger - streamRedis *redis.Client -} - -func NewMaestroProducer(logger *zap.Logger, streamRedis *redis.Client) Producer { - return &maestroProducer{logger: logger, streamRedis: streamRedis} -} - -// PublishSinkStatus to be used to publish the sink activity to the sinker -func (p *maestroProducer) PublishSinkStatus(ctx context.Context, ownerId string, sinkId string, status string, errorMessage string) error { - event := SinkStatusEvent{ - ownerId: ownerId, - sinkId: sinkId, - status: status, - errorMessage: errorMessage, - } - streamEvent := event.Encode() - record := &redis.XAddArgs{ - Stream: streamID, - MaxLen: streamLen, - Approx: true, - Values: streamEvent, - } - cmd := p.streamRedis.XAdd(ctx, record) - if cmd.Err() != nil { - p.logger.Error("error sending event to maestro event store", zap.Error(cmd.Err())) - return cmd.Err() - } - return nil -} diff --git a/maestro/service.go b/maestro/service.go index bf391de97..c30c61e20 100644 --- a/maestro/service.go +++ b/maestro/service.go @@ -10,19 +10,17 @@ package maestro import ( "context" + "encoding/json" + "github.com/orb-community/orb/maestro/monitor" + "github.com/orb-community/orb/pkg/types" + "strings" - kitprometheus "github.com/go-kit/kit/metrics/prometheus" "github.com/go-redis/redis/v8" - "github.com/jmoiron/sqlx" - "github.com/orb-community/orb/maestro/deployment" + maestroconfig "github.com/orb-community/orb/maestro/config" "github.com/orb-community/orb/maestro/kubecontrol" - "github.com/orb-community/orb/maestro/monitor" rediscons1 "github.com/orb-community/orb/maestro/redis/consumer" - "github.com/orb-community/orb/maestro/redis/producer" - "github.com/orb-community/orb/maestro/service" "github.com/orb-community/orb/pkg/config" sinkspb "github.com/orb-community/orb/sinks/pb" - stdprometheus "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" ) @@ -32,56 +30,30 @@ type maestroService struct { serviceContext context.Context serviceCancelFunc context.CancelFunc - deploymentService deployment.Service - sinkListenerService rediscons1.SinksListener - activityListener rediscons1.SinkerActivityListener - kubecontrol kubecontrol.Service monitor monitor.Service logger *zap.Logger 
streamRedisClient *redis.Client sinkerRedisClient *redis.Client sinksClient sinkspb.SinkServiceClient - eventService service.EventService esCfg config.EsConfig + eventStore rediscons1.Subscriber kafkaUrl string } -func NewMaestroService(logger *zap.Logger, streamRedisClient *redis.Client, sinkerRedisClient *redis.Client, - sinksGrpcClient sinkspb.SinkServiceClient, otelCfg config.OtelConfig, db *sqlx.DB, svcCfg config.BaseSvcConfig) Service { +func NewMaestroService(logger *zap.Logger, streamRedisClient *redis.Client, sinkerRedisClient *redis.Client, sinksGrpcClient sinkspb.SinkServiceClient, esCfg config.EsConfig, otelCfg config.OtelConfig) Service { kubectr := kubecontrol.NewService(logger) - repo := deployment.NewRepositoryService(db, logger) - maestroProducer := producer.NewMaestroProducer(logger, streamRedisClient) - deploymentService := deployment.NewDeploymentService(logger, repo, otelCfg.KafkaUrl, svcCfg.EncryptionKey, maestroProducer, kubectr) - ps := producer.NewMaestroProducer(logger, streamRedisClient) - monitorService := monitor.NewMonitorService(logger, &sinksGrpcClient, ps, &kubectr, deploymentService) - eventService := service.NewEventService(logger, deploymentService, &sinksGrpcClient) - eventService = service.NewTracingService(logger, eventService, - kitprometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: "maestro", - Subsystem: "comms", - Name: "message_count", - Help: "Number of messages received.", - }, []string{"method", "sink_id", "owner_id"}), - kitprometheus.NewSummaryFrom(stdprometheus.SummaryOpts{ - Namespace: "maestro", - Subsystem: "comms", - Name: "message_latency_microseconds", - Help: "Total duration of messages processed in microseconds.", - }, []string{"method", "sink_id", "owner_id"})) - sinkListenerService := rediscons1.NewSinksListenerController(logger, eventService, streamRedisClient, sinksGrpcClient) - activityListener := rediscons1.NewSinkerActivityListener(logger, eventService, streamRedisClient) + eventStore := rediscons1.NewEventStore(streamRedisClient, sinkerRedisClient, otelCfg.KafkaUrl, kubectr, esCfg.Consumer, sinksGrpcClient, logger) + monitorService := monitor.NewMonitorService(logger, &sinksGrpcClient, eventStore, &kubectr) return &maestroService{ - logger: logger, - deploymentService: deploymentService, - streamRedisClient: streamRedisClient, - sinkerRedisClient: sinkerRedisClient, - sinksClient: sinksGrpcClient, - sinkListenerService: sinkListenerService, - activityListener: activityListener, - kubecontrol: kubectr, - monitor: monitorService, - kafkaUrl: otelCfg.KafkaUrl, + logger: logger, + streamRedisClient: streamRedisClient, + sinkerRedisClient: sinkerRedisClient, + sinksClient: sinksGrpcClient, + kubecontrol: kubectr, + monitor: monitorService, + eventStore: eventStore, + kafkaUrl: otelCfg.KafkaUrl, } } @@ -91,48 +63,103 @@ func NewMaestroService(logger *zap.Logger, streamRedisClient *redis.Client, sink // And for each sink with active state, deploy OtelCollector func (svc *maestroService) Start(ctx context.Context, cancelFunction context.CancelFunc) error { + loadCtx, loadCancelFunction := context.WithCancel(ctx) + defer loadCancelFunction() svc.serviceContext = ctx svc.serviceCancelFunc = cancelFunction + sinksRes, err := svc.sinksClient.RetrieveSinks(loadCtx, &sinkspb.SinksFilterReq{OtelEnabled: "enabled"}) + if err != nil { + loadCancelFunction() + return err + } + + pods, err := svc.monitor.GetRunningPods(ctx) + if err != nil { + loadCancelFunction() + return err + } + + for _, sinkRes := range sinksRes.Sinks { + 
sinkContext := context.WithValue(loadCtx, "sink-id", sinkRes.Id) + var metadata types.Metadata + if err := json.Unmarshal(sinkRes.Config, &metadata); err != nil { + svc.logger.Warn("failed to unmarshal sink, skipping", zap.String("sink-id", sinkRes.Id)) + continue + } + if val, _ := svc.eventStore.GetDeploymentEntryFromSinkId(ctx, sinkRes.Id); val != "" { + svc.logger.Info("Skipping deploymentEntry because it is already created") + } else { + var data maestroconfig.SinkData + data.SinkID = sinkRes.Id + data.Config = metadata + data.Backend = sinkRes.Backend + err := svc.eventStore.CreateDeploymentEntry(sinkContext, data) + if err != nil { + svc.logger.Warn("failed to create deploymentEntry for sink, skipping", zap.String("sink-id", sinkRes.Id)) + continue + } + err = svc.eventStore.UpdateSinkCache(ctx, data) + if err != nil { + svc.logger.Warn("failed to update cache for sink", zap.String("sink-id", sinkRes.Id)) + continue + } + svc.logger.Info("successfully created deploymentEntry for sink", zap.String("sink-id", sinkRes.Id), zap.String("state", sinkRes.State)) + } + + isDeployed := false + if len(pods) > 0 { + for _, pod := range pods { + if strings.Contains(pod, sinkRes.Id) { + isDeployed = true + break + } + } + } + // if State is Active, deploy OtelCollector + if sinkRes.State == "active" && !isDeployed { + deploymentEntry, err := svc.eventStore.GetDeploymentEntryFromSinkId(sinkContext, sinkRes.Id) + if err != nil { + svc.logger.Warn("failed to fetch deploymentEntry for sink, skipping", zap.String("sink-id", sinkRes.Id), zap.Error(err)) + continue + } + err = svc.kubecontrol.CreateOtelCollector(sinkContext, sinkRes.OwnerID, sinkRes.Id, deploymentEntry) + if err != nil { + svc.logger.Warn("failed to deploy OtelCollector for sink, skipping", zap.String("sink-id", sinkRes.Id), zap.Error(err)) + continue + } + svc.logger.Info("successfully created otel collector for sink", zap.String("sink-id", sinkRes.Id)) + } + } + go svc.subscribeToSinksEvents(ctx) - go svc.subscribeToSinkerIdleEvents(ctx) - go svc.subscribeToSinkerActivityEvents(ctx) + go svc.subscribeToSinkerEvents(ctx) monitorCtx := context.WithValue(ctx, "routine", "monitor") - err := svc.monitor.Start(monitorCtx, cancelFunction) + err = svc.monitor.Start(monitorCtx, cancelFunction) if err != nil { svc.logger.Error("error during monitor routine start", zap.Error(err)) cancelFunction() return err } - svc.logger.Info("Maestro service started") return nil } -func (svc *maestroService) Stop() { - svc.serviceCancelFunc() - svc.logger.Info("Maestro service stopped") -} - func (svc *maestroService) subscribeToSinksEvents(ctx context.Context) { - if err := svc.sinkListenerService.SubscribeSinksEvents(ctx); err != nil { + if err := svc.eventStore.SubscribeSinksEvents(ctx); err != nil { svc.logger.Error("Bootstrap service failed to subscribe to event sourcing", zap.Error(err)) + return } svc.logger.Info("finished reading sinks events") ctx.Done() } -func (svc *maestroService) subscribeToSinkerIdleEvents(ctx context.Context) { - if err := svc.activityListener.SubscribeSinkerIdleEvents(ctx); err != nil { +func (svc *maestroService) subscribeToSinkerEvents(ctx context.Context) { + if err := svc.eventStore.SubscribeSinkerEvents(ctx); err != nil { svc.logger.Error("Bootstrap service failed to subscribe to event sourcing", zap.Error(err)) + return } - svc.logger.Info("finished reading sinker_idle events") -} - -func (svc *maestroService) subscribeToSinkerActivityEvents(ctx context.Context) { - if err := 
svc.activityListener.SubscribeSinkerActivityEvents(ctx); err != nil { - svc.logger.Error("Bootstrap service failed to subscribe to event sourcing", zap.Error(err)) - } - svc.logger.Info("finished reading sinker_activity events") + svc.logger.Info("finished reading sinker events") + ctx.Done() } diff --git a/maestro/service/deploy_service.go b/maestro/service/deploy_service.go deleted file mode 100644 index 09f4d5580..000000000 --- a/maestro/service/deploy_service.go +++ /dev/null @@ -1,198 +0,0 @@ -package service - -import ( - "context" - "encoding/json" - maestroerrors "github.com/orb-community/orb/maestro/errors" - "github.com/orb-community/orb/pkg/types" - "github.com/orb-community/orb/sinks/pb" - "time" - - "github.com/orb-community/orb/maestro/deployment" - maestroredis "github.com/orb-community/orb/maestro/redis" - "github.com/orb-community/orb/pkg/errors" - "go.uber.org/zap" -) - -// EventService will hold the business logic of the handling events from both Listeners -type EventService interface { - HandleSinkCreate(ctx context.Context, event maestroredis.SinksUpdateEvent) error - HandleSinkUpdate(ctx context.Context, event maestroredis.SinksUpdateEvent) error - HandleSinkDelete(ctx context.Context, event maestroredis.SinksUpdateEvent) error - HandleSinkActivity(ctx context.Context, event maestroredis.SinkerUpdateEvent) error - HandleSinkIdle(ctx context.Context, event maestroredis.SinkerUpdateEvent) error -} - -type eventService struct { - logger *zap.Logger - deploymentService deployment.Service - sinkGrpcClient pb.SinkServiceClient - // Configuration for KafkaURL from Orb Deployment - kafkaUrl string -} - -var _ EventService = (*eventService)(nil) - -func NewEventService(logger *zap.Logger, service deployment.Service, sinksGrpcClient *pb.SinkServiceClient) EventService { - namedLogger := logger.Named("deploy-service") - return &eventService{logger: namedLogger, deploymentService: service, sinkGrpcClient: *sinksGrpcClient} -} - -// HandleSinkCreate will create deployment entry in postgres, will create deployment in Redis, to prepare for SinkActivity -func (d *eventService) HandleSinkCreate(ctx context.Context, event maestroredis.SinksUpdateEvent) error { - d.logger.Debug("handling sink create event", zap.String("sink-id", event.SinkID), zap.String("owner-id", event.Owner)) - // Create Deployment Entry - entry := deployment.NewDeployment(event.Owner, event.SinkID, event.Config, event.Backend) - // Use deploymentService, which will create deployment in both postgres and redis - err := d.deploymentService.CreateDeployment(ctx, &entry) - if err != nil { - d.logger.Error("error trying to create deployment entry", zap.Error(err)) - return err - } - return nil -} - -func (d *eventService) HandleSinkUpdate(ctx context.Context, event maestroredis.SinksUpdateEvent) error { - now := time.Now() - d.logger.Debug("handling sink update event", zap.String("sink-id", event.SinkID)) - // check if exists deployment entry from postgres - entry, _, err := d.deploymentService.GetDeployment(ctx, event.Owner, event.SinkID) - if err != nil { - if err.Error() != "not found" { - d.logger.Error("error trying to get deployment entry", zap.Error(err)) - return err - } else { - newEntry := deployment.NewDeployment(event.Owner, event.SinkID, event.Config, event.Backend) - err := d.deploymentService.CreateDeployment(ctx, &newEntry) - if err != nil { - d.logger.Error("error trying to recreate deployment entry", zap.Error(err)) - return err - } - entry = &newEntry - } - } - // update deployment entry in postgres - 
err = entry.SetConfig(event.Config) - if err != nil { - return err - } - entry.LastCollectorStopTime = &now - entry.LastStatus = "unknown" - entry.LastStatusUpdate = &now - entry.LastErrorMessage = "" - entry.LastErrorTime = nil - err = d.deploymentService.UpdateDeployment(ctx, entry) - - return nil -} - -func (d *eventService) HandleSinkDelete(ctx context.Context, event maestroredis.SinksUpdateEvent) error { - d.logger.Debug("handling sink delete event", zap.String("sink-id", event.SinkID)) - deploymentEntry, _, err := d.deploymentService.GetDeployment(ctx, event.Owner, event.SinkID) - if err != nil { - d.logger.Warn("did not find collector entry for sink", zap.String("sink-id", event.SinkID)) - return err - } - if deploymentEntry.LastCollectorDeployTime == nil || deploymentEntry.LastCollectorDeployTime.Before(time.Now()) { - if deploymentEntry.LastCollectorStopTime == nil || deploymentEntry.LastCollectorStopTime.Before(time.Now()) { - d.logger.Warn("collector is not running, skipping") - } - } - err = d.deploymentService.RemoveDeployment(ctx, event.Owner, event.SinkID) - if err != nil { - d.logger.Warn("error removing deployment entry, deployment will be orphan", zap.Error(err)) - return err - } - return nil -} - -func (d *eventService) HandleSinkActivity(ctx context.Context, event maestroredis.SinkerUpdateEvent) error { - if event.State != "active" { - d.logger.Error("trying to deploy sink that is not active", zap.String("sink-id", event.SinkID), - zap.String("status", event.State)) - return errors.New("trying to deploy sink that is not active") - } - deploymentEntry, _, err := d.deploymentService.GetDeployment(ctx, event.OwnerID, event.SinkID) - if err != nil { - if err == maestroerrors.NotFound { - d.logger.Info("did not find collector entry for sink, retrieving from sinks grpc", zap.String("sink-id", event.SinkID)) - sink, err := d.sinkGrpcClient.RetrieveSink(ctx, &pb.SinkByIDReq{ - SinkID: event.SinkID, - OwnerID: event.OwnerID, - }) - if err != nil { - d.logger.Error("error retrieving sink from grpc", zap.Error(err)) - return err - } - metadata := make(map[string]interface{}) - err = json.Unmarshal(sink.Config, &metadata) - if err != nil { - d.logger.Error("error unmarshalling sink metadata", zap.Error(err)) - return err - } - newEntry := deployment.NewDeployment(event.OwnerID, event.SinkID, types.FromMap(metadata), sink.Backend) - err = d.deploymentService.CreateDeployment(ctx, &newEntry) - if err != nil { - d.logger.Error("error trying to recreate deployment entry", zap.Error(err)) - return err - } - deploymentEntry, _, err = d.deploymentService.GetDeployment(ctx, event.OwnerID, event.SinkID) - if err != nil { - d.logger.Error("error trying to recreate deployment entry", zap.Error(err)) - return err - } - } else { - d.logger.Warn("did not find collector entry for sink", zap.String("sink-id", event.SinkID)) - return err - } - } - d.logger.Debug("handling sink activity event", zap.String("sink-id", event.SinkID), zap.String("deployment-status", deploymentEntry.LastStatus)) - if deploymentEntry.LastStatus == "unknown" || deploymentEntry.LastStatus == "idle" { - // async update sink status to provisioning - go func() { - err := d.deploymentService.UpdateStatus(ctx, event.OwnerID, event.SinkID, "provisioning", "") - if err != nil { - d.logger.Error("error updating status to provisioning", zap.Error(err)) - } - }() - _, err = d.deploymentService.NotifyCollector(ctx, event.OwnerID, event.SinkID, "deploy", "", "") - if err != nil { - d.logger.Error("error trying to notify collector", 
zap.Error(err)) - err2 := d.deploymentService.UpdateStatus(ctx, event.OwnerID, event.SinkID, "provisioning_error", err.Error()) - if err2 != nil { - d.logger.Warn("error during notifying provisioning error, customer will not be notified of error") - d.logger.Error("error during update provisioning error status", zap.Error(err)) - return err - } - return err - } - return nil - } else { - d.logger.Warn("collector is already running, skipping", zap.String("last_status", deploymentEntry.LastStatus)) - return nil - } -} - -func (d *eventService) HandleSinkIdle(ctx context.Context, event maestroredis.SinkerUpdateEvent) error { - // check if exists deployment entry from postgres - d.logger.Debug("handling sink idle event", zap.String("sink-id", event.SinkID), zap.String("owner-id", event.OwnerID)) - // async update sink status to idle - go func() { - err := d.deploymentService.UpdateStatus(ctx, event.OwnerID, event.SinkID, "idle", "") - if err != nil { - d.logger.Error("error updating status to idle", zap.Error(err)) - } - }() - // dropping idle otel collector - _, err := d.deploymentService.NotifyCollector(ctx, event.OwnerID, event.SinkID, "delete", "idle", "") - if err != nil { - d.logger.Error("error trying to notify collector", zap.Error(err)) - err2 := d.deploymentService.UpdateStatus(ctx, event.OwnerID, event.SinkID, "provisioning_error", err.Error()) - if err2 != nil { - d.logger.Warn("error during notifying provisioning error, customer will not be notified of error") - } - return err - } - - return nil -} diff --git a/maestro/service/handle_sinker_test.go b/maestro/service/handle_sinker_test.go deleted file mode 100644 index d5ccba856..000000000 --- a/maestro/service/handle_sinker_test.go +++ /dev/null @@ -1,145 +0,0 @@ -package service - -import ( - "context" - "github.com/orb-community/orb/maestro/deployment" - "github.com/orb-community/orb/maestro/redis" - "github.com/orb-community/orb/pkg/types" - "github.com/stretchr/testify/require" - "go.uber.org/zap" - "testing" - "time" -) - -func TestEventService_HandleSinkActivity(t *testing.T) { - t.Skip() - type args struct { - event redis.SinkerUpdateEvent - } - tests := []struct { - name string - args args - wantErr bool - }{ - { - name: "activity on a sink that does not exist", - args: args{ - event: redis.SinkerUpdateEvent{ - OwnerID: "owner1", - SinkID: "sink1", - State: "active", - Size: "22", - Timestamp: time.Now(), - }, - }, - wantErr: true, - }, - { - name: "activity success", - args: args{ - event: redis.SinkerUpdateEvent{ - OwnerID: "owner2", - SinkID: "sink22", - State: "active", - Size: "22", - Timestamp: time.Now(), - }, - }, wantErr: false, - }, - } - logger := zap.NewNop() - deploymentService := deployment.NewDeploymentService(logger, NewFakeRepository(logger), "kafka:9092", - "MY_SECRET", NewTestProducer(logger), NewTestKubeCtr(logger)) - d := NewEventService(logger, deploymentService, nil) - err := d.HandleSinkCreate(context.Background(), redis.SinksUpdateEvent{ - SinkID: "sink22", - Owner: "owner2", - Backend: "prometheus", - Config: types.Metadata{ - "exporter": types.Metadata{ - "remote_host": "https://acme.com/prom/push", - }, - "authentication": types.Metadata{ - "type": "basicauth", - "username": "prom-user-2", - "password": "dbpass-2", - }, - }, - }) - require.NoError(t, err, "should not error") - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctx := context.WithValue(context.Background(), "test", tt.name) - if err := d.HandleSinkActivity(ctx, tt.args.event); (err != nil) != tt.wantErr { - 
t.Errorf("HandleSinkActivity() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - -func TestEventService_HandleSinkIdle(t *testing.T) { - t.Skip() - type args struct { - event redis.SinkerUpdateEvent - } - tests := []struct { - name string - args args - wantErr bool - }{ - { - name: "sink idle on a sink that does not exist", - args: args{ - event: redis.SinkerUpdateEvent{ - OwnerID: "owner1", - SinkID: "sink1", - State: "idle", - Size: "22", - Timestamp: time.Now(), - }, - }, - wantErr: true, - }, - { - name: "sink idle success", - args: args{ - event: redis.SinkerUpdateEvent{ - OwnerID: "owner2", - SinkID: "sink222", - State: "idle", - Size: "22", - Timestamp: time.Now(), - }, - }, wantErr: false, - }, - } - logger := zap.NewNop() - deploymentService := deployment.NewDeploymentService(logger, NewFakeRepository(logger), "kafka:9092", "MY_SECRET", NewTestProducer(logger), - NewTestKubeCtr(logger)) - v := NewSinksPb(logger) - d := NewEventService(logger, deploymentService, &v) - err := d.HandleSinkCreate(context.Background(), redis.SinksUpdateEvent{ - SinkID: "sink222", - Owner: "owner2", - Backend: "prometheus", - Config: types.Metadata{ - "exporter": types.Metadata{ - "remote_host": "https://acme.com/prom/push", - }, - "authentication": types.Metadata{ - "type": "basicauth", - "username": "prom-user-2", - "password": "dbpass-2", - }, - }, - }) - require.NoError(t, err, "should not error") - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctx := context.WithValue(context.Background(), "test", tt.name) - if err := d.HandleSinkIdle(ctx, tt.args.event); (err != nil) != tt.wantErr { - t.Errorf("HandleSinkIdle() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} diff --git a/maestro/service/handle_sinks_test.go b/maestro/service/handle_sinks_test.go deleted file mode 100644 index a2e46e940..000000000 --- a/maestro/service/handle_sinks_test.go +++ /dev/null @@ -1,223 +0,0 @@ -package service - -import ( - "context" - "github.com/orb-community/orb/maestro/deployment" - "github.com/orb-community/orb/maestro/redis" - "github.com/orb-community/orb/pkg/types" - "github.com/stretchr/testify/require" - "go.uber.org/zap" - "testing" - "time" -) - -func Test_eventService_HandleSinkCreate(t *testing.T) { - t.Skip() - type args struct { - event redis.SinksUpdateEvent - } - tests := []struct { - name string - args args - wantErr bool - }{ - { - name: "create event", - args: args{ - event: redis.SinksUpdateEvent{ - SinkID: "crt-sink1", - Owner: "owner1", - Config: types.Metadata{ - "exporter": types.Metadata{ - "remote_host": "https://acme.com/prom/push", - }, - "authentication": types.Metadata{ - "type": "basicauth", - "username": "prom-user", - "password": "dbpass", - }, - }, - Backend: "prometheus", - Timestamp: time.Now(), - }, - }, - wantErr: false, - }, - { - name: "create event without config", - args: args{ - event: redis.SinksUpdateEvent{ - SinkID: "crt-sink1", - Owner: "owner1", - Config: nil, - Backend: "prometheus", - Timestamp: time.Now(), - }, - }, - wantErr: true, - }, - } - logger := zap.NewNop() - deploymentService := deployment.NewDeploymentService(logger, NewFakeRepository(logger), "kafka:9092", "MY_SECRET", NewTestProducer(logger), nil) - d := NewEventService(logger, deploymentService, nil) - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctx := context.WithValue(context.Background(), "test", tt.name) - if err := d.HandleSinkCreate(ctx, tt.args.event); (err != nil) != tt.wantErr { - t.Errorf("HandleSinkCreate() error = %v, wantErr %v", 
err, tt.wantErr) - } - }) - } -} - -func TestEventService_HandleSinkUpdate(t *testing.T) { - t.Skip() - type args struct { - event redis.SinksUpdateEvent - } - tests := []struct { - name string - args args - wantErr bool - }{ - { - name: "update event when there is none in db", - args: args{ - event: redis.SinksUpdateEvent{ - SinkID: "upd-sink1", - Owner: "owner1", - Config: types.Metadata{ - "exporter": types.Metadata{ - "remote_host": "https://acme.com/prom/push", - }, - "authentication": types.Metadata{ - "type": "basicauth", - "username": "prom-user", - "password": "dbpass", - }, - }, - Backend: "prometheus", - Timestamp: time.Now(), - }, - }, - wantErr: false, - }, - { - name: "update event success", - args: args{ - event: redis.SinksUpdateEvent{ - SinkID: "upd-sink1", - Owner: "owner1", - Backend: "prometheus", - Config: types.Metadata{ - "exporter": types.Metadata{ - "remote_host": "https://acme.com/prom/push", - }, - "authentication": types.Metadata{ - "type": "basicauth", - "username": "prom-user-2", - "password": "dbpass-2", - }, - }, - Timestamp: time.Now(), - }, - }, - wantErr: false, - }, - } - logger := zap.NewNop() - deploymentService := deployment.NewDeploymentService(logger, NewFakeRepository(logger), "kafka:9092", "MY_SECRET", NewTestProducer(logger), - NewTestKubeCtr(logger)) - v := NewSinksPb(logger) - d := NewEventService(logger, deploymentService, &v) - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctx := context.WithValue(context.Background(), "test", tt.name) - if err := d.HandleSinkUpdate(ctx, tt.args.event); (err != nil) != tt.wantErr { - t.Errorf("HandleSinkUpdate() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - -func TestEventService_HandleSinkDelete(t *testing.T) { - t.Skip() - type args struct { - event redis.SinksUpdateEvent - } - tests := []struct { - name string - args args - wantErr bool - }{ - { - name: "delete event when there is none in db", - args: args{ - event: redis.SinksUpdateEvent{ - SinkID: "sink1", - Owner: "owner1", - Backend: "prometheus", - Config: types.Metadata{ - "exporter": types.Metadata{ - "remote_host": "https://acme.com/prom/push", - }, - "authentication": types.Metadata{ - "type": "basicauth", - "username": "prom-user-2", - "password": "dbpass-2", - }, - }, - }, - }, - wantErr: true, - }, - { - name: "delete event success", - args: args{ - event: redis.SinksUpdateEvent{ - SinkID: "sink2-1", - Owner: "owner2", - Backend: "prometheus", - Config: types.Metadata{ - "exporter": types.Metadata{ - "remote_host": "https://acme.com/prom/push", - }, - "authentication": types.Metadata{ - "type": "basicauth", - "username": "prom-user-2", - "password": "dbpass-2", - }, - }, - }, - }, - wantErr: false, - }, - } - logger := zap.NewNop() - deploymentService := deployment.NewDeploymentService(logger, NewFakeRepository(logger), "kafka:9092", "MY_SECRET", NewTestProducer(logger), nil) - d := NewEventService(logger, deploymentService, nil) - err := d.HandleSinkCreate(context.Background(), redis.SinksUpdateEvent{ - SinkID: "sink2-1", - Owner: "owner2", - Backend: "prometheus", - Config: types.Metadata{ - "exporter": types.Metadata{ - "remote_host": "https://acme.com/prom/push", - }, - "authentication": types.Metadata{ - "type": "basicauth", - "username": "prom-user-2", - "password": "dbpass-2", - }, - }, - }) - require.NoError(t, err, "should not error") - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctx := context.WithValue(context.Background(), "test", tt.name) - if err := d.HandleSinkDelete(ctx, 
tt.args.event); (err != nil) != tt.wantErr { - t.Errorf("HandleSinkDelete() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} diff --git a/maestro/service/kubectr_test.go b/maestro/service/kubectr_test.go deleted file mode 100644 index fb449a8cc..000000000 --- a/maestro/service/kubectr_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package service - -import ( - "context" - "github.com/orb-community/orb/maestro/kubecontrol" - "go.uber.org/zap" -) - -type testKubeCtr struct { - logger *zap.Logger -} - -func NewTestKubeCtr(logger *zap.Logger) kubecontrol.Service { - return &testKubeCtr{logger: logger} -} - -func (t *testKubeCtr) CreateOtelCollector(ctx context.Context, ownerID, sinkID, deploymentEntry string) (string, error) { - name := "test-collector" - return name, nil -} - -func (t *testKubeCtr) KillOtelCollector(ctx context.Context, deploymentName, sinkID string) error { - return nil -} diff --git a/maestro/service/metrics_middleware.go b/maestro/service/metrics_middleware.go deleted file mode 100644 index 81f8d8df5..000000000 --- a/maestro/service/metrics_middleware.go +++ /dev/null @@ -1,85 +0,0 @@ -package service - -import ( - "context" - "github.com/go-kit/kit/metrics" - maestroredis "github.com/orb-community/orb/maestro/redis" - "go.uber.org/zap" - "time" -) - -type tracingService struct { - logger *zap.Logger - counter metrics.Counter - latency metrics.Histogram - nextService EventService -} - -func NewTracingService(logger *zap.Logger, service EventService, counter metrics.Counter, latency metrics.Histogram) EventService { - return &tracingService{logger: logger, nextService: service, counter: counter, latency: latency} -} - -func (t *tracingService) HandleSinkCreate(ctx context.Context, event maestroredis.SinksUpdateEvent) error { - defer func(begun time.Time) { - labels := []string{ - "method", "HandleSinkCreate", - "sink_id", event.SinkID, - "owner_id", event.Owner, - } - t.counter.With(labels...).Add(1) - t.latency.With(labels...).Observe(float64(time.Since(begun).Microseconds())) - }(time.Now()) - return t.nextService.HandleSinkCreate(ctx, event) -} - -func (t *tracingService) HandleSinkUpdate(ctx context.Context, event maestroredis.SinksUpdateEvent) error { - defer func(begun time.Time) { - labels := []string{ - "method", "HandleSinkCreate", - "sink_id", event.SinkID, - "owner_id", event.Owner, - } - t.counter.With(labels...).Add(1) - t.latency.With(labels...).Observe(float64(time.Since(begun).Microseconds())) - }(time.Now()) - return t.nextService.HandleSinkUpdate(ctx, event) -} - -func (t *tracingService) HandleSinkDelete(ctx context.Context, event maestroredis.SinksUpdateEvent) error { - defer func(begun time.Time) { - labels := []string{ - "method", "HandleSinkCreate", - "sink_id", event.SinkID, - "owner_id", event.Owner, - } - t.counter.With(labels...).Add(1) - t.latency.With(labels...).Observe(float64(time.Since(begun).Microseconds())) - }(time.Now()) - return t.nextService.HandleSinkDelete(ctx, event) -} - -func (t *tracingService) HandleSinkActivity(ctx context.Context, event maestroredis.SinkerUpdateEvent) error { - defer func(begun time.Time) { - labels := []string{ - "method", "HandleSinkCreate", - "sink_id", event.SinkID, - "owner_id", event.OwnerID, - } - t.counter.With(labels...).Add(1) - t.latency.With(labels...).Observe(float64(time.Since(begun).Microseconds())) - }(time.Now()) - return t.nextService.HandleSinkActivity(ctx, event) -} - -func (t *tracingService) HandleSinkIdle(ctx context.Context, event maestroredis.SinkerUpdateEvent) error { - defer 
func(begun time.Time) { - labels := []string{ - "method", "HandleSinkCreate", - "sink_id", event.SinkID, - "owner_id", event.OwnerID, - } - t.counter.With(labels...).Add(1) - t.latency.With(labels...).Observe(float64(time.Since(begun).Microseconds())) - }(time.Now()) - return t.nextService.HandleSinkIdle(ctx, event) -} diff --git a/maestro/service/pbmock_test.go b/maestro/service/pbmock_test.go deleted file mode 100644 index 057b97c50..000000000 --- a/maestro/service/pbmock_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package service - -import ( - "context" - "github.com/orb-community/orb/sinks/pb" - "go.uber.org/zap" - "google.golang.org/grpc" -) - -type mockSinksPb struct { - logger *zap.Logger -} - -func NewSinksPb(logger *zap.Logger) pb.SinkServiceClient { - return &mockSinksPb{logger: logger} -} - -var _ pb.SinkServiceClient = (*mockSinksPb)(nil) - -func (m mockSinksPb) RetrieveSink(ctx context.Context, in *pb.SinkByIDReq, opts ...grpc.CallOption) (*pb.SinkRes, error) { - return nil, nil -} - -func (m mockSinksPb) RetrieveSinks(ctx context.Context, in *pb.SinksFilterReq, opts ...grpc.CallOption) (*pb.SinksRes, error) { - return nil, nil -} diff --git a/maestro/service/producer_test.go b/maestro/service/producer_test.go deleted file mode 100644 index e108b1bfa..000000000 --- a/maestro/service/producer_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package service - -import ( - "context" - "github.com/orb-community/orb/maestro/redis/producer" - "go.uber.org/zap" -) - -type testProducer struct { - logger *zap.Logger -} - -func NewTestProducer(logger *zap.Logger) producer.Producer { - return &testProducer{logger: logger} -} - -func (t *testProducer) PublishSinkStatus(_ context.Context, _ string, _ string, _ string, _ string) error { - return nil -} diff --git a/maestro/service/repository_test.go b/maestro/service/repository_test.go deleted file mode 100644 index 13e89518b..000000000 --- a/maestro/service/repository_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package service - -import ( - "context" - "errors" - "github.com/orb-community/orb/maestro/deployment" - "go.uber.org/zap" -) - -type fakeRepository struct { - logger *zap.Logger - inMemoryDict map[string]*deployment.Deployment -} - -func NewFakeRepository(logger *zap.Logger) deployment.Repository { - return &fakeRepository{logger: logger, inMemoryDict: make(map[string]*deployment.Deployment)} -} - -func (f *fakeRepository) FetchAll(ctx context.Context) ([]deployment.Deployment, error) { - var allDeployments []deployment.Deployment - for _, deploy := range f.inMemoryDict { - copy := copyDeploy(deploy) - allDeployments = append(allDeployments, copy) - } - return allDeployments, nil -} - -func (f *fakeRepository) Add(_ context.Context, deployment *deployment.Deployment) (*deployment.Deployment, error) { - deployment.Id = "fake-id" - copy := copyDeploy(deployment) - f.inMemoryDict[deployment.SinkID] = © - return deployment, nil -} - -func (f *fakeRepository) Update(_ context.Context, deployment *deployment.Deployment) (*deployment.Deployment, error) { - copy := copyDeploy(deployment) - f.inMemoryDict[deployment.SinkID] = © - return deployment, nil -} - -func (f *fakeRepository) UpdateStatus(_ context.Context, _ string, _ string, _ string, _ string) error { - return nil -} - -func (f *fakeRepository) Remove(_ context.Context, _ string, sinkId string) error { - delete(f.inMemoryDict, sinkId) - return nil -} - -func (f *fakeRepository) FindByOwnerAndSink(ctx context.Context, _ string, sinkId string) (*deployment.Deployment, error) { - deploy, ok := 
f.inMemoryDict[sinkId] - if ok { - copy := copyDeploy(deploy) - return ©, nil - } - return nil, errors.New("not found") -} - -func (f *fakeRepository) FindByCollectorName(_ context.Context, _ string) (*deployment.Deployment, error) { - return nil, nil -} - -func copyDeploy(src *deployment.Deployment) deployment.Deployment { - deploy := deployment.Deployment{ - Id: src.Id, - OwnerID: src.OwnerID, - SinkID: src.SinkID, - Backend: src.Backend, - Config: src.Config, - LastStatus: src.LastStatus, - LastStatusUpdate: src.LastStatusUpdate, - LastErrorMessage: src.LastErrorMessage, - LastErrorTime: src.LastErrorTime, - CollectorName: src.CollectorName, - LastCollectorDeployTime: src.LastCollectorDeployTime, - LastCollectorStopTime: src.LastCollectorStopTime, - } - return deploy -} diff --git a/pkg/config/config.go b/pkg/config/config.go index 0ebf319ba..56c0eb8ca 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -65,7 +65,6 @@ type BaseSvcConfig struct { HttpPort string `mapstructure:"http_port"` HttpServerCert string `mapstructure:"server_cert"` HttpServerKey string `mapstructure:"server_key"` - EncryptionKey string `mapstructure:"encryption_key"` } type PostgresConfig struct { diff --git a/policies/api/http/logging.go b/policies/api/http/logging.go index ad35bf933..89de0a412 100644 --- a/policies/api/http/logging.go +++ b/policies/api/http/logging.go @@ -25,7 +25,7 @@ func (l loggingMiddleware) ListDatasetsByGroupIDInternal(ctx context.Context, gr zap.Error(err), zap.Duration("duration", time.Since(begin))) } else { - l.logger.Debug("method call: list_datasets_by_group_id_internal", + l.logger.Info("method call: list_datasets_by_group_id_internal", zap.Duration("duration", time.Since(begin))) } }(time.Now()) @@ -39,7 +39,7 @@ func (l loggingMiddleware) RemoveAllDatasetsByPolicyIDInternal(ctx context.Conte zap.Error(err), zap.Duration("duration", time.Since(begin))) } else { - l.logger.Debug("method call: remove_all_datasets_by_policy_id_internal", + l.logger.Info("method call: remove_all_datasets_by_policy_id_internal", zap.Duration("duration", time.Since(begin))) } }(time.Now()) @@ -53,7 +53,7 @@ func (l loggingMiddleware) InactivateDatasetByIDInternal(ctx context.Context, ow zap.Error(err), zap.Duration("duration", time.Since(begin))) } else { - l.logger.Debug("method call: inactivate_dataset_by_id_internal", + l.logger.Info("method call: inactivate_dataset_by_id_internal", zap.Duration("duration", time.Since(begin))) } }(time.Now()) @@ -67,7 +67,7 @@ func (l loggingMiddleware) ViewDatasetByIDInternal(ctx context.Context, ownerID zap.Error(err), zap.Duration("duration", time.Since(begin))) } else { - l.logger.Debug("method call: view_dataset_by_id_internal", + l.logger.Info("method call: view_dataset_by_id_internal", zap.Duration("duration", time.Since(begin))) } }(time.Now()) @@ -81,7 +81,7 @@ func (l loggingMiddleware) RemoveDataset(ctx context.Context, token string, dsID zap.Error(err), zap.Duration("duration", time.Since(begin))) } else { - l.logger.Debug("method call: remove_dataset", + l.logger.Info("method call: remove_dataset", zap.Duration("duration", time.Since(begin))) } }(time.Now()) @@ -95,7 +95,7 @@ func (l loggingMiddleware) EditDataset(ctx context.Context, token string, ds pol zap.Error(err), zap.Duration("duration", time.Since(begin))) } else { - l.logger.Debug("method call: edit_dataset", + l.logger.Info("method call: edit_dataset", zap.Duration("duration", time.Since(begin))) } }(time.Now()) @@ -109,7 +109,7 @@ func (l loggingMiddleware) RemovePolicy(ctx 
context.Context, token string, polic zap.Error(err), zap.Duration("duration", time.Since(begin))) } else { - l.logger.Debug("method call: remove_policy", + l.logger.Info("method call: remove_policy", zap.Duration("duration", time.Since(begin))) } }(time.Now()) @@ -123,7 +123,7 @@ func (l loggingMiddleware) ListDatasetsByPolicyIDInternal(ctx context.Context, p zap.Error(err), zap.Duration("duration", time.Since(begin))) } else { - l.logger.Debug("method call: list_dataset_by_policy_id", + l.logger.Info("method call: list_dataset_by_policy_id", zap.Duration("duration", time.Since(begin))) } }(time.Now()) @@ -137,7 +137,7 @@ func (l loggingMiddleware) EditPolicy(ctx context.Context, token string, pol pol zap.Error(err), zap.Duration("duration", time.Since(begin))) } else { - l.logger.Debug("method call: edit_policy", + l.logger.Info("method call: edit_policy", zap.Duration("duration", time.Since(begin))) } }(time.Now()) @@ -151,7 +151,7 @@ func (l loggingMiddleware) AddPolicy(ctx context.Context, token string, p polici zap.Error(err), zap.Duration("duration", time.Since(begin))) } else { - l.logger.Debug("method call: add_policy", + l.logger.Info("method call: add_policy", zap.Duration("duration", time.Since(begin))) } }(time.Now()) @@ -165,7 +165,7 @@ func (l loggingMiddleware) ViewPolicyByID(ctx context.Context, token string, pol zap.Error(err), zap.Duration("duration", time.Since(begin))) } else { - l.logger.Debug("method call: view_policy_by_id", + l.logger.Info("method call: view_policy_by_id", zap.Duration("duration", time.Since(begin))) } }(time.Now()) @@ -179,7 +179,7 @@ func (l loggingMiddleware) ListPolicies(ctx context.Context, token string, pm po zap.Error(err), zap.Duration("duration", time.Since(begin))) } else { - l.logger.Debug("method call: list_policies", + l.logger.Info("method call: list_policies", zap.Duration("duration", time.Since(begin))) } }(time.Now()) @@ -193,7 +193,7 @@ func (l loggingMiddleware) ViewPolicyByIDInternal(ctx context.Context, policyID zap.Error(err), zap.Duration("duration", time.Since(begin))) } else { - l.logger.Debug("method call: view_policy_by_id_internal", + l.logger.Info("method call: view_policy_by_id_internal", zap.Duration("duration", time.Since(begin))) } }(time.Now()) @@ -207,7 +207,7 @@ func (l loggingMiddleware) ListPoliciesByGroupIDInternal(ctx context.Context, gr zap.Error(err), zap.Duration("duration", time.Since(begin))) } else { - l.logger.Debug("method call: list_policies_by_groups", + l.logger.Info("method call: list_policies_by_groups", zap.Duration("duration", time.Since(begin))) } }(time.Now()) @@ -221,7 +221,7 @@ func (l loggingMiddleware) AddDataset(ctx context.Context, token string, d polic zap.Error(err), zap.Duration("duration", time.Since(begin))) } else { - l.logger.Debug("method call: add_dataset", + l.logger.Info("method call: add_dataset", zap.Duration("duration", time.Since(begin))) } }(time.Now()) @@ -235,7 +235,7 @@ func (l loggingMiddleware) InactivateDatasetByGroupID(ctx context.Context, group zap.Error(err), zap.Duration("duration", time.Since(begin))) } else { - l.logger.Debug("method call: inactivate_dataset", + l.logger.Info("method call: inactivate_dataset", zap.Duration("duration", time.Since(begin))) } }(time.Now()) @@ -249,7 +249,7 @@ func (l loggingMiddleware) ValidatePolicy(ctx context.Context, token string, p p zap.Error(err), zap.Duration("duration", time.Since(begin))) } else { - l.logger.Debug("method call: validate_policy", + l.logger.Info("method call: validate_policy", zap.Duration("duration", 
time.Since(begin))) } }(time.Now()) @@ -263,7 +263,7 @@ func (l loggingMiddleware) ValidateDataset(ctx context.Context, token string, d zap.Error(err), zap.Duration("duration", time.Since(begin))) } else { - l.logger.Debug("method call: validate_dataset", + l.logger.Info("method call: validate_dataset", zap.Duration("duration", time.Since(begin))) } }(time.Now()) @@ -277,7 +277,7 @@ func (l loggingMiddleware) ViewDatasetByID(ctx context.Context, token string, da zap.Error(err), zap.Duration("duration", time.Since(begin))) } else { - l.logger.Debug("method call: view_dataset_by_id", + l.logger.Info("method call: view_dataset_by_id", zap.Duration("duration", time.Since(begin))) } }(time.Now()) @@ -291,7 +291,7 @@ func (l loggingMiddleware) ListDatasets(ctx context.Context, token string, pm po zap.Error(err), zap.Duration("duration", time.Since(begin))) } else { - l.logger.Debug("method call: list_dataset", + l.logger.Info("method call: list_dataset", zap.Duration("duration", time.Since(begin))) } }(time.Now()) @@ -305,7 +305,7 @@ func (l loggingMiddleware) DeleteSinkFromAllDatasetsInternal(ctx context.Context zap.Error(err), zap.Duration("duration", time.Since(begin))) } else { - l.logger.Debug("method call: delete_sink_from_all_datasets", + l.logger.Info("method call: delete_sink_from_all_datasets", zap.Duration("duration", time.Since(begin))) } }(time.Now()) @@ -319,7 +319,7 @@ func (l loggingMiddleware) DeleteAgentGroupFromAllDatasets(ctx context.Context, zap.Error(err), zap.Duration("duration", time.Since(begin))) } else { - l.logger.Debug("method call: delete_agent_group_from_all_datasets", + l.logger.Info("method call: delete_agent_group_from_all_datasets", zap.Duration("duration", time.Since(begin))) } }(time.Now()) @@ -333,7 +333,7 @@ func (l loggingMiddleware) DuplicatePolicy(ctx context.Context, token string, po zap.Error(err), zap.Duration("duration", time.Since(begin))) } else { - l.logger.Debug("method call: duplicate_policy", + l.logger.Info("method call: duplicate_policy", zap.Duration("duration", time.Since(begin))) } }(time.Now()) diff --git a/sinker/backend/backend.go b/sinker/backend/backend.go new file mode 100644 index 000000000..884bf9704 --- /dev/null +++ b/sinker/backend/backend.go @@ -0,0 +1,38 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ + +package backend + +import ( + "github.com/orb-community/orb/fleet" + "github.com/orb-community/orb/fleet/pb" + "github.com/orb-community/orb/sinker/prometheus" +) + +type Backend interface { + ProcessMetrics(agent *pb.AgentInfoRes, thingID string, data fleet.AgentMetricsRPCPayload) ([]prometheus.TimeSeries, error) +} + +var registry = make(map[string]Backend) + +func Register(name string, b Backend) { + registry[name] = b +} + +func GetList() []string { + keys := make([]string, 0, len(registry)) + for k := range registry { + keys = append(keys, k) + } + return keys +} + +func HaveBackend(name string) bool { + _, prs := registry[name] + return prs +} + +func GetBackend(name string) Backend { + return registry[name] +} diff --git a/sinker/backend/pktvisor/pktvisor.go b/sinker/backend/pktvisor/pktvisor.go new file mode 100644 index 000000000..cb31bcefb --- /dev/null +++ b/sinker/backend/pktvisor/pktvisor.go @@ -0,0 +1,474 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. 
If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
+
+package pktvisor
+
+import (
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"strings"
+
+	"github.com/fatih/structs"
+	"github.com/mitchellh/mapstructure"
+	"github.com/orb-community/orb/fleet"
+	"github.com/orb-community/orb/fleet/pb"
+	"github.com/orb-community/orb/pkg/errors"
+	"github.com/orb-community/orb/sinker/backend"
+	"github.com/orb-community/orb/sinker/prometheus"
+	"go.uber.org/zap"
+	"golang.org/x/exp/slices"
+)
+
+var _ backend.Backend = (*pktvisorBackend)(nil)
+
+type pktvisorBackend struct {
+	logger *zap.Logger
+}
+
+type metricAppendix struct {
+	agent        *pb.AgentInfoRes
+	agentID      string
+	policyID     string
+	policyName   string
+	deviceList   []string
+	deviceID     string
+	ifList       []string
+	deviceIF     string
+	handlerLabel string
+	format       string
+	tags         map[string]string
+	logger       *zap.Logger
+	warning      string
+}
+
+func (p pktvisorBackend) ProcessMetrics(agent *pb.AgentInfoRes, agentID string, data fleet.AgentMetricsRPCPayload) ([]prometheus.TimeSeries, error) {
+	// TODO check pktvisor version in data.BEVersion against PktvisorVersion
+	if data.Format != "json" {
+		p.logger.Warn("ignoring non-json pktvisor payload", zap.String("format", data.Format))
+		return nil, nil
+	}
+	// unmarshal pktvisor metrics
+	var metrics map[string]map[string]interface{}
+	err := json.Unmarshal(data.Data, &metrics)
+	if err != nil {
+		p.logger.Warn("unable to unmarshal pktvisor metric payload", zap.Any("payload", data.Data))
+		return nil, err
+	}
+
+	tags := make(map[string]string)
+	for k, v := range agent.AgentTags {
+		tags[k] = v
+	}
+	for k, v := range agent.OrbTags {
+		tags[k] = v
+	}
+
+	appendix := metricAppendix{
+		agent:        agent,
+		agentID:      agentID,
+		policyID:     data.PolicyID,
+		policyName:   data.PolicyName,
+		deviceList:   []string{},
+		deviceID:     "",
+		ifList:       []string{},
+		deviceIF:     "",
+		handlerLabel: "",
+		format:       "prom_sinker",
+		warning:      "Deprecated, soon we will substitute for openTelemetry, check https://orb.community/documentation on how to enable openTelemetry in your agent",
+		tags:         tags,
+		logger:       p.logger,
+	}
+	stats := make(map[string]StatSnapshot)
+	for handlerLabel, handlerData := range metrics {
+		if data, ok := handlerData["pcap"]; ok {
+			sTmp := StatSnapshot{}
+			err := mapstructure.Decode(data, &sTmp.Pcap)
+			if err != nil {
+				p.logger.Error("error decoding pcap handler", zap.Error(err))
+				continue
+			}
+			stats[handlerLabel] = sTmp
+		} else if data, ok := handlerData["dns"]; ok {
+			sTmp := StatSnapshot{}
+			err := mapstructure.Decode(data, &sTmp.DNS)
+			if err != nil {
+				p.logger.Error("error decoding dns handler", zap.Error(err))
+				continue
+			}
+			stats[handlerLabel] = sTmp
+		} else if data, ok := handlerData["packets"]; ok {
+			sTmp := StatSnapshot{}
+			err := mapstructure.Decode(data, &sTmp.Packets)
+			if err != nil {
+				p.logger.Error("error decoding packets handler", zap.Error(err))
+				continue
+			}
+			stats[handlerLabel] = sTmp
+		} else if data, ok := handlerData["dhcp"]; ok {
+			sTmp := StatSnapshot{}
+			err := mapstructure.Decode(data, &sTmp.DHCP)
+			if err != nil {
+				p.logger.Error("error decoding dhcp handler", zap.Error(err))
+				continue
+			}
+			stats[handlerLabel] = sTmp
+		} else if data, ok := handlerData["flow"]; ok {
+			sTmp := StatSnapshot{}
+			err := mapstructure.Decode(data, &sTmp.Flow)
+			if err != nil {
+				p.logger.Error("error decoding flow handler", zap.Error(err))
+				continue
+			}
+			stats[handlerLabel] = sTmp
+		}
+	}
+	return parseToProm(&appendix, stats), nil
+}
+
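ProcessMetrics above pivots on which handler key ("pcap", "dns", "packets", "dhcp", "flow") is present in each handler's payload, then decodes the loosely typed map into a struct. A minimal, self-contained sketch of that json.Unmarshal plus mapstructure.Decode two-step, using a toy struct and payload rather than the real StatSnapshot:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// Toy stand-in for one branch of StatSnapshot; fields are illustrative only.
type dnsStats struct {
	WirePackets struct {
		Total int64 `mapstructure:"total"`
		UDP   int64 `mapstructure:"udp"`
	} `mapstructure:"wire_packets"`
}

func main() {
	payload := []byte(`{"default_dns":{"dns":{"wire_packets":{"total":42,"udp":40}}}}`)

	// First pass: handler label -> handler payload, exactly as in ProcessMetrics.
	var metrics map[string]map[string]interface{}
	if err := json.Unmarshal(payload, &metrics); err != nil {
		panic(err)
	}

	for handlerLabel, handlerData := range metrics {
		// Second pass: pick the branch by key, decode the generic map into the typed struct.
		if data, ok := handlerData["dns"]; ok {
			var s dnsStats
			if err := mapstructure.Decode(data, &s); err != nil {
				panic(err)
			}
			fmt.Printf("%s: total=%d udp=%d\n", handlerLabel, s.WirePackets.Total, s.WirePackets.UDP)
		}
	}
}
```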
+func parseToProm(ctxt *metricAppendix, statsMap map[string]StatSnapshot) prometheus.TSList {
+	var finalTs = prometheus.TSList{}
+	for handlerLabel, stats := range statsMap {
+		var tsList = prometheus.TSList{}
+		statsMap := structs.Map(stats)
+		ctxt.handlerLabel = handlerLabel
+		if stats.Flow != nil {
+			convertFlowToPromParticle(ctxt, statsMap, "", &tsList)
+		} else {
+			convertToPromParticle(ctxt, statsMap, "", &tsList)
+		}
+		finalTs = append(finalTs, tsList...)
+	}
+	return finalTs
+}
+
+func convertToPromParticle(ctxt *metricAppendix, statsMap map[string]interface{}, label string, tsList *prometheus.TSList) {
+	for key, value := range statsMap {
+		switch statistic := value.(type) {
+		case map[string]interface{}:
+			// Recurse into convertToPromParticle until we reach the leaves of the StatSnapshot struct
+			// The prom particle label is built up by concatenation during the recursive calls
+			convertToPromParticle(ctxt, statistic, label+key, tsList)
+		// The StatSnapshot has two ways to record metrics (e.g. Live int64 `mapstructure:"live"`)
+		// That is why we check whether the type is int64
+		case int64:
+			{
+				// Use this regex to identify whether the key is a quantile
+				var matchFirstQuantile = regexp.MustCompile("^([Pp])+[0-9]")
+				if ok := matchFirstQuantile.MatchString(key); ok {
+					// If it is a quantile, it needs to be parsed into the prom quantile format
+					tsList = makePromParticle(ctxt, label, key, value, tsList, ok, "")
+				} else {
+					tsList = makePromParticle(ctxt, label+key, "", value, tsList, false, "")
+				}
+			}
+		// The StatSnapshot has two ways to record metrics (e.g. P50 float64 `mapstructure:"p50"`)
+		// That is why we check whether the type is float64
+		case float64:
+			{
+				// Use this regex to identify whether the key is a quantile
+				var matchFirstQuantile = regexp.MustCompile("^[Pp]+[0-9]")
+				if ok := matchFirstQuantile.MatchString(key); ok {
+					// If it is a quantile, it needs to be parsed into the prom quantile format
+					tsList = makePromParticle(ctxt, label, key, value, tsList, ok, "")
+				} else {
+					tsList = makePromParticle(ctxt, label+key, "", value, tsList, false, "")
+				}
+			}
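The recursion above is easiest to see on a toy input. A self-contained, stdlib-only sketch of the naming scheme it produces: concatenating map keys on the way down, snake_casing the result (a simplified stand-in for the camelToSnake helper further down, without its TopGeoLoc/TopASN exceptions), and treating P-prefixed keys as quantiles:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// simplified stand-in for camelToSnake (no acronym exceptions)
func toSnake(s string) string {
	first := regexp.MustCompile("(.)([A-Z][a-z]+)")
	all := regexp.MustCompile("([a-z0-9])([A-Z])")
	s = first.ReplaceAllString(s, "${1}_${2}")
	s = all.ReplaceAllString(s, "${1}_${2}")
	return strings.ToLower(s)
}

func walk(label string, node map[string]interface{}) {
	quantile := regexp.MustCompile("^[Pp]+[0-9]")
	for key, value := range node {
		switch v := value.(type) {
		case map[string]interface{}:
			walk(label+key, v) // concatenate, exactly like convertToPromParticle
		case int64, float64:
			if quantile.MatchString(key) {
				// quantile keys keep the parent label; the key itself becomes a quantile label
				fmt.Printf("%s{quantile from %s} = %v\n", toSnake(label), key, v)
			} else {
				fmt.Printf("%s = %v\n", toSnake(label+key), v)
			}
		}
	}
}

func main() {
	// toy subtree shaped like a structs.Map'd StatSnapshot
	stats := map[string]interface{}{
		"DNS": map[string]interface{}{
			"WirePackets": map[string]interface{}{"Total": int64(42)},
			"Xact":        map[string]interface{}{"P90": float64(12.5)},
		},
	}
	walk("", stats)
	// prints (in some order):
	//   dns_wire_packets_total = 42
	//   dns_xact{quantile from P90} = 12.5
}
```

+		// The StatSnapshot has two ways to record metrics (e.g. 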
TopIpv4 []NameCount `mapstructure:"top_ipv4"`) + // It's why we check if the type is []interface + // Here we extract the value for Name and Estimate + case []interface{}: + { + for _, value := range statistic { + m, ok := value.(map[string]interface{}) + if !ok { + return + } + var promLabel string + var promDataPoint interface{} + for k, v := range m { + switch k { + case "Name": + { + promLabel = fmt.Sprintf("%v", v) + } + case "Estimate": + { + promDataPoint = v + } + } + } + tsList = makePromParticle(ctxt, label+key, promLabel, promDataPoint, tsList, false, key) + } + } + } + } +} + +func convertFlowToPromParticle(ctxt *metricAppendix, statsMap map[string]interface{}, label string, tsList *prometheus.TSList) { + for key, value := range statsMap { + switch statistic := value.(type) { + case map[string]interface{}: + // Call convertToPromParticle recursively until the last interface of the StatSnapshot struct + // The prom particle label it's been formed during the recursive call (concatenation) + + if label == "FlowDevices" { + label = strings.ReplaceAll(label, "Devices", "") + for mkey := range statsMap { + ctxt.deviceList = append(ctxt.deviceList, mkey) + } + ctxt.deviceID = key + ctxt.deviceIF = "" + convertFlowToPromParticle(ctxt, statistic, label, tsList) + } else if label == "FlowInterfaces" { + label = strings.ReplaceAll(label, "Interfaces", "") + for mkey := range statsMap { + ctxt.ifList = append(ctxt.ifList, mkey) + } + ctxt.deviceIF = ctxt.deviceID + "|" + key + convertFlowToPromParticle(ctxt, statistic, label, tsList) + } else if slices.Contains(ctxt.deviceList, key) { + ctxt.deviceID = key + convertFlowToPromParticle(ctxt, statistic, label, tsList) + } else if slices.Contains(ctxt.ifList, key) { + ctxt.deviceIF = ctxt.deviceID + "|" + key + convertFlowToPromParticle(ctxt, statistic, label, tsList) + } else { + convertFlowToPromParticle(ctxt, statistic, label+key, tsList) + } + + // The StatSnapshot has two ways to record metrics (i.e. Live int64 `mapstructure:"live"`) + // It's why we check if the type is int64 + case int64: + { + // Use this regex to identify if the value it's a quantile + var matchFirstQuantile = regexp.MustCompile("^([Pp])+[0-9]") + if ok := matchFirstQuantile.MatchString(key); ok { + // If it's quantile, needs to be parsed to prom quantile format + tsList = makePromParticle(ctxt, label, key, value, tsList, ok, "") + } else { + tsList = makePromParticle(ctxt, label+key, "", value, tsList, false, "") + } + } + // The StatSnapshot has two ways to record metrics (i.e. 
+		// StatSnapshot records Top-N metrics as []NameCount (e.g. TopIpv4 []NameCount `mapstructure:"top_ipv4"`),
+		// which is why we check whether the value is a []interface{}.
+		// Here we extract the Name and Estimate values.
+		case []interface{}:
+			{
+				for _, value := range statistic {
+					m, ok := value.(map[string]interface{})
+					if !ok {
+						return
+					}
+					var promLabel string
+					var promDataPoint interface{}
+					for k, v := range m {
+						switch k {
+						case "Name":
+							{
+								promLabel = fmt.Sprintf("%v", v)
+							}
+						case "Estimate":
+							{
+								promDataPoint = v
+							}
+						}
+					}
+					tsList = makePromParticle(ctxt, label+key, promLabel, promDataPoint, tsList, false, key)
+				}
+			}
+		}
+	}
+}
+
+func makePromParticle(ctxt *metricAppendix, label string, k string, v interface{}, tsList *prometheus.TSList, quantile bool, name string) *prometheus.TSList {
+	mapQuantiles := make(map[string]string)
+	mapQuantiles["P50"] = "0.5"
+	mapQuantiles["P90"] = "0.9"
+	mapQuantiles["P95"] = "0.95"
+	mapQuantiles["P99"] = "0.99"
+
+	var dpFlag dp
+	var labelsListFlag labelList
+	if err := labelsListFlag.Set(fmt.Sprintf("__name__;%s", camelToSnake(label))); err != nil {
+		handleParticleError(ctxt, err)
+		return tsList
+	}
+	if err := labelsListFlag.Set("instance;" + ctxt.agent.AgentName); err != nil {
+		handleParticleError(ctxt, err)
+		return tsList
+	}
+	if err := labelsListFlag.Set("job;" + ctxt.policyID); err != nil {
+		handleParticleError(ctxt, err)
+		return tsList
+	}
+	if err := labelsListFlag.Set("agent_id;" + ctxt.agentID); err != nil {
+		handleParticleError(ctxt, err)
+		return tsList
+	}
+	if err := labelsListFlag.Set("agent;" + ctxt.agent.AgentName); err != nil {
+		handleParticleError(ctxt, err)
+		return tsList
+	}
+	if err := labelsListFlag.Set("policy_id;" + ctxt.policyID); err != nil {
+		handleParticleError(ctxt, err)
+		return tsList
+	}
+	if err := labelsListFlag.Set("policy;" + ctxt.policyName); err != nil {
+		handleParticleError(ctxt, err)
+		return tsList
+	}
+	if err := labelsListFlag.Set("handler;" + ctxt.handlerLabel); err != nil {
+		handleParticleError(ctxt, err)
+		return tsList
+	}
+	if ctxt.deviceID != "" {
+		if err := labelsListFlag.Set("device;" + ctxt.deviceID); err != nil {
+			handleParticleError(ctxt, err)
+			ctxt.deviceID = ""
+			return tsList
+		}
+	}
+	if ctxt.deviceIF != "" {
+		if err := labelsListFlag.Set("device_interface;" + ctxt.deviceIF); err != nil {
+			handleParticleError(ctxt, err)
+			ctxt.deviceIF = ""
+			return tsList
+		}
+	}
+
+	for k, v := range ctxt.tags {
+		if err := labelsListFlag.Set(k + ";" + v); err != nil {
+			handleParticleError(ctxt, err)
+			return tsList
+		}
+	}
+
+	if k != "" {
+		if quantile {
+			if value, ok := mapQuantiles[k]; ok {
+				if err := labelsListFlag.Set(fmt.Sprintf("quantile;%s", value)); err != nil {
+					handleParticleError(ctxt, err)
+					return tsList
+				}
+			}
+		} else {
+			parsedName, err := topNMetricsParser(name)
+			if err != nil {
+				ctxt.logger.Error("failed to parse Top N metric; the default label name will be used", zap.Error(err))
+				parsedName = "name"
+			}
+			if err := labelsListFlag.Set(fmt.Sprintf("%s;%s", parsedName, k)); err != nil {
+				handleParticleError(ctxt, err)
+				return tsList
+			}
+		}
+	}
+	// Try the integer datapoint format first and fall back to a generic format.
+	if err := dpFlag.Set(fmt.Sprintf("now,%d", v)); err != nil {
+		if err := dpFlag.Set(fmt.Sprintf("now,%v", v)); err != nil {
+			handleParticleError(ctxt, err)
+			return tsList
+		}
+	}
+	timeSeries := prometheus.TimeSeries{
+		Labels:    labelsListFlag,
+		Datapoint: prometheus.Datapoint(dpFlag),
+	}
+	*tsList = append(*tsList, timeSeries)
+	return tsList
+}
+
+func handleParticleError(ctxt *metricAppendix, err error) {
+	ctxt.logger.Error("failed to set prometheus element", zap.Error(err))
+}
+
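+// Examples of the conversion below (consistent with the assertions in pktvisor_test.go):
+//
+//	camelToSnake("DnsWirePacketsTotal") // "dns_wire_packets_total"
+//	camelToSnake("TopGeoLoc")           // "top_geoLoc" (suffix kept by the special case)
+//	camelToSnake("TopASN")              // "top_ASN"    (suffix kept by the special case)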
+func camelToSnake(s string) string {
+	var matchFirstCap = regexp.MustCompile("(.)([A-Z][a-z]+)")
+	var matchAllCap = regexp.MustCompile("([a-z0-9])([A-Z])")
+
+	// Special case so the TopGeoLoc and TopASN suffixes are not converted:
+	// they must keep their camel case / upper case form.
+	var matchExcept = regexp.MustCompile(`(oLoc$|pASN$)`)
+	sub := matchExcept.Split(s, 2)
+	var strExcept = ""
+	if len(sub) > 1 {
+		strExcept = matchExcept.FindAllString(s, 1)[0]
+		if strExcept == "pASN" {
+			strExcept = "p_ASN"
+		}
+		s = sub[0]
+	}
+
+	snake := matchFirstCap.ReplaceAllString(s, "${1}_${2}")
+	snake = matchAllCap.ReplaceAllString(snake, "${1}_${2}")
+	lower := strings.ToLower(snake)
+	return lower + strExcept
+}
+
+func topNMetricsParser(label string) (string, error) {
+	mapNMetrics := make(map[string]string)
+	mapNMetrics["TopGeoLocECS"] = "geo_loc"
+	mapNMetrics["TopGeoLoc"] = "geo_loc"
+	mapNMetrics["TopAsnECS"] = "asn"
+	mapNMetrics["TopASN"] = "asn"
+	mapNMetrics["TopQueryECS"] = "ecs"
+	mapNMetrics["TopIpv6"] = "ipv6"
+	mapNMetrics["TopIpv4"] = "ipv4"
+	mapNMetrics["TopQname2"] = "qname"
+	mapNMetrics["TopQname3"] = "qname"
+	mapNMetrics["TopQnameByRespBytes"] = "qname"
+	mapNMetrics["TopNxdomain"] = "qname"
+	mapNMetrics["TopQtype"] = "qtype"
+	mapNMetrics["TopRcode"] = "rcode"
+	mapNMetrics["TopREFUSED"] = "qname"
+	mapNMetrics["TopNODATA"] = "qname"
+	mapNMetrics["TopSRVFAIL"] = "qname"
+	mapNMetrics["TopUDPPorts"] = "port"
+	mapNMetrics["TopSlow"] = "qname"
+	mapNMetrics["TopGeoLocBytes"] = "geo_loc"
+	mapNMetrics["TopGeoLocPackes"] = "geo_loc"
+	mapNMetrics["TopAsnBytes"] = "asn"
+	mapNMetrics["TopAsnPackets"] = "asn"
+	mapNMetrics["TopInDstIpsBytes"] = "ip"
+	mapNMetrics["TopInDstIpsPackets"] = "ip"
+	mapNMetrics["TopInSrcIpsBytes"] = "ip"
+	mapNMetrics["TopInSrcIpsPackets"] = "ip"
+	mapNMetrics["TopInDstPortsBytes"] = "port"
+	mapNMetrics["TopInDstPortsPackets"] = "port"
+	mapNMetrics["TopInSrcPortsBytes"] = "port"
+	mapNMetrics["TopInSrcPortsPackets"] = "port"
+	mapNMetrics["TopInDstIpsAndPortBytes"] = "ip_port"
+	mapNMetrics["TopInDstIpsAndPortPackets"] = "ip_port"
+	mapNMetrics["TopInSrcIpsAndPortBytes"] = "ip_port"
+	mapNMetrics["TopInSrcIpsAndPortPackets"] = "ip_port"
+	mapNMetrics["TopOutDstIpsBytes"] = "ip"
+	mapNMetrics["TopOutDstIpsPackets"] = "ip"
+	mapNMetrics["TopOutSrcIpsBytes"] = "ip"
+	mapNMetrics["TopOutSrcIpsPackets"] = "ip"
+	mapNMetrics["TopOutDstPortsBytes"] = "port"
+	mapNMetrics["TopOutDstPortsPackets"] = "port"
+	mapNMetrics["TopOutSrcPortsBytes"] = "port"
+	mapNMetrics["TopOutSrcPortsPackets"] = "port"
+	mapNMetrics["TopOutDstIpsAndPortBytes"] = "ip_port"
+	mapNMetrics["TopOutDstIpsAndPortPackets"] = "ip_port"
+	mapNMetrics["TopOutSrcIpsAndPortBytes"] = "ip_port"
+	mapNMetrics["TopOutSrcIpsAndPortPackets"] = "ip_port"
+	mapNMetrics["TopConversationsBytes"] = "conversations"
+	mapNMetrics["TopConversationsPackets"] = "conversations"
+	mapNMetrics["TopInInterfacesBytes"] = "interface"
+	mapNMetrics["TopInInterfacesPackets"] = "interface"
+	mapNMetrics["TopOutInterfacesBytes"] = "interface"
+	mapNMetrics["TopOutInterfacesPackets"] = "interface"
+	if value, ok := mapNMetrics[label]; ok {
+		return value, nil
+	} else {
+		return "", errors.New(fmt.Sprintf("top N metric not mapped for parse: %s", label))
+	}
+}
+
+func Register(logger *zap.Logger) bool {
+	backend.Register("pktvisor", &pktvisorBackend{logger: logger})
+	return true
+}
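Editor's note: the test file that follows pins down the two conversion rules implemented above, which are easy to misread from the recursive code. First, metric names come from camel-to-snake conversion of the concatenated StatSnapshot field names; second, the P50/P90/P95/P99 keys become quantile labels rather than name suffixes. Below is a minimal, self-contained Go sketch of just these two rules; the helper names snakeName and quantileLabel are hypothetical stand-ins, not the patch's camelToSnake and makePromParticle.

    package main

    import (
    	"fmt"
    	"regexp"
    	"strings"
    )

    // snakeName mirrors the camel-to-snake naming rule used for metric names.
    func snakeName(s string) string {
    	firstCap := regexp.MustCompile("(.)([A-Z][a-z]+)")
    	allCap := regexp.MustCompile("([a-z0-9])([A-Z])")
    	out := firstCap.ReplaceAllString(s, "${1}_${2}")
    	out = allCap.ReplaceAllString(out, "${1}_${2}")
    	return strings.ToLower(out)
    }

    // quantileLabel mirrors the P50/P90/P95/P99 -> quantile label mapping.
    func quantileLabel(key string) (string, bool) {
    	quantiles := map[string]string{"P50": "0.5", "P90": "0.9", "P95": "0.95", "P99": "0.99"}
    	v, ok := quantiles[key]
    	return v, ok
    }

    func main() {
    	// Prints "dns_wire_packets_total", the name asserted in TestDNSWirePacketsConversion.
    	fmt.Println(snakeName("DnsWirePacketsTotal"))
    	// Prints `dns_rates_total{quantile="0.95"}`, matching TestDNSRatesConversion.
    	if q, ok := quantileLabel("P95"); ok {
    		fmt.Printf("dns_rates_total{quantile=%q}\n", q)
    	}
    }

Note that the sketch deliberately omits the TopGeoLoc/TopASN special case that camelToSnake layers on top of this rule.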
diff --git a/sinker/backend/pktvisor/pktvisor_test.go b/sinker/backend/pktvisor/pktvisor_test.go
new file mode 100644
index 000000000..494569df3
--- /dev/null
+++ b/sinker/backend/pktvisor/pktvisor_test.go
@@ -0,0 +1,5302 @@
+package pktvisor_test + +import ( + "fmt" + "reflect" + "testing" + + "github.com/gofrs/uuid" + "github.com/orb-community/orb/fleet" + "github.com/orb-community/orb/fleet/pb" + "github.com/orb-community/orb/pkg/types" + "github.com/orb-community/orb/sinker/backend" + "github.com/orb-community/orb/sinker/backend/pktvisor" + "github.com/orb-community/orb/sinker/prometheus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" +) + +func TestDHCPConversion(t *testing.T) { + var logger = zap.NewNop() + pktvisor.Register(logger) + + ownerID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + policyID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + agentID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + var agent = &pb.AgentInfoRes{ + OwnerID: ownerID.String(), + AgentName: "agent-test", + } + + data := fleet.AgentMetricsRPCPayload{ + PolicyID: policyID.String(), + PolicyName: "policy-test", + Datasets: nil, + Format: "json", + BEVersion: "1.0", + } + + be := backend.GetBackend("pktvisor") + + commonLabels := []prometheus.Label{ + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_dhcp", + }, + } + + cases := map[string]struct { + data []byte + expected prometheus.TimeSeries + }{ + "DHCPPayloadWirePacketsFiltered": { + data: []byte(` +{ + "policy_dhcp": { + "dhcp": { + "wire_packets": { + "filtered": 10 + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "dhcp_wire_packets_filtered"})), + Datapoint: prometheus.Datapoint{ + Value: 10, + }, + }, + }, + "DHCPPayloadWirePacketsTotal": { + data: []byte(` +{ + "policy_dhcp": { + "dhcp": { + "wire_packets": { + "total": 10 + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "dhcp_wire_packets_total"})), + Datapoint: prometheus.Datapoint{ + Value: 10, + }, + }, + }, + "DHCPPayloadWirePacketsDeepSamples": { + data: []byte(` +{ + "policy_dhcp": { + "dhcp": { + "wire_packets": { + "deep_samples": 10 + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "dhcp_wire_packets_deep_samples"})), + Datapoint: prometheus.Datapoint{ + Value: 10, + }, + }, + }, + "DHCPPayloadWirePacketsDiscover": { + data: []byte(` +{ + "policy_dhcp": { + "dhcp": { + "wire_packets": { + "discover": 10 + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "dhcp_wire_packets_discover"})), + Datapoint: prometheus.Datapoint{ + Value: 10, + }, + }, + }, + "DHCPPayloadWirePacketsOffer": { + data: []byte(` +{ + "policy_dhcp": { + "dhcp": { + "wire_packets": { + "offer": 10 + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "dhcp_wire_packets_offer"})), + Datapoint:
prometheus.Datapoint{ + Value: 10, + }, + }, + }, + "DHCPPayloadWirePacketsRequest": { + data: []byte(` +{ + "policy_dhcp": { + "dhcp": { + "wire_packets": { + "request": 10 + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "dhcp_wire_packets_request"})), + Datapoint: prometheus.Datapoint{ + Value: 10, + }, + }, + }, + "DHCPPayloadWirePacketsAck": { + data: []byte(` +{ + "policy_dhcp": { + "dhcp": { + "wire_packets": { + "ack": 10 + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "dhcp_wire_packets_ack"})), + Datapoint: prometheus.Datapoint{ + Value: 10, + }, + }, + }, + } + + for desc, c := range cases { + t.Run(desc, func(t *testing.T) { + data.Data = c.data + res, err := be.ProcessMetrics(agent, agentID.String(), data) + require.Nil(t, err, fmt.Sprintf("%s: unexpected error: %s", desc, err)) + var receivedLabel []prometheus.Label + var receivedDatapoint prometheus.Datapoint + for _, value := range res { + if c.expected.Labels[0] == value.Labels[0] { + receivedLabel = value.Labels + receivedDatapoint = value.Datapoint + } + } + assert.True(t, reflect.DeepEqual(c.expected.Labels, receivedLabel), fmt.Sprintf("%s: expected %v got %v", desc, c.expected.Labels, receivedLabel)) + assert.Equal(t, c.expected.Datapoint.Value, receivedDatapoint.Value, fmt.Sprintf("%s: expected value %f got %f", desc, c.expected.Datapoint.Value, receivedDatapoint.Value)) + }) + } + +} + +func TestASNConversion(t *testing.T) { + var logger = zap.NewNop() + pktvisor.Register(logger) + + ownerID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + policyID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + agentID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + var agent = &pb.AgentInfoRes{ + OwnerID: ownerID.String(), + AgentName: "agent-test", + } + + data := fleet.AgentMetricsRPCPayload{ + PolicyID: policyID.String(), + PolicyName: "policy-test", + Datasets: nil, + Format: "json", + BEVersion: "1.0", + } + + be := backend.GetBackend("pktvisor") + + cases := map[string]struct { + data []byte + expected prometheus.TimeSeries + }{ + "PacketPayloadTopASN": { + data: []byte(` +{ + "policy_packets": { + "packets": { + "top_ASN": [ + { + "estimate": 996, + "name": "36236/NETACTUATE" + } + ] + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: []prometheus.Label{ + { + Name: "__name__", + Value: "packets_top_ASN", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_packets", + }, + { + Name: "asn", + Value: "36236/NETACTUATE", + }, + }, + Datapoint: prometheus.Datapoint{ + Value: 996, + }, + }, + }, + } + + for desc, c := range cases { + t.Run(desc, func(t *testing.T) { + data.Data = c.data + res, err := be.ProcessMetrics(agent, agentID.String(), data) + require.Nil(t, err, fmt.Sprintf("%s: unexpected error: %s", desc, err)) + var receivedLabel []prometheus.Label + var receivedDatapoint prometheus.Datapoint + for _, value := range res { + if c.expected.Labels[0] == value.Labels[0] { + receivedLabel = value.Labels + 
receivedDatapoint = value.Datapoint + } + } + assert.True(t, reflect.DeepEqual(c.expected.Labels, receivedLabel), fmt.Sprintf("%s: expected %v got %v", desc, c.expected.Labels, receivedLabel)) + assert.Equal(t, c.expected.Datapoint.Value, receivedDatapoint.Value, fmt.Sprintf("%s: expected value %f got %f", desc, c.expected.Datapoint.Value, receivedDatapoint.Value)) + }) + } + +} + +func TestGeoLocConversion(t *testing.T) { + var logger = zap.NewNop() + pktvisor.Register(logger) + + ownerID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + policyID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + agentID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + var agent = &pb.AgentInfoRes{ + OwnerID: ownerID.String(), + AgentName: "agent-test", + } + + data := fleet.AgentMetricsRPCPayload{ + PolicyID: policyID.String(), + PolicyName: "policy-test", + Datasets: nil, + Format: "json", + BEVersion: "1.0", + } + + be := backend.GetBackend("pktvisor") + + cases := map[string]struct { + data []byte + expected prometheus.TimeSeries + }{ + "PacketPayloadTopGeoLoc": { + data: []byte(` +{ + "policy_packets": { + "packets": { + "top_geoLoc": [ + { + "estimate": 996, + "name": "AS/Hong Kong/HCW/Central" + } + ] + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: []prometheus.Label{ + { + Name: "__name__", + Value: "packets_top_geoLoc", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_packets", + }, + { + Name: "geo_loc", + Value: "AS/Hong Kong/HCW/Central", + }, + }, + Datapoint: prometheus.Datapoint{ + Value: 996, + }, + }, + }, + } + + for desc, c := range cases { + t.Run(desc, func(t *testing.T) { + data.Data = c.data + res, err := be.ProcessMetrics(agent, agentID.String(), data) + require.Nil(t, err, fmt.Sprintf("%s: unexpected error: %s", desc, err)) + var receivedLabel []prometheus.Label + var receivedDatapoint prometheus.Datapoint + for _, value := range res { + if c.expected.Labels[0] == value.Labels[0] { + receivedLabel = value.Labels + receivedDatapoint = value.Datapoint + } + } + assert.True(t, reflect.DeepEqual(c.expected.Labels, receivedLabel), fmt.Sprintf("%s: expected %v got %v", desc, c.expected.Labels, receivedLabel)) + assert.Equal(t, c.expected.Datapoint.Value, receivedDatapoint.Value, fmt.Sprintf("%s: expected value %f got %f", desc, c.expected.Datapoint.Value, receivedDatapoint.Value)) + }) + } + +} + +func TestPCAPConversion(t *testing.T) { + var logger = zap.NewNop() + pktvisor.Register(logger) + + ownerID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + policyID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + agentID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + var agent = &pb.AgentInfoRes{ + OwnerID: ownerID.String(), + AgentName: "agent-test", + } + + data := fleet.AgentMetricsRPCPayload{ + PolicyID: policyID.String(), + PolicyName: "policy-test", + Datasets: nil, + Format: "json", + BEVersion: "1.0", + } + + be := backend.GetBackend("pktvisor") + + commonLabels := []prometheus.Label{ + { + Name: "instance", + Value: "agent-test", 
+ }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_pcap", + }, + } + + cases := map[string]struct { + data []byte + expected prometheus.TimeSeries + }{ + "PCAPPayload_Tcp_Reassembly_Errors": { + data: []byte(` +{ + "policy_pcap": { + "pcap": { + "tcp_reassembly_errors": 2 + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "pcap_tcp_reassembly_errors", + })), + Datapoint: prometheus.Datapoint{ + Value: 2, + }, + }, + }, + "PCAPPayload_if_drops": { + data: []byte(` +{ + "policy_pcap": { + "pcap": { + "if_drops": 2 + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "pcap_if_drops", + })), + Datapoint: prometheus.Datapoint{ + Value: 2, + }, + }, + }, + "PCAPPayload_os_drops": { + data: []byte(` +{ + "policy_pcap": { + "pcap": { + "os_drops": 2 + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "pcap_os_drops", + })), + Datapoint: prometheus.Datapoint{ + Value: 2, + }, + }, + }, + } + + for desc, c := range cases { + t.Run(desc, func(t *testing.T) { + data.Data = c.data + res, err := be.ProcessMetrics(agent, agentID.String(), data) + require.Nil(t, err, fmt.Sprintf("%s: unexpected error: %s", desc, err)) + var receivedLabel []prometheus.Label + var receivedDatapoint prometheus.Datapoint + for _, value := range res { + if c.expected.Labels[0] == value.Labels[0] { + receivedLabel = value.Labels + receivedDatapoint = value.Datapoint + } + } + assert.True(t, reflect.DeepEqual(c.expected.Labels, receivedLabel), fmt.Sprintf("%s: expected %v got %v", desc, c.expected.Labels, receivedLabel)) + assert.Equal(t, c.expected.Datapoint.Value, receivedDatapoint.Value, fmt.Sprintf("%s: expected value %f got %f", desc, c.expected.Datapoint.Value, receivedDatapoint.Value)) + }) + } + +} + +func TestDNSConversion(t *testing.T) { + var logger = zap.NewNop() + pktvisor.Register(logger) + + ownerID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + policyID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + agentID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + var agent = &pb.AgentInfoRes{ + OwnerID: ownerID.String(), + AgentName: "agent-test", + } + + data := fleet.AgentMetricsRPCPayload{ + PolicyID: policyID.String(), + PolicyName: "policy-test", + Datasets: nil, + Format: "json", + BEVersion: "1.0", + } + + be := backend.GetBackend("pktvisor") + + commonLabels := []prometheus.Label{ + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_dns", + }, + } + + cases := map[string]struct { + data []byte + expected prometheus.TimeSeries + }{ + "DNSPayloadCardinalityTotal": { + data: []byte(` +{ + "policy_dns": { + "dns": { + "cardinality": { + "qname": 4 + } + } + } +}`), + expected: 
prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "dns_cardinality_qname", + })), + Datapoint: prometheus.Datapoint{ + Value: 4, + }, + }, + }, + "DNSPayloadTopNxdomain": { + data: []byte(` +{ + "policy_dns": { + "dns": { + "top_nxdomain": [ + { + "estimate": 186, + "name": "89.187.189.231" + } + ] + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "dns_top_nxdomain", + }), prometheus.Label{ + Name: "qname", + Value: "89.187.189.231", + }), + Datapoint: prometheus.Datapoint{ + Value: 186, + }, + }, + }, + "DNSPayloadTopRefused": { + data: []byte(` +{ + "policy_dns": { + "dns": { + "top_refused": [ + { + "estimate": 186, + "name": "89.187.189.231" + } + ] + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "dns_top_refused", + }), prometheus.Label{ + Name: "qname", + Value: "89.187.189.231", + }), + Datapoint: prometheus.Datapoint{ + Value: 186, + }, + }, + }, + "DNSPayloadTopSrvfail": { + data: []byte(` +{ + "policy_dns": { + "dns": { + "top_srvfail": [ + { + "estimate": 186, + "name": "89.187.189.231" + } + ] + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "dns_top_srvfail", + }), prometheus.Label{ + Name: "qname", + Value: "89.187.189.231", + }), + Datapoint: prometheus.Datapoint{ + Value: 186, + }, + }, + }, + "DNSPayloadTopNodata": { + data: []byte(` +{ + "policy_dns": { + "dns": { + "top_nodata": [ + { + "estimate": 186, + "name": "89.187.189.231" + } + ] + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "dns_top_nodata", + }), prometheus.Label{ + Name: "qname", + Value: "89.187.189.231", + }), + Datapoint: prometheus.Datapoint{ + Value: 186, + }, + }, + }, + } + + for desc, c := range cases { + t.Run(desc, func(t *testing.T) { + data.Data = c.data + res, err := be.ProcessMetrics(agent, agentID.String(), data) + require.Nil(t, err, fmt.Sprintf("%s: unexpected error: %s", desc, err)) + var receivedLabel []prometheus.Label + var receivedDatapoint prometheus.Datapoint + for _, value := range res { + if c.expected.Labels[0] == value.Labels[0] { + if len(c.expected.Labels) < 7 { + receivedLabel = value.Labels + receivedDatapoint = value.Datapoint + } else { + if c.expected.Labels[6].Value == value.Labels[6].Value { + receivedLabel = value.Labels + receivedDatapoint = value.Datapoint + } + } + } + } + assert.True(t, reflect.DeepEqual(c.expected.Labels, receivedLabel), fmt.Sprintf("%s: expected %v got %v", desc, c.expected.Labels, receivedLabel)) + assert.Equal(t, c.expected.Datapoint.Value, receivedDatapoint.Value, fmt.Sprintf("%s: expected value %f got %f", desc, c.expected.Datapoint.Value, receivedDatapoint.Value)) + }) + } + +} + +func TestDNSRatesConversion(t *testing.T) { + var logger = zap.NewNop() + pktvisor.Register(logger) + + ownerID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + policyID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + agentID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + var agent = &pb.AgentInfoRes{ + OwnerID: ownerID.String(), + AgentName: "agent-test", + } + + data := fleet.AgentMetricsRPCPayload{ + PolicyID: 
policyID.String(), + PolicyName: "policy-test", + Datasets: nil, + Format: "json", + BEVersion: "1.0", + } + + be := backend.GetBackend("pktvisor") + + commonLabels := []prometheus.Label{ + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_dns", + }, + { + Name: "quantile", + Value: "0.5", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_dns", + }, + { + Name: "quantile", + Value: "0.9", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_dns", + }, + { + Name: "quantile", + Value: "0.95", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_dns", + }, + { + Name: "quantile", + Value: "0.99", + }, + } + + cases := map[string]struct { + data []byte + expectedLabels []prometheus.Label + expectedDatapoints []float64 + }{ + "DNSPayloadRatesTotal": { + data: []byte(` +{ + "policy_dns": { + "dns": { + "rates": { + "total": { + "p50": 0, + "p90": 1, + "p95": 2, + "p99": 6 + } + } + } + } +}`), + expectedLabels: labelQuantiles(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "dns_rates_total", + }), + expectedDatapoints: []float64{0, 1, 2, 6}, + }, + } + + for desc, c := range cases { + t.Run(desc, func(t *testing.T) { + data.Data = c.data + res, err := be.ProcessMetrics(agent, agentID.String(), data) + require.Nil(t, err, fmt.Sprintf("%s: unexpected error: %s", desc, err)) + var receivedLabel []prometheus.Label + var receivedDatapoint []float64 + + for _, value := range res { + if c.expectedLabels[0] == value.Labels[0] { + for _, labels := range value.Labels { + receivedLabel = append(receivedLabel, labels) + } + receivedDatapoint = append(receivedDatapoint, value.Datapoint.Value) + } + } + + assert.ElementsMatch(t, c.expectedLabels, receivedLabel, fmt.Sprintf("%s: expected %v got %v", desc, c.expectedLabels, receivedLabel)) + assert.ElementsMatch(t, c.expectedDatapoints, receivedDatapoint, fmt.Sprintf("%s: expected %v got %v", desc, c.expectedDatapoints, receivedDatapoint)) + }) + } + +} + +func TestDHCPRatesConversion(t *testing.T) { + var logger = zap.NewNop() + pktvisor.Register(logger) + + ownerID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + policyID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + agentID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: 
%s", err)) + + var agent = &pb.AgentInfoRes{ + OwnerID: ownerID.String(), + AgentName: "agent-test", + } + + data := fleet.AgentMetricsRPCPayload{ + PolicyID: policyID.String(), + PolicyName: "policy-test", + Datasets: nil, + Format: "json", + BEVersion: "1.0", + } + + be := backend.GetBackend("pktvisor") + + commonLabels := []prometheus.Label{ + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_dhcp", + }, + { + Name: "quantile", + Value: "0.5", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_dhcp", + }, + { + Name: "quantile", + Value: "0.9", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_dhcp", + }, + { + Name: "quantile", + Value: "0.95", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_dhcp", + }, + { + Name: "quantile", + Value: "0.99", + }, + } + + cases := map[string]struct { + data []byte + expectedLabels []prometheus.Label + expectedDatapoints []float64 + }{ + "DHCPPayloadRates": { + data: []byte(` +{ + "policy_dhcp": { + "dhcp": { + "rates": { + "total": { + "p50": 0, + "p90": 1, + "p95": 2, + "p99": 6 + } + } + } + } +}`), + expectedLabels: labelQuantiles(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "dhcp_rates_total", + }), + expectedDatapoints: []float64{0, 1, 2, 6}, + }, + } + + for desc, c := range cases { + t.Run(desc, func(t *testing.T) { + data.Data = c.data + res, err := be.ProcessMetrics(agent, agentID.String(), data) + require.Nil(t, err, fmt.Sprintf("%s: unexpected error: %s", desc, err)) + var receivedLabel []prometheus.Label + var receivedDatapoint []float64 + + for _, value := range res { + if c.expectedLabels[0] == value.Labels[0] { + for _, labels := range value.Labels { + receivedLabel = append(receivedLabel, labels) + } + receivedDatapoint = append(receivedDatapoint, value.Datapoint.Value) + } + } + + assert.ElementsMatch(t, c.expectedLabels, receivedLabel, fmt.Sprintf("%s: expected %v got %v", desc, c.expectedLabels, receivedLabel)) + assert.ElementsMatch(t, c.expectedDatapoints, receivedDatapoint, fmt.Sprintf("%s: expected %v got %v", desc, c.expectedDatapoints, receivedDatapoint)) + }) + } + +} + +func TestPacketsRatesConversion(t *testing.T) { + var logger = zap.NewNop() + pktvisor.Register(logger) + + ownerID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + policyID, err := 
uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + agentID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + var agent = &pb.AgentInfoRes{ + OwnerID: ownerID.String(), + AgentName: "agent-test", + } + + data := fleet.AgentMetricsRPCPayload{ + PolicyID: policyID.String(), + PolicyName: "policy-test", + Datasets: nil, + Format: "json", + BEVersion: "1.0", + } + + be := backend.GetBackend("pktvisor") + + commonLabels := []prometheus.Label{ + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_dns", + }, + { + Name: "quantile", + Value: "0.5", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_dns", + }, + { + Name: "quantile", + Value: "0.9", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_dns", + }, + { + Name: "quantile", + Value: "0.95", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_dns", + }, + { + Name: "quantile", + Value: "0.99", + }, + } + + cases := map[string]struct { + data []byte + expectedLabels []prometheus.Label + expectedDatapoints []float64 + }{ + "PacketsPayloadRatesPpsIn": { + data: []byte(` +{ + "policy_dns": { + "packets": { + "rates": { + "pps_in": { + "p50": 0, + "p90": 1, + "p95": 2, + "p99": 6 + } + } + } + } +}`), + expectedLabels: labelQuantiles(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "packets_rates_pps_in", + }), + expectedDatapoints: []float64{0, 1, 2, 6}, + }, + "PacketsPayloadRatesPpsTotal": { + data: []byte(` +{ + "policy_dns": { + "packets": { + "rates": { + "pps_total": { + "p50": 0, + "p90": 1, + "p95": 2, + "p99": 6 + } + } + } + } +}`), + expectedLabels: labelQuantiles(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "packets_rates_pps_total", + }), + expectedDatapoints: []float64{0, 1, 2, 6}, + }, + "PacketsPayloadRatesPpsOut": { + data: []byte(` +{ + "policy_dns": { + "packets": { + "rates": { + "pps_out": { + "p50": 0, + "p90": 1, + "p95": 2, + "p99": 6 + } + } + } + } +}`), + expectedLabels: labelQuantiles(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "packets_rates_pps_out", + }), + expectedDatapoints: []float64{0, 1, 2, 6}, + }, + } + + for desc, c := range cases { + t.Run(desc, func(t *testing.T) { + data.Data = c.data + res, err := be.ProcessMetrics(agent, agentID.String(), data) + require.Nil(t, err, 
fmt.Sprintf("%s: unexpected error: %s", desc, err)) + var receivedLabel []prometheus.Label + var receivedDatapoint []float64 + + for _, value := range res { + if c.expectedLabels[0] == value.Labels[0] { + for _, labels := range value.Labels { + receivedLabel = append(receivedLabel, labels) + } + receivedDatapoint = append(receivedDatapoint, value.Datapoint.Value) + } + } + + assert.ElementsMatch(t, c.expectedLabels, receivedLabel, fmt.Sprintf("%s: expected %v got %v", desc, c.expectedLabels, receivedLabel)) + assert.ElementsMatch(t, c.expectedDatapoints, receivedDatapoint, fmt.Sprintf("%s: expected %v got %v", desc, c.expectedDatapoints, receivedDatapoint)) + }) + } + +} + +func TestDNSTopKMetricsConversion(t *testing.T) { + var logger = zap.NewNop() + pktvisor.Register(logger) + + ownerID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + policyID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + agentID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + var agent = &pb.AgentInfoRes{ + OwnerID: ownerID.String(), + AgentName: "agent-test", + } + + data := fleet.AgentMetricsRPCPayload{ + PolicyID: policyID.String(), + PolicyName: "policy-test", + Datasets: nil, + Format: "json", + BEVersion: "1.0", + } + + be := backend.GetBackend("pktvisor") + + cases := map[string]struct { + data []byte + expected prometheus.TimeSeries + }{ + "PacketPayloadToqQName2": { + data: []byte(` +{ + "policy_dns": { + "dns": { + "top_qname2": [ + { + "estimate": 8, + "name": ".google.com" + } + ] + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: []prometheus.Label{ + { + Name: "__name__", + Value: "dns_top_qname2", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_dns", + }, + { + Name: "qname", + Value: ".google.com", + }, + }, + Datapoint: prometheus.Datapoint{ + Value: 8, + }, + }, + }, + "PacketPayloadToqQName3": { + data: []byte(` +{ + "policy_dns": { + "dns": { + "top_qname3": [ + { + "estimate": 6, + "name": ".l.google.com" + } + ] + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: []prometheus.Label{ + { + Name: "__name__", + Value: "dns_top_qname3", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_dns", + }, + { + Name: "qname", + Value: ".l.google.com", + }, + }, + Datapoint: prometheus.Datapoint{ + Value: 6, + }, + }, + }, + "PacketPayloadTopQueryECS": { + data: []byte(` +{ + "policy_dns": { + "dns": { + "top_query_ecs": [ + { + "estimate": 6, + "name": "2001:470:1f0b:1600::" + } + ] + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: []prometheus.Label{ + { + Name: "__name__", + Value: "dns_top_query_ecs", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: 
policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_dns", + }, + { + Name: "ecs", + Value: "2001:470:1f0b:1600::", + }, + }, + Datapoint: prometheus.Datapoint{ + Value: 6, + }, + }, + }, + "PacketPayloadToqQType": { + data: []byte(` +{ + "policy_dns": { + "dns": { + "top_qtype": [ + { + "estimate": 6, + "name": "HTTPS" + } + ] + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: []prometheus.Label{ + { + Name: "__name__", + Value: "dns_top_qtype", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_dns", + }, + { + Name: "qtype", + Value: "HTTPS", + }, + }, + Datapoint: prometheus.Datapoint{ + Value: 6, + }, + }, + }, + "PacketPayloadTopUDPPorts": { + data: []byte(` +{ + "policy_dns": { + "dns": { + "top_udp_ports": [ + { + "estimate": 2, + "name": "39783" + } + ] + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: []prometheus.Label{ + { + Name: "__name__", + Value: "dns_top_udp_ports", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_dns", + }, + { + Name: "port", + Value: "39783", + }, + }, + Datapoint: prometheus.Datapoint{ + Value: 2, + }, + }, + }, + "PacketPayloadTopRCode": { + data: []byte(` +{ + "policy_dns": { + "dns": { + "top_rcode": [ + { + "estimate": 8, + "name": "NOERROR" + } + ] + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: []prometheus.Label{ + { + Name: "__name__", + Value: "dns_top_rcode", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_dns", + }, + { + Name: "rcode", + Value: "NOERROR", + }, + }, + Datapoint: prometheus.Datapoint{ + Value: 8, + }, + }, + }, + } + + for desc, c := range cases { + t.Run(desc, func(t *testing.T) { + data.Data = c.data + res, err := be.ProcessMetrics(agent, agentID.String(), data) + require.Nil(t, err, fmt.Sprintf("%s: unexpected error: %s", desc, err)) + var receivedLabel []prometheus.Label + var receivedDatapoint prometheus.Datapoint + for _, value := range res { + if c.expected.Labels[0] == value.Labels[0] { + receivedLabel = value.Labels + receivedDatapoint = value.Datapoint + } + } + assert.True(t, reflect.DeepEqual(c.expected.Labels, receivedLabel), fmt.Sprintf("%s: expected %v got %v", desc, c.expected.Labels, receivedLabel)) + assert.Equal(t, c.expected.Datapoint.Value, receivedDatapoint.Value, fmt.Sprintf("%s: expected value %f got %f", desc, c.expected.Datapoint.Value, receivedDatapoint.Value)) + }) + } + +} + +func TestDNSWirePacketsConversion(t *testing.T) { + var logger = zap.NewNop() + pktvisor.Register(logger) + + ownerID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + 
policyID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + agentID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + var agent = &pb.AgentInfoRes{ + OwnerID: ownerID.String(), + AgentName: "agent-test", + } + + data := fleet.AgentMetricsRPCPayload{ + PolicyID: policyID.String(), + PolicyName: "policy-test", + Datasets: nil, + Format: "json", + BEVersion: "1.0", + } + + be := backend.GetBackend("pktvisor") + + commonLabels := []prometheus.Label{ + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_dns", + }, + } + + cases := map[string]struct { + data []byte + expected prometheus.TimeSeries + }{ + "DNSPayloadWirePacketsIpv4": { + data: []byte(` +{ + "policy_dns": { + "dns": { + "wire_packets": { + "ipv4": 1 + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "dns_wire_packets_ipv4", + })), + Datapoint: prometheus.Datapoint{ + Value: 1, + }, + }, + }, + "DNSPayloadWirePacketsIpv6": { + data: []byte(` +{ + "policy_dns": { + "dns": { + "wire_packets": { + "ipv6": 14 + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "dns_wire_packets_ipv6", + })), + Datapoint: prometheus.Datapoint{ + Value: 14, + }, + }, + }, + "DNSPayloadWirePacketsNodata": { + data: []byte(` +{ + "policy_dns": { + "dns": { + "wire_packets": { + "nodata": 8 + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "dns_wire_packets_nodata", + })), + Datapoint: prometheus.Datapoint{ + Value: 8, + }, + }, + }, + "DNSPayloadWirePacketsNoerror": { + data: []byte(` +{ + "policy_dns": { + "dns": { + "wire_packets": { + "noerror": 8 + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "dns_wire_packets_noerror", + })), + Datapoint: prometheus.Datapoint{ + Value: 8, + }, + }, + }, + "DNSPayloadWirePacketsNxdomain": { + data: []byte(` +{ + "policy_dns": { + "dns": { + "wire_packets": { + "nxdomain": 6 + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "dns_wire_packets_nxdomain", + })), + Datapoint: prometheus.Datapoint{ + Value: 6, + }, + }, + }, + "DNSPayloadWirePacketsQueries": { + data: []byte(` +{ + "policy_dns": { + "dns": { + "wire_packets": { + "queries": 7 + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "dns_wire_packets_queries", + })), + Datapoint: prometheus.Datapoint{ + Value: 7, + }, + }, + }, + "DNSPayloadWirePacketsRefused": { + data: []byte(` +{ + "policy_dns": { + "dns": { + "wire_packets": { + "refused": 8 + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "dns_wire_packets_refused", + })), + Datapoint: prometheus.Datapoint{ + Value: 8, + }, + }, + }, + "DNSPayloadWirePacketsFiltered": { + data: []byte(` +{ + 
"policy_dns": { + "dns": { + "wire_packets": { + "filtered": 8 + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "dns_wire_packets_filtered", + })), + Datapoint: prometheus.Datapoint{ + Value: 8, + }, + }, + }, + "DNSPayloadWirePacketsReplies": { + data: []byte(` +{ + "policy_dns": { + "dns": { + "wire_packets": { + "replies": 8 + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "dns_wire_packets_replies", + })), + Datapoint: prometheus.Datapoint{ + Value: 8, + }, + }, + }, + "DNSPayloadWirePacketsSrvfail": { + data: []byte(` +{ + "policy_dns": { + "dns": { + "wire_packets": { + "srvfail": 9 + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "dns_wire_packets_srvfail", + })), + Datapoint: prometheus.Datapoint{ + Value: 9, + }, + }, + }, + "DNSPayloadWirePacketsTcp": { + data: []byte(` +{ + "policy_dns": { + "dns": { + "wire_packets": { + "tcp": 9 + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "dns_wire_packets_tcp", + })), + Datapoint: prometheus.Datapoint{ + Value: 9, + }, + }, + }, + "DNSPayloadWirePacketsTotal": { + data: []byte(` +{ + "policy_dns": { + "dns": { + "wire_packets": { + "total": 9 + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "dns_wire_packets_total", + })), + Datapoint: prometheus.Datapoint{ + Value: 9, + }, + }, + }, + "DNSPayloadWirePacketsUdp": { + data: []byte(` +{ + "policy_dns": { + "dns": { + "wire_packets": { + "udp": 9 + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "dns_wire_packets_udp", + })), + Datapoint: prometheus.Datapoint{ + Value: 9, + }, + }, + }, + } + + for desc, c := range cases { + t.Run(desc, func(t *testing.T) { + data.Data = c.data + res, err := be.ProcessMetrics(agent, agentID.String(), data) + require.Nil(t, err, fmt.Sprintf("%s: unexpected error: %s", desc, err)) + var receivedLabel []prometheus.Label + var receivedDatapoint prometheus.Datapoint + for _, value := range res { + if c.expected.Labels[0] == value.Labels[0] { + receivedLabel = value.Labels + receivedDatapoint = value.Datapoint + } + } + assert.True(t, reflect.DeepEqual(c.expected.Labels, receivedLabel), fmt.Sprintf("%s: expected %v got %v", desc, c.expected.Labels, receivedLabel)) + assert.Equal(t, c.expected.Datapoint.Value, receivedDatapoint.Value, fmt.Sprintf("%s: expected value %f got %f", desc, c.expected.Datapoint.Value, receivedDatapoint.Value)) + }) + } + +} + +func TestDNSXactConversion(t *testing.T) { + var logger = zap.NewNop() + pktvisor.Register(logger) + + ownerID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + policyID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + agentID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + var agent = &pb.AgentInfoRes{ + OwnerID: ownerID.String(), + AgentName: "agent-test", + } + + data := fleet.AgentMetricsRPCPayload{ + PolicyID: policyID.String(), + PolicyName: "policy-test", + Datasets: nil, + Format: "json", + BEVersion: "1.0", + } + + be := 
backend.GetBackend("pktvisor") + + commonLabels := []prometheus.Label{ + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_dns", + }, + } + + cases := map[string]struct { + data []byte + expected prometheus.TimeSeries + }{ + "DNSPayloadXactCountsTimedOut": { + data: []byte(` +{ + "policy_dns": { + "dns": { + "xact": { + "counts": { + "timed_out": 1 + } + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "dns_xact_counts_timed_out", + })), + Datapoint: prometheus.Datapoint{ + Value: 1, + }, + }, + }, + "DNSPayloadXactCountsTotal": { + data: []byte(` +{ + "policy_dns": { + "dns": { + "xact": { + "counts": { + "total": 8 + } + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "dns_xact_counts_total", + })), + Datapoint: prometheus.Datapoint{ + Value: 8, + }, + }, + }, + "DNSPayloadXactInTotal": { + data: []byte(` +{ + "policy_dns": { + "dns": { + "xact": { + "in": { + "total": 8 + } + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "dns_xact_in_total", + })), + Datapoint: prometheus.Datapoint{ + Value: 8, + }, + }, + }, + "DNSPayloadXactInTopSlow": { + data: []byte(` +{ + "policy_dns": { + "dns": { + "xact": { + "in": { + "top_slow": [ + { + "estimate": 111, + "name": "23.43.252.68" + } + ] + } + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "dns_xact_in_top_slow", + }), prometheus.Label{ + Name: "qname", + Value: "23.43.252.68", + }), + Datapoint: prometheus.Datapoint{ + Value: 111, + }, + }, + }, + "DNSPayloadXactOutTopSlow": { + data: []byte(` +{ + "policy_dns": { + "dns": { + "xact": { + "out": { + "top_slow": [ + { + "estimate": 111, + "name": "23.43.252.68" + } + ] + } + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "dns_xact_out_top_slow", + }), prometheus.Label{ + Name: "qname", + Value: "23.43.252.68", + }), + Datapoint: prometheus.Datapoint{ + Value: 111, + }, + }, + }, + "DNSPayloadXactOutTotal": { + data: []byte(` +{ + "policy_dns": { + "dns": { + "xact": { + "out": { + "total": 8 + } + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "dns_xact_out_total", + })), + Datapoint: prometheus.Datapoint{ + Value: 8, + }, + }, + }, + } + + for desc, c := range cases { + t.Run(desc, func(t *testing.T) { + data.Data = c.data + res, err := be.ProcessMetrics(agent, agentID.String(), data) + require.Nil(t, err, fmt.Sprintf("%s: unexpected error: %s", desc, err)) + var receivedLabel []prometheus.Label + var receivedDatapoint prometheus.Datapoint + for _, value := range res { + if c.expected.Labels[0] == value.Labels[0] { + receivedLabel = value.Labels + receivedDatapoint = value.Datapoint + } + } + assert.True(t, reflect.DeepEqual(c.expected.Labels, receivedLabel), fmt.Sprintf("%s: expected %v got %v", desc, c.expected.Labels, receivedLabel)) 
+ assert.Equal(t, c.expected.Datapoint.Value, receivedDatapoint.Value, fmt.Sprintf("%s: expected value %f got %f", desc, c.expected.Datapoint.Value, receivedDatapoint.Value)) + }) + } +} + +func TestPacketsConversion(t *testing.T) { + var logger = zap.NewNop() + pktvisor.Register(logger) + + ownerID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + policyID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + agentID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + var agent = &pb.AgentInfoRes{ + OwnerID: ownerID.String(), + AgentName: "agent-test", + } + + data := fleet.AgentMetricsRPCPayload{ + PolicyID: policyID.String(), + PolicyName: "policy-test", + Datasets: nil, + Format: "json", + BEVersion: "1.0", + } + + be := backend.GetBackend("pktvisor") + + commonLabels := []prometheus.Label{ + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_dns", + }, + } + + cases := map[string]struct { + data []byte + expected prometheus.TimeSeries + }{ + "DNSPayloadPacketsCardinalityDst": { + data: []byte(` +{ + "policy_dns": { + "packets": { + "cardinality": { + "dst_ips_out": 41 + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "packets_cardinality_dst_ips_out", + })), + Datapoint: prometheus.Datapoint{ + Value: 41, + }, + }, + }, + "DNSPayloadPacketsCardinalitySrc": { + data: []byte(` +{ + "policy_dns": { + "packets": { + "cardinality": { + "src_ips_in": 43 + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "packets_cardinality_src_ips_in", + })), + Datapoint: prometheus.Datapoint{ + Value: 43, + }, + }, + }, + "DNSPayloadPacketsDeepSamples": { + data: []byte(` +{ + "policy_dns": { + "packets": { + "deep_samples": 3139 + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "packets_deep_samples", + })), + Datapoint: prometheus.Datapoint{ + Value: 3139, + }, + }, + }, + "DNSPayloadPacketsIn": { + data: []byte(` +{ + "policy_dns": { + "packets": { + "in": 1422 + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "packets_in", + })), + Datapoint: prometheus.Datapoint{ + Value: 1422, + }, + }, + }, + "DNSPayloadPacketsIpv4": { + data: []byte(` +{ + "policy_dns": { + "packets": { + "ipv4": 2506 + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "packets_ipv4", + })), + Datapoint: prometheus.Datapoint{ + Value: 2506, + }, + }, + }, + "DNSPayloadPacketsIpv6": { + data: []byte(` +{ + "policy_dns": { + "packets": { + "ipv6": 2506 + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "packets_ipv6", + })), + Datapoint: prometheus.Datapoint{ + Value: 2506, + }, + }, + }, + "DNSPayloadPacketsOtherL4": { + data: []byte(` +{ + "policy_dns": { + "packets": { + "other_l4": 637 + 
} + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "packets_other_l4", + })), + Datapoint: prometheus.Datapoint{ + Value: 637, + }, + }, + }, + "DNSPayloadPacketsFiltered": { + data: []byte(` +{ + "policy_dns": { + "packets": { + "filtered": 637 + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "packets_filtered", + })), + Datapoint: prometheus.Datapoint{ + Value: 637, + }, + }, + }, + "DNSPayloadPacketsOut": { + data: []byte(` +{ + "policy_dns": { + "packets": { + "out": 1083 + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "packets_out", + })), + Datapoint: prometheus.Datapoint{ + Value: 1083, + }, + }, + }, + "DNSPayloadPacketsTcp": { + data: []byte(` +{ + "policy_dns": { + "packets": { + "tcp": 549 + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "packets_tcp", + })), + Datapoint: prometheus.Datapoint{ + Value: 549, + }, + }, + }, + "DNSPayloadPacketsTotal": { + data: []byte(` +{ + "policy_dns": { + "packets": { + "total": 3139 + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "packets_total", + })), + Datapoint: prometheus.Datapoint{ + Value: 3139, + }, + }, + }, + "DNSPayloadPacketsUdp": { + data: []byte(` +{ + "policy_dns": { + "packets": { + "udp": 1953 + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "packets_udp", + })), + Datapoint: prometheus.Datapoint{ + Value: 1953, + }, + }, + }, + "DNSPayloadPacketsTopIpv4": { + data: []byte(` +{ + "policy_dns": { + "packets": { + "top_ipv4": [ + { + "estimate": 996, + "name": "103.6.85.201" + } + ] + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "packets_top_ipv4", + }), prometheus.Label{ + Name: "ipv4", + Value: "103.6.85.201", + }), + Datapoint: prometheus.Datapoint{ + Value: 996, + }, + }, + }, + "DNSPayloadPacketsTopIpv6": { + data: []byte(` +{ + "policy_dns": { + "packets": { + "top_ipv6": [ + { + "estimate": 996, + "name": "103.6.85.201" + } + ] + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "packets_top_ipv6", + }), prometheus.Label{ + Name: "ipv6", + Value: "103.6.85.201", + }), + Datapoint: prometheus.Datapoint{ + Value: 996, + }, + }, + }, + } + + for desc, c := range cases { + t.Run(desc, func(t *testing.T) { + data.Data = c.data + res, err := be.ProcessMetrics(agent, agentID.String(), data) + require.Nil(t, err, fmt.Sprintf("%s: unexpected error: %s", desc, err)) + var receivedLabel []prometheus.Label + var receivedDatapoint prometheus.Datapoint + for _, value := range res { + if c.expected.Labels[0] == value.Labels[0] { + receivedLabel = value.Labels + receivedDatapoint = value.Datapoint + } + } + assert.True(t, reflect.DeepEqual(c.expected.Labels, receivedLabel), fmt.Sprintf("%s: expected %v got %v", desc, c.expected.Labels, receivedLabel)) + assert.Equal(t, c.expected.Datapoint.Value, receivedDatapoint.Value, fmt.Sprintf("%s: expected value %f got %f", desc, c.expected.Datapoint.Value, receivedDatapoint.Value)) + 
}) + } +} + +func TestPeriodConversion(t *testing.T) { + var logger = zap.NewNop() + pktvisor.Register(logger) + + ownerID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + policyID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + agentID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + var agent = &pb.AgentInfoRes{ + OwnerID: ownerID.String(), + AgentName: "agent-test", + } + + data := fleet.AgentMetricsRPCPayload{ + PolicyID: policyID.String(), + PolicyName: "policy-test", + Datasets: nil, + Format: "json", + BEVersion: "1.0", + } + + be := backend.GetBackend("pktvisor") + + cases := map[string]struct { + data []byte + expectedLength prometheus.TimeSeries + expectedStartTs prometheus.TimeSeries + }{ + "DNSPayloadPeriod": { + data: []byte(` +{ + "policy_dns": { + "dns": { + "period": { + "length": 60, + "start_ts": 1624888107 + } + } + } +}`), + expectedLength: prometheus.TimeSeries{ + Labels: []prometheus.Label{ + { + Name: "__name__", + Value: "dns_period_length", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_dns", + }, + }, + Datapoint: prometheus.Datapoint{ + Value: 60, + }, + }, + expectedStartTs: prometheus.TimeSeries{ + Labels: []prometheus.Label{ + { + Name: "__name__", + Value: "dns_period_start_ts", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_dns", + }, + }, + Datapoint: prometheus.Datapoint{ + Value: 1624888107, + }, + }, + }, + "PacketsPayloadPeriod": { + data: []byte(` +{ + "policy_packets": { + "packets": { + "period": { + "length": 60, + "start_ts": 1624888107 + } + } + } +}`), + expectedLength: prometheus.TimeSeries{ + Labels: []prometheus.Label{ + { + Name: "__name__", + Value: "packets_period_length", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_packets", + }, + }, + Datapoint: prometheus.Datapoint{ + Value: 60, + }, + }, + expectedStartTs: prometheus.TimeSeries{ + Labels: []prometheus.Label{ + { + Name: "__name__", + Value: "packets_period_start_ts", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_packets", + }, + }, + Datapoint: prometheus.Datapoint{ + Value: 1624888107, + }, + }, + }, + "DHCPPayloadPeriod": { + data: []byte(` +{ + "policy_dhcp": { + "dhcp": { + "period": { + "length": 60, + "start_ts": 
1624888107 + } + } + } +}`), + expectedLength: prometheus.TimeSeries{ + Labels: []prometheus.Label{ + { + Name: "__name__", + Value: "dhcp_period_length", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_dhcp", + }, + }, + Datapoint: prometheus.Datapoint{ + Value: 60, + }, + }, + expectedStartTs: prometheus.TimeSeries{ + Labels: []prometheus.Label{ + { + Name: "__name__", + Value: "dhcp_period_start_ts", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_dhcp", + }, + }, + Datapoint: prometheus.Datapoint{ + Value: 1624888107, + }, + }, + }, + "FlowPayloadPeriod": { + data: []byte(` +{ + "policy_flow": { + "flow": { + "period": { + "length": 60, + "start_ts": 1624888107 + } + } + } +}`), + expectedLength: prometheus.TimeSeries{ + Labels: []prometheus.Label{ + { + Name: "__name__", + Value: "flow_period_length", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_flow", + }, + }, + Datapoint: prometheus.Datapoint{ + Value: 60, + }, + }, + expectedStartTs: prometheus.TimeSeries{ + Labels: []prometheus.Label{ + { + Name: "__name__", + Value: "flow_period_start_ts", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_flow", + }, + }, + Datapoint: prometheus.Datapoint{ + Value: 1624888107, + }, + }, + }, + } + + for desc, c := range cases { + t.Run(desc, func(t *testing.T) { + data.Data = c.data + res, err := be.ProcessMetrics(agent, agentID.String(), data) + require.Nil(t, err, fmt.Sprintf("%s: unexpected error: %s", desc, err)) + var receivedLabelStartTs []prometheus.Label + var receivedDatapointStartTs prometheus.Datapoint + var receivedLabelLength []prometheus.Label + var receivedDatapointLength prometheus.Datapoint + for _, value := range res { + if c.expectedLength.Labels[0] == value.Labels[0] { + receivedLabelLength = value.Labels + receivedDatapointLength = value.Datapoint + } else if c.expectedStartTs.Labels[0] == value.Labels[0] { + receivedLabelStartTs = value.Labels + receivedDatapointStartTs = value.Datapoint + } + } + assert.True(t, reflect.DeepEqual(c.expectedLength.Labels, receivedLabelLength), fmt.Sprintf("%s: expected %v got %v", desc, c.expectedLength.Labels, receivedLabelLength)) + assert.Equal(t, c.expectedLength.Datapoint.Value, receivedDatapointLength.Value, fmt.Sprintf("%s: expected value %f got %f", desc, c.expectedLength.Datapoint.Value, 
receivedDatapointLength.Value)) + assert.True(t, reflect.DeepEqual(c.expectedStartTs.Labels, receivedLabelStartTs), fmt.Sprintf("%s: expected %v got %v", desc, c.expectedStartTs.Labels, receivedLabelStartTs)) + assert.Equal(t, c.expectedStartTs.Datapoint.Value, receivedDatapointStartTs.Value, fmt.Sprintf("%s: expected value %f got %f", desc, c.expectedStartTs.Datapoint.Value, receivedDatapointStartTs.Value)) + + }) + } +} + +func TestFlowCardinalityConversion(t *testing.T) { + var logger = zap.NewNop() + pktvisor.Register(logger) + + ownerID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + policyID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + agentID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + var agent = &pb.AgentInfoRes{ + OwnerID: ownerID.String(), + AgentName: "agent-test", + } + + data := fleet.AgentMetricsRPCPayload{ + PolicyID: policyID.String(), + PolicyName: "policy-test", + Datasets: nil, + Format: "json", + BEVersion: "1.0", + } + + be := backend.GetBackend("pktvisor") + + commonLabels := []prometheus.Label{ + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_flow", + }, + { + Name: "device", + Value: "192.168.4.7", + }, + } + + cases := map[string]struct { + data []byte + expected prometheus.TimeSeries + }{ + "FlowPayloadCardinalityDstIpsOut": { + data: []byte(` + { + "policy_flow": { + "flow": { + "devices":{ + "192.168.4.7": { + "interfaces": { + "eth0": { + "cardinality": { + "dst_ips_out": 4 + } + } + } + } + } + } + } + }`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(append(commonLabels, prometheus.Label{ + Name: "device_interface", + Value: "192.168.4.7|eth0", + }), prometheus.Label{ + Name: "__name__", + Value: "flow_cardinality_dst_ips_out", + })), + Datapoint: prometheus.Datapoint{ + Value: 4, + }, + }, + }, + "FlowPayloadCardinalityDstPortsOut": { + data: []byte(` + { + "policy_flow": { + "flow": { + "devices":{ + "192.168.4.7": { + "interfaces": { + "37": { + "cardinality": { + "dst_ports_out": 31, + "src_ips_in": 4, + "src_ports_in": 31 + } + } + } + } + } + } + } + }`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(append(commonLabels, prometheus.Label{ + Name: "device_interface", + Value: "192.168.4.7|37", + }), prometheus.Label{ + Name: "__name__", + Value: "flow_cardinality_dst_ports_out", + })), + Datapoint: prometheus.Datapoint{ + Value: 31, + }, + }, + }, + "FlowPayloadCardinalitySrcIpsIn": { + data: []byte(` + { + "policy_flow": { + "flow": { + "devices":{ + "192.168.4.7": { + "interfaces": { + "37": { + "cardinality": { + "src_ips_in": 4, + "src_ports_in": 31 + } + } + } + } + } + } + } + }`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(append(commonLabels, + prometheus.Label{ + Name: "device_interface", + Value: "192.168.4.7|37", + }), prometheus.Label{ + Name: "__name__", + Value: "flow_cardinality_src_ips_in", + })), + Datapoint: prometheus.Datapoint{ + Value: 4, + }, + }, + }, + "FlowPayloadCardinalitySrcPortsIn": { + data: []byte(` + { + "policy_flow": { + "flow": { + "devices":{ + "192.168.4.7": { + "interfaces": { + "eth0": { + "cardinality": { + "src_ports_in": 
31 + } + } + } + } + } + } + } + }`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(append(commonLabels, prometheus.Label{ + Name: "device_interface", + Value: "192.168.4.7|eth0", + }), prometheus.Label{ + Name: "__name__", + Value: "flow_cardinality_src_ports_in", + })), + Datapoint: prometheus.Datapoint{ + Value: 31, + }, + }, + }, + } + + for desc, c := range cases { + t.Run(desc, func(t *testing.T) { + data.Data = c.data + res, err := be.ProcessMetrics(agent, agentID.String(), data) + require.Nil(t, err, fmt.Sprintf("%s: unexpected error: %s", desc, err)) + var receivedLabel []prometheus.Label + var receivedDatapoint prometheus.Datapoint + for _, value := range res { + if c.expected.Labels[0] == value.Labels[0] { + if len(c.expected.Labels) < 7 { + receivedLabel = value.Labels + receivedDatapoint = value.Datapoint + } else { + if c.expected.Labels[6].Value == value.Labels[6].Value { + receivedLabel = value.Labels + receivedDatapoint = value.Datapoint + } + } + } + } + assert.True(t, reflect.DeepEqual(c.expected.Labels, receivedLabel), fmt.Sprintf("%s: expected %v got %v", desc, c.expected.Labels, receivedLabel)) + assert.Equal(t, c.expected.Datapoint.Value, receivedDatapoint.Value, fmt.Sprintf("%s: expected value %f got %f", desc, c.expected.Datapoint.Value, receivedDatapoint.Value)) + }) + } + +} + +func TestFlowConversion(t *testing.T) { + var logger = zap.NewNop() + pktvisor.Register(logger) + + ownerID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + policyID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + agentID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + var agent = &pb.AgentInfoRes{ + OwnerID: ownerID.String(), + AgentName: "agent-test", + } + + data := fleet.AgentMetricsRPCPayload{ + PolicyID: policyID.String(), + PolicyName: "policy-test", + Datasets: nil, + Format: "json", + BEVersion: "1.0", + } + + be := backend.GetBackend("pktvisor") + + commonLabels := []prometheus.Label{ + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_flow", + }, + { + Name: "device", + Value: "192.168.4.7", + }, + } + + cases := map[string]struct { + data []byte + expected prometheus.TimeSeries + }{ + "FlowPayloadRecordsFiltered": { + data: []byte(` + { + "policy_flow": { + "flow": { + "devices":{ + "192.168.4.7": { + "records_filtered": 8 + } + } + } + } + }`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "flow_records_filtered", + })), + Datapoint: prometheus.Datapoint{ + Value: 8, + }, + }, + }, + "FlowPayloadRecordsFlows": { + data: []byte(` + { + "policy_flow": { + "flow": { + "devices":{ + "192.168.4.7": { + "records_flows": 8 + } + } + } + } + }`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(commonLabels, prometheus.Label{ + Name: "__name__", + Value: "flow_records_flows", + })), + Datapoint: prometheus.Datapoint{ + Value: 8, + }, + }, + }, + "FlowPayloadInIpv4Bytes": { + data: []byte(` + { + "policy_flow": { + "flow": { + "devices":{ + "192.168.4.7": { + "interfaces": { + "37": { + "in_ipv4_bytes": 52785 + } + } + } + } + } + } + }`), + expected: 
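+			// the device is labeled twice: once as a bare device label and once inside the combined device_interface label of the form "<device>|<interface>"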
prometheus.TimeSeries{ + Labels: append(prependLabel(append(commonLabels, prometheus.Label{ + Name: "device_interface", + Value: "192.168.4.7|37", + }), prometheus.Label{ + Name: "__name__", + Value: "flow_in_ipv4_bytes", + })), + Datapoint: prometheus.Datapoint{ + Value: 52785, + }, + }, + }, + "FlowPayloadOutIpv6Packets": { + data: []byte(` + { + "policy_flow": { + "flow": { + "devices":{ + "192.168.4.7": { + "interfaces": { + "37": { + "out_ipv6_packets": 52785 + } + } + } + } + } + } + }`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(append(commonLabels, prometheus.Label{ + Name: "device_interface", + Value: "192.168.4.7|37", + }), prometheus.Label{ + Name: "__name__", + Value: "flow_out_ipv6_packets", + })), + Datapoint: prometheus.Datapoint{ + Value: 52785, + }, + }, + }, + "FlowPayloadInOtherL4Bytes": { + data: []byte(` + { + "policy_flow": { + "flow": { + "devices":{ + "192.168.4.7": { + "interfaces": { + "37": { + "in_other_l4_bytes": 52785 + } + } + } + } + } + } + }`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(append(commonLabels, prometheus.Label{ + Name: "device_interface", + Value: "192.168.4.7|37", + }), prometheus.Label{ + Name: "__name__", + Value: "flow_in_other_l4_bytes", + })), + Datapoint: prometheus.Datapoint{ + Value: 52785, + }, + }, + }, + "FlowPayloadOutTCPPackets": { + data: []byte(` + { + "policy_flow": { + "flow": { + "devices":{ + "192.168.4.7": { + "interfaces": { + "37": { + "out_tcp_packets": 52785 + } + } + } + } + } + } + }`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(append(commonLabels, prometheus.Label{ + Name: "device_interface", + Value: "192.168.4.7|37", + }), prometheus.Label{ + Name: "__name__", + Value: "flow_out_tcp_packets", + })), + Datapoint: prometheus.Datapoint{ + Value: 52785, + }, + }, + }, + "FlowPayloadInUdpPackets": { + data: []byte(` + { + "policy_flow": { + "flow": { + "devices":{ + "192.168.4.7": { + "interfaces": { + "7": { + "in_udp_bytes": 52785, + "out_udp_bytes": 52786 + }, + "8": { + "in_udp_bytes": 52787, + "out_udp_bytes": 52788 + } + } + } + } + } + } + }`), + expected: prometheus.TimeSeries{ + Labels: append(prependLabel(append(commonLabels, prometheus.Label{ + Name: "device_interface", + Value: "192.168.4.7|8", + }), prometheus.Label{ + Name: "__name__", + Value: "flow_in_udp_bytes", + })), + Datapoint: prometheus.Datapoint{ + Value: 52780, + }, + }, + }, + } + + for desc, c := range cases { + t.Run(desc, func(t *testing.T) { + data.Data = c.data + res, err := be.ProcessMetrics(agent, agentID.String(), data) + require.Nil(t, err, fmt.Sprintf("%s: unexpected error: %s", desc, err)) + var receivedLabel []prometheus.Label + var receivedDatapoint prometheus.Datapoint + for _, value := range res { + if c.expected.Labels[0] == value.Labels[0] { + receivedLabel = value.Labels + receivedDatapoint = value.Datapoint + } + } + assert.NotNil(t, receivedLabel) + assert.GreaterOrEqual(t, receivedDatapoint.Value, c.expected.Datapoint.Value) + }) + } + +} + +func TestFlowTopKMetricsConversion(t *testing.T) { + var logger = zap.NewNop() + pktvisor.Register(logger) + + ownerID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + policyID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + agentID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + var agent = &pb.AgentInfoRes{ + OwnerID: ownerID.String(), + AgentName: "agent-test", + } + + data := 
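+	// the RPC payload metadata stays fixed across subtests; only Data is swapped in per case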
fleet.AgentMetricsRPCPayload{ + PolicyID: policyID.String(), + PolicyName: "policy-test", + Datasets: nil, + Format: "json", + BEVersion: "1.0", + } + + be := backend.GetBackend("pktvisor") + + cases := map[string]struct { + data []byte + expected prometheus.TimeSeries + }{ + "FlowTopInDstIpsAndPortBytes": { + data: []byte(` +{ + "policy_flow": { + "flow": { + "devices":{ + "192.168.4.7": { + "interfaces": { + "38": { + "top_in_dst_ips_and_port_bytes": [ + { + "estimate": 8, + "name": "10.4.2.2:5000" + } + ] + } + } + } + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: []prometheus.Label{ + { + Name: "__name__", + Value: "flow_top_in_dst_ips_and_port_bytes", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_flow", + }, + { + Name: "device", + Value: "192.168.4.7", + }, + { + Name: "device_interface", + Value: "192.168.4.7|38", + }, + { + Name: "ip_port", + Value: "10.4.2.2:5000", + }, + }, + Datapoint: prometheus.Datapoint{ + Value: 8, + }, + }, + }, + "FlowTopOutDstIpsAndPortPackets": { + data: []byte(` +{ + "policy_flow": { + "flow": { + "devices":{ + "192.168.4.7": { + "interfaces": { + "38": { + "top_out_dst_ips_and_port_packets": [ + { + "estimate": 8, + "name": "10.4.2.2:5000" + } + ] + } + } + } + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: []prometheus.Label{ + { + Name: "__name__", + Value: "flow_top_out_dst_ips_and_port_packets", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_flow", + }, + { + Name: "device", + Value: "192.168.4.7", + }, + { + Name: "device_interface", + Value: "192.168.4.7|38", + }, + { + Name: "ip_port", + Value: "10.4.2.2:5000", + }, + }, + Datapoint: prometheus.Datapoint{ + Value: 8, + }, + }, + }, + "FlowTopInDstIpsBytes": { + data: []byte(` +{ + "policy_flow": { + "flow": { + "devices":{ + "192.168.4.7": { + "interfaces": { + "37": { + "top_in_dst_ips_bytes": [ + { + "estimate": 8, + "name": "10.4.2.2" + } + ] + } + } + } + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: []prometheus.Label{ + { + Name: "__name__", + Value: "flow_top_in_dst_ips_bytes", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_flow", + }, + { + Name: "device", + Value: "192.168.4.7", + }, + { + Name: "device_interface", + Value: "192.168.4.7|37", + }, + { + Name: "ip", + Value: "10.4.2.2", + }, + }, + Datapoint: prometheus.Datapoint{ + Value: 8, + }, + }, + }, + "FlowTopInDstIpsPackets": { + data: []byte(` +{ + "policy_flow": { + "flow": { + "devices":{ + "192.168.4.7": { + "interfaces": { + "4": { + "top_in_dst_ips_packets": [ + { + "estimate": 8, + "name": "10.4.2.2" + } + ] + } + } + } + } + } + } +}`), + expected: 
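+			// for top-K series, the pktvisor entry "name" is emitted as an extra label (here: ip)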
prometheus.TimeSeries{ + Labels: []prometheus.Label{ + { + Name: "__name__", + Value: "flow_top_in_dst_ips_packets", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_flow", + }, + { + Name: "device", + Value: "192.168.4.7", + }, + { + Name: "device_interface", + Value: "192.168.4.7|4", + }, + { + Name: "ip", + Value: "10.4.2.2", + }, + }, + Datapoint: prometheus.Datapoint{ + Value: 8, + }, + }, + }, + "FlowTopOutDstPortsBytes": { + data: []byte(` +{ + "policy_flow": { + "flow": { + "devices":{ + "192.168.4.7": { + "interfaces": { + "37": { + "top_out_dst_ports_bytes": [ + { + "estimate": 8, + "name": "5000" + } + ] + } + } + } + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: []prometheus.Label{ + { + Name: "__name__", + Value: "flow_top_out_dst_ports_bytes", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_flow", + }, + { + Name: "device", + Value: "192.168.4.7", + }, + { + Name: "device_interface", + Value: "192.168.4.7|37", + }, + { + Name: "port", + Value: "5000", + }, + }, + Datapoint: prometheus.Datapoint{ + Value: 8, + }, + }, + }, + "FlowTopDstInPortsPackets": { + data: []byte(` +{ + "policy_flow": { + "flow": { + "devices":{ + "192.168.4.7": { + "interfaces": { + "37": { + "top_in_dst_ports_packets": [ + { + "estimate": 8, + "name": "5000" + } + ] + } + } + } + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: []prometheus.Label{ + { + Name: "__name__", + Value: "flow_top_in_dst_ports_packets", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_flow", + }, + { + Name: "device", + Value: "192.168.4.7", + }, + { + Name: "device_interface", + Value: "192.168.4.7|37", + }, + { + Name: "port", + Value: "5000", + }, + }, + Datapoint: prometheus.Datapoint{ + Value: 8, + }, + }, + }, + "FlowTopInInterfacesBytes": { + data: []byte(` +{ + "policy_flow": { + "flow": { + "devices":{ + "192.168.4.7": { + "top_in_interfaces_bytes": [ + { + "estimate": 8, + "name": "300" + } + ] + } + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: []prometheus.Label{ + { + Name: "__name__", + Value: "flow_top_in_interfaces_bytes", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_flow", + }, + { + Name: "device", + Value: "192.168.4.7", + }, + { + Name: "interface", + Value: "300", + }, + }, + Datapoint: prometheus.Datapoint{ + Value: 8, + }, + 
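+					// the top-K "estimate" field becomes the datapoint value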
}, + }, + "FlowTopInInterfacesPackets": { + data: []byte(` +{ + "policy_flow": { + "flow": { + "devices":{ + "192.168.4.7": { + "top_in_interfaces_packets": [ + { + "estimate": 8, + "name": "300" + } + ] + } + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: []prometheus.Label{ + { + Name: "__name__", + Value: "flow_top_in_interfaces_packets", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_flow", + }, + { + Name: "device", + Value: "192.168.4.7", + }, + { + Name: "interface", + Value: "300", + }, + }, + Datapoint: prometheus.Datapoint{ + Value: 8, + }, + }, + }, + "FlowTopOutSrcIpsBytes": { + data: []byte(` +{ + "policy_flow": { + "flow": { + "devices":{ + "192.168.4.7": { + "interfaces": { + "37": { + "top_out_src_ips_bytes": [ + { + "estimate": 15267, + "name": "192.168.0.1" + } + ] + } + } + } + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: []prometheus.Label{ + { + Name: "__name__", + Value: "flow_top_out_src_ips_bytes", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_flow", + }, + { + Name: "device", + Value: "192.168.4.7", + }, + { + Name: "device_interface", + Value: "192.168.4.7|37", + }, + { + Name: "ip", + Value: "192.168.0.1", + }, + }, + Datapoint: prometheus.Datapoint{ + Value: 15267, + }, + }, + }, + "FlowTopOutInterfacesPackets": { + data: []byte(` +{ + "policy_flow": { + "flow": { + "devices":{ + "192.168.4.7": { + "top_out_interfaces_packets": [ + { + "estimate": 8, + "name": "200" + } + ] + } + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: []prometheus.Label{ + { + Name: "__name__", + Value: "flow_top_out_interfaces_packets", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_flow", + }, + { + Name: "device", + Value: "192.168.4.7", + }, + { + Name: "interface", + Value: "200", + }, + }, + Datapoint: prometheus.Datapoint{ + Value: 8, + }, + }, + }, "FlowTopInSrcIpsAndPortBytes": { + data: []byte(` +{ + "policy_flow": { + "flow": { + "devices":{ + "192.168.4.7": { + "interfaces": { + "37": { + "top_in_src_ips_and_port_bytes": [ + { + "estimate": 8, + "name": "10.4.2.2:5000" + } + ] + } + } + } + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: []prometheus.Label{ + { + Name: "__name__", + Value: "flow_top_in_src_ips_and_port_bytes", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_flow", 
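+					// handler mirrors the top-level policy key of the payload ("policy_flow")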
+ }, + { + Name: "device", + Value: "192.168.4.7", + }, + { + Name: "device_interface", + Value: "192.168.4.7|37", + }, + { + Name: "ip_port", + Value: "10.4.2.2:5000", + }, + }, + Datapoint: prometheus.Datapoint{ + Value: 8, + }, + }, + }, + "FlowTopOutSrcIpsAndPortPackets": { + data: []byte(` +{ + "policy_flow": { + "flow": { + "devices":{ + "192.168.4.7": { + "interfaces": { + "eth0": { + "top_out_src_ips_and_port_packets": [ + { + "estimate": 8, + "name": "10.4.2.2:5000" + } + ] + } + } + } + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: []prometheus.Label{ + { + Name: "__name__", + Value: "flow_top_out_src_ips_and_port_packets", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_flow", + }, + { + Name: "device", + Value: "192.168.4.7", + }, + { + Name: "device_interface", + Value: "192.168.4.7|eth0", + }, + { + Name: "ip_port", + Value: "10.4.2.2:5000", + }, + }, + Datapoint: prometheus.Datapoint{ + Value: 8, + }, + }, + }, + "FlowTopInSrcIpsBytes": { + data: []byte(` +{ + "policy_flow": { + "flow": { + "devices":{ + "192.168.4.7": { + "interfaces": { + "eth1": { + "top_in_src_ips_bytes": [ + { + "estimate": 8, + "name": "10.4.2.2" + } + ] + } + } + } + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: []prometheus.Label{ + { + Name: "__name__", + Value: "flow_top_in_src_ips_bytes", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_flow", + }, + { + Name: "device", + Value: "192.168.4.7", + }, + { + Name: "device_interface", + Value: "192.168.4.7|eth1", + }, + { + Name: "ip", + Value: "10.4.2.2", + }, + }, + Datapoint: prometheus.Datapoint{ + Value: 8, + }, + }, + }, + "FlowTopOutSrcIpsPackets": { + data: []byte(` +{ + "policy_flow": { + "flow": { + "devices":{ + "192.168.4.7": { + "interfaces": { + "36": { + "top_out_src_ips_packets": [ + { + "estimate": 8, + "name": "10.4.2.2" + } + ] + } + } + } + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: []prometheus.Label{ + { + Name: "__name__", + Value: "flow_top_out_src_ips_packets", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_flow", + }, + { + Name: "device", + Value: "192.168.4.7", + }, + { + Name: "device_interface", + Value: "192.168.4.7|36", + }, + { + Name: "ip", + Value: "10.4.2.2", + }, + }, + Datapoint: prometheus.Datapoint{ + Value: 8, + }, + }, + }, + "FlowTopInSrcPortsBytes": { + data: []byte(` +{ + "policy_flow": { + "flow": { + "devices":{ + "192.168.4.7": { + "interfaces": { + "38": { + "top_in_src_ports_bytes": [ + { + "estimate": 8, + "name": "4500" + } + ] + } + } + } + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: []prometheus.Label{ + { + 
Name: "__name__", + Value: "flow_top_in_src_ports_bytes", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_flow", + }, + { + Name: "device", + Value: "192.168.4.7", + }, + { + Name: "device_interface", + Value: "192.168.4.7|38", + }, + { + Name: "port", + Value: "4500", + }, + }, + Datapoint: prometheus.Datapoint{ + Value: 8, + }, + }, + }, + "FlowTopOutSrcPortsPackets": { + data: []byte(` +{ + "policy_flow": { + "flow": { + "devices":{ + "192.168.4.7": { + "interfaces": { + "eth0": { + "top_out_src_ports_packets": [ + { + "estimate": 8, + "name": "4500" + } + ] + } + } + } + } + } + } +}`), + expected: prometheus.TimeSeries{ + Labels: []prometheus.Label{ + { + Name: "__name__", + Value: "flow_top_out_src_ports_packets", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_flow", + }, + { + Name: "device", + Value: "192.168.4.7", + }, + { + Name: "device_interface", + Value: "192.168.4.7|eth0", + }, + { + Name: "port", + Value: "4500", + }, + }, + Datapoint: prometheus.Datapoint{ + Value: 8, + }, + }, + }, + } + + for desc, c := range cases { + t.Run(desc, func(t *testing.T) { + data.Data = c.data + res, err := be.ProcessMetrics(agent, agentID.String(), data) + require.Nil(t, err, fmt.Sprintf("%s: unexpected error: %s", desc, err)) + var receivedLabel []prometheus.Label + var receivedDatapoint prometheus.Datapoint + for _, value := range res { + if c.expected.Labels[0] == value.Labels[0] { + receivedLabel = value.Labels + receivedDatapoint = value.Datapoint + } + } + assert.True(t, reflect.DeepEqual(c.expected.Labels, receivedLabel), fmt.Sprintf("%s: expected %v got %v", desc, c.expected.Labels, receivedLabel)) + assert.Equal(t, c.expected.Datapoint.Value, receivedDatapoint.Value, fmt.Sprintf("%s: expected value %f got %f", desc, c.expected.Datapoint.Value, receivedDatapoint.Value)) + }) + } + +} + +func TestAgentTagsConversion(t *testing.T) { + var logger = zap.NewNop() + pktvisor.Register(logger) + + ownerID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + policyID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + agentID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + var agent = &pb.AgentInfoRes{ + OwnerID: ownerID.String(), + AgentName: "agent-test", + AgentTags: types.Tags{"testkey": "testvalue", "testkey2": "testvalue2"}, + } + + data := fleet.AgentMetricsRPCPayload{ + PolicyID: policyID.String(), + PolicyName: "policy-test", + Datasets: nil, + Format: "json", + BEVersion: "1.0", + } + + be := backend.GetBackend("pktvisor") + + cases := map[string]struct { + data []byte + expected prometheus.TimeSeries + }{ + "Example metrics": { + data: []byte(` + { + "policy_packets": { + "packets": { + "top_ASN": [ + { + "estimate": 996, + "name": "36236/NETACTUATE" + } + ] + } + } + }`), + expected: prometheus.TimeSeries{ + Labels: []prometheus.Label{ + { + Name: 
"__name__", + Value: "packets_top_ASN", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_packets", + }, + { + Name: "testkey", + Value: "testvalue", + }, + { + Name: "testkey2", + Value: "testvalue2", + }, + { + Name: "asn", + Value: "36236/NETACTUATE", + }, + }, + Datapoint: prometheus.Datapoint{ + Value: 996, + }, + }, + }, + } + + for desc, c := range cases { + t.Run(desc, func(t *testing.T) { + data.Data = c.data + res, err := be.ProcessMetrics(agent, agentID.String(), data) + require.Nil(t, err, fmt.Sprintf("%s: unexpected error: %s", desc, err)) + var receivedLabel []prometheus.Label + var receivedDatapoint prometheus.Datapoint + for _, value := range res { + if c.expected.Labels[0] == value.Labels[0] { + receivedLabel = value.Labels + receivedDatapoint = value.Datapoint + } + } + assert.ElementsMatch(t, c.expected.Labels, receivedLabel, fmt.Sprintf("%s: expected %v got %v", desc, c.expected.Labels, receivedLabel)) + assert.Equal(t, c.expected.Datapoint.Value, receivedDatapoint.Value, fmt.Sprintf("%s: expected value %f got %f", desc, c.expected.Datapoint.Value, receivedDatapoint.Value)) + }) + } +} + +func TestTagsConversion(t *testing.T) { + var logger = zap.NewNop() + pktvisor.Register(logger) + + ownerID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + policyID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + agentID, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + var agent = &pb.AgentInfoRes{ + OwnerID: ownerID.String(), + AgentName: "agent-test", + AgentTags: types.Tags{"test": "true"}, + OrbTags: types.Tags{"test2": "true2"}, + } + + var sameTagKeyAgent = &pb.AgentInfoRes{ + OwnerID: ownerID.String(), + AgentName: "agent-test", + AgentTags: types.Tags{"test": "true"}, + OrbTags: types.Tags{"test": "true2"}, + } + + data := fleet.AgentMetricsRPCPayload{ + PolicyID: policyID.String(), + PolicyName: "policy-test", + Datasets: nil, + Format: "json", + BEVersion: "1.0", + Data: []byte(` + { + "policy_packets": { + "packets": { + "top_ASN": [ + { + "estimate": 996, + "name": "36236/NETACTUATE" + } + ] + } + } + }`), + } + + be := backend.GetBackend("pktvisor") + + commonLabels := []prometheus.Label{ + { + Name: "__name__", + Value: "packets_top_ASN", + }, + { + Name: "instance", + Value: "agent-test", + }, + { + Name: "job", + Value: policyID.String(), + }, + { + Name: "agent_id", + Value: agentID.String(), + }, + { + Name: "agent", + Value: "agent-test", + }, + { + Name: "policy_id", + Value: policyID.String(), + }, + { + Name: "policy", + Value: "policy-test", + }, + { + Name: "handler", + Value: "policy_packets", + }, + { + Name: "asn", + Value: "36236/NETACTUATE", + }, + } + + cases := map[string]struct { + agent *pb.AgentInfoRes + expected prometheus.TimeSeries + }{ + "Different agent tags and orb tag": { + agent: agent, + expected: prometheus.TimeSeries{ + Labels: append(commonLabels, prometheus.Label{ + Name: "test", + Value: "true", + }, prometheus.Label{ + Name: "test2", + Value: "true2", + }), + }, + }, + "Same key agent tags and orb tag": { + agent: sameTagKeyAgent, + expected: prometheus.TimeSeries{ + Labels: append(commonLabels, 
prometheus.Label{ + Name: "test", + Value: "true2", + }), + }, + }, + } + + for desc, c := range cases { + t.Run(desc, func(t *testing.T) { + res, err := be.ProcessMetrics(c.agent, agentID.String(), data) + require.Nil(t, err, fmt.Sprintf("%s: unexpected error: %s", desc, err)) + var receivedLabel []prometheus.Label + for _, value := range res { + if commonLabels[0].Value == value.Labels[0].Value { + receivedLabel = value.Labels + } + } + assert.ElementsMatch(t, c.expected.Labels, receivedLabel, fmt.Sprintf("%s: expected %v got %v", desc, c.expected.Labels, receivedLabel)) + }) + } + +} + +func prependLabel(labelList []prometheus.Label, label prometheus.Label) []prometheus.Label { + labelList = append(labelList, prometheus.Label{}) + copy(labelList[1:], labelList) + labelList[0] = label + return labelList +} + +func labelQuantiles(labelList []prometheus.Label, label prometheus.Label) []prometheus.Label { + for i := 0; i < 32; i += 8 { + labelList = append(labelList[:i+1], labelList[i:]...) + labelList[i] = label + } + return labelList +} diff --git a/sinker/backend/pktvisor/promwrapper.go b/sinker/backend/pktvisor/promwrapper.go new file mode 100644 index 000000000..8d936b5f2 --- /dev/null +++ b/sinker/backend/pktvisor/promwrapper.go @@ -0,0 +1,98 @@ +package pktvisor + +import ( + "fmt" + "strconv" + "strings" + "time" + + "github.com/orb-community/orb/sinker/prometheus" +) + +type labelList []prometheus.Label +type headerList []header +type dp prometheus.Datapoint + +type header struct { + name string + value string +} + +func (t *labelList) String() string { + var labels [][]string + for _, v := range []prometheus.Label(*t) { + labels = append(labels, []string{v.Name, v.Value}) + } + return fmt.Sprintf("%v", labels) +} + +func (t *labelList) Set(value string) error { + labelPair := strings.Split(value, ";") + + if len(labelPair) != 2 { + return fmt.Errorf("incorrect number of arguments to '-t': %d", len(labelPair)) + } + + label := prometheus.Label{ + Name: labelPair[0], + Value: labelPair[1], + } + + *t = append(*t, label) + + return nil +} + +func (h *headerList) String() string { + var headers [][]string + for _, v := range []header(*h) { + headers = append(headers, []string{v.name, v.value}) + } + return fmt.Sprintf("%v", headers) +} + +func (h *headerList) Set(value string) error { + firstSplit := strings.Index(value, ":") + if firstSplit == -1 { + return fmt.Errorf("header missing separating colon: '%v'", value) + } + + *h = append(*h, header{ + name: strings.TrimSpace(value[:firstSplit]), + value: strings.TrimSpace(value[firstSplit+1:]), + }) + + return nil +} + +func (d *dp) String() string { + return fmt.Sprintf("%v", []string{d.Timestamp.String(), fmt.Sprintf("%v", d.Value)}) +} + +func (d *dp) Set(value string) error { + dp := strings.Split(value, ",") + if len(dp) != 2 { + return fmt.Errorf("incorrect number of arguments to '-d': %d", len(dp)) + } + + var ts time.Time + if strings.ToLower(dp[0]) == "now" { + ts = time.Now() + } else { + i, err := strconv.Atoi(dp[0]) + if err != nil { + return fmt.Errorf("unable to parse timestamp: %s", dp[0]) + } + ts = time.Unix(int64(i), 0) + } + + val, err := strconv.ParseFloat(dp[1], 64) + if err != nil { + return fmt.Errorf("unable to parse value as float64: %s", dp[1]) + } + + d.Timestamp = ts + d.Value = val + + return nil +} diff --git a/sinker/backend/pktvisor/types.go b/sinker/backend/pktvisor/types.go new file mode 100644 index 000000000..a00f074ce --- /dev/null +++ b/sinker/backend/pktvisor/types.go @@ -0,0 +1,253 @@ +/* This 
Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ + +package pktvisor + +const PktvisorVersion = "4.2.0" + +// NameCount represents the count of a unique domain name +type NameCount struct { + Name string `mapstructure:"name"` + Estimate int64 `mapstructure:"estimate"` +} + +// Rates represents a histogram of rates at various percentiles +type Rates struct { + P50 int64 `mapstructure:"p50"` + P90 int64 `mapstructure:"p90"` + P95 int64 `mapstructure:"p95"` + P99 int64 `mapstructure:"p99"` +} + +// Quantiles represents a histogram of various percentiles +type Quantiles struct { + P50 int64 `mapstructure:"p50"` + P90 int64 `mapstructure:"p90"` + P95 int64 `mapstructure:"p95"` + P99 int64 `mapstructure:"p99"` +} + +// DHCPPayload contains the information specifically for the DNS protocol +type DHCPPayload struct { + WirePackets struct { + Filtered int64 `mapstructure:"filtered"` + Total int64 `mapstructure:"total"` + DeepSamples int64 `mapstructure:"deep_samples"` + Discover int64 `mapstructure:"discover"` + Offer int64 `mapstructure:"offer"` + Request int64 `mapstructure:"request"` + Ack int64 `mapstructure:"ack"` + Events int64 `mapstructure:"events"` + } `mapstructure:"wire_packets"` + Rates struct { + Total Rates `mapstructure:"total"` + Events Rates `mapstructure:"events"` + } `mapstructure:"rates"` + Period PeriodPayload `mapstructure:"period"` +} + +// DNSPayload contains the information specifically for the DNS protocol +type DNSPayload struct { + WirePackets struct { + Ipv4 int64 `mapstructure:"ipv4"` + Ipv6 int64 `mapstructure:"ipv6"` + Queries int64 `mapstructure:"queries"` + Replies int64 `mapstructure:"replies"` + TCP int64 `mapstructure:"tcp"` + Total int64 `mapstructure:"total"` + UDP int64 `mapstructure:"udp"` + Nodata int64 `mapstructure:"nodata"` + Noerror int64 `mapstructure:"noerror"` + Nxdomain int64 `mapstructure:"nxdomain"` + Srvfail int64 `mapstructure:"srvfail"` + Refused int64 `mapstructure:"refused"` + Filtered int64 `mapstructure:"filtered"` + DeepSamples int64 `mapstructure:"deep_samples"` + QueryECS int64 `mapstructure:"query_ecs"` + Events int64 `mapstructure:"events"` + } `mapstructure:"wire_packets"` + Rates struct { + Total Rates `mapstructure:"total"` + Events Rates `mapstructure:"events"` + } `mapstructure:"rates"` + Cardinality struct { + Qname int64 `mapstructure:"qname"` + } `mapstructure:"cardinality"` + Xact struct { + Counts struct { + Total int64 `mapstructure:"total"` + TimedOut int64 `mapstructure:"timed_out"` + } `mapstructure:"counts"` + In struct { + QuantilesUS Quantiles `mapstructure:"quantiles_us"` + TopSlow []NameCount `mapstructure:"top_slow"` + Total int64 `mapstructure:"total"` + } `mapstructure:"in"` + Out struct { + QuantilesUS Quantiles `mapstructure:"quantiles_us"` + TopSlow []NameCount `mapstructure:"top_slow"` + Total int64 `mapstructure:"total"` + } `mapstructure:"out"` + Ratio struct { + Quantiles struct { + P50 float64 `mapstructure:"p50"` + P90 float64 `mapstructure:"p90"` + P95 float64 `mapstructure:"p95"` + P99 float64 `mapstructure:"p99"` + } `mapstructure:"quantiles"` + } `mapstructure:"ratio"` + } `mapstructure:"xact"` + TopGeoLocECS []NameCount `mapstructure:"top_geoLoc_ecs"` + TopAsnECS []NameCount `mapstructure:"top_asn_ecs"` + TopQueryECS []NameCount `mapstructure:"top_query_ecs"` + TopQname2 []NameCount `mapstructure:"top_qname2"` + TopQname3 []NameCount 
`mapstructure:"top_qname3"` + TopNxdomain []NameCount `mapstructure:"top_nxdomain"` + TopQtype []NameCount `mapstructure:"top_qtype"` + TopRcode []NameCount `mapstructure:"top_rcode"` + TopREFUSED []NameCount `mapstructure:"top_refused"` + TopQnameByRespBytes []NameCount `mapstructure:"top_qname_by_resp_bytes"` + TopSRVFAIL []NameCount `mapstructure:"top_srvfail"` + TopNODATA []NameCount `mapstructure:"top_nodata"` + TopUDPPorts []NameCount `mapstructure:"top_udp_ports"` + Period PeriodPayload `mapstructure:"period"` +} + +// PacketPayload contains information about raw packets regardless of protocol +type PacketPayload struct { + Cardinality struct { + DstIpsOut int64 `mapstructure:"dst_ips_out"` + SrcIpsIn int64 `mapstructure:"src_ips_in"` + } `mapstructure:"cardinality"` + Ipv4 int64 `mapstructure:"ipv4"` + Ipv6 int64 `mapstructure:"ipv6"` + TCP int64 `mapstructure:"tcp"` + Total int64 `mapstructure:"total"` + UDP int64 `mapstructure:"udp"` + In int64 `mapstructure:"in"` + Out int64 `mapstructure:"out"` + UnknownDir int64 `mapstructure:"unknown_dir"` + OtherL4 int64 `mapstructure:"other_l4"` + DeepSamples int64 `mapstructure:"deep_samples"` + Filtered int64 `mapstructure:"filtered"` + Events int64 `mapstructure:"events"` + Protocol struct { + Tcp struct { + SYN int64 `mapstructure:"syn"` + } `mapstructure:"tcp"` + } `mapstructure:"protocol"` + PayloadSize Quantiles `mapstructure:"payload_size"` + Rates struct { + BytesIn Rates `mapstructure:"bytes_in"` + BytesOut Rates `mapstructure:"bytes_out"` + BytesTotal Rates `mapstructure:"bytes_total"` + PpsIn Rates `mapstructure:"pps_in"` + PpsOut Rates `mapstructure:"pps_out"` + PpsTotal Rates `mapstructure:"pps_total"` + PpsEvents Rates `mapstructure:"pps_events"` + } `mapstructure:"rates"` + TopIpv4 []NameCount `mapstructure:"top_ipv4"` + TopIpv6 []NameCount `mapstructure:"top_ipv6"` + TopGeoLoc []NameCount `mapstructure:"top_geoLoc"` + TopASN []NameCount `mapstructure:"top_asn"` + Period PeriodPayload `mapstructure:"period"` +} + +// PcapPayload contains information about pcap input stream +type PcapPayload struct { + TcpReassemblyErrors int64 `mapstructure:"tcp_reassembly_errors"` + IfDrops int64 `mapstructure:"if_drops"` + OsDrops int64 `mapstructure:"os_drops"` +} + +// PeriodPayload indicates the period of time for which a snapshot refers to +type PeriodPayload struct { + StartTS int64 `mapstructure:"start_ts"` + Length int64 `mapstructure:"length"` +} + +// FlowPayload contains the information specifically for the Flow protocol +type FlowPayload struct { + Devices map[string]struct { + RecordsFiltered int64 `mapstructure:"records_filtered"` + RecordsFlows int64 `mapstructure:"records_flows"` + TopInInterfacesBytes []NameCount `mapstructure:"top_in_interfaces_bytes"` + TopInInterfacesPackets []NameCount `mapstructure:"top_in_interfaces_packets"` + TopOutInterfacesBytes []NameCount `mapstructure:"top_out_interfaces_bytes"` + TopOutInterfacesPackets []NameCount `mapstructure:"top_out_interfaces_packets"` + Interfaces map[string]struct { + Cardinality struct { + Conversations int64 `mapstructure:"conversations"` + DstIpsOut int64 `mapstructure:"dst_ips_out"` + DstPortsOut int64 `mapstructure:"dst_ports_out"` + SrcIpsIn int64 `mapstructure:"src_ips_in"` + SrcPortsIn int64 `mapstructure:"src_ports_in"` + } `mapstructure:"cardinality"` + InIpv4Bytes int64 `mapstructure:"in_ipv4_bytes"` + InIpv4Packets int64 `mapstructure:"in_ipv4_packets"` + InIpv6Bytes int64 `mapstructure:"in_ipv6_bytes"` + InIpv6Packets int64 `mapstructure:"in_ipv6_packets"` 
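+			// the remaining per-interface counters follow the same in_/out_ split for other L4, TCP, UDP and byte/packet totals, followed by the top-K lists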
+ InOtherL4Bytes int64 `mapstructure:"in_other_l4_bytes"` + InOtherL4Packets int64 `mapstructure:"in_other_l4_packets"` + InTcpBytes int64 `mapstructure:"in_tcp_bytes"` + InTcpPackets int64 `mapstructure:"in_tcp_packets"` + InUdpBytes int64 `mapstructure:"in_udp_bytes"` + InUdpPackets int64 `mapstructure:"in_udp_packets"` + InBytes int64 `mapstructure:"in_bytes"` + InPackets int64 `mapstructure:"in_packets"` + OutIpv4Bytes int64 `mapstructure:"out_ipv4_bytes"` + OutIpv4Packets int64 `mapstructure:"out_ipv4_packets"` + OutIpv6Bytes int64 `mapstructure:"out_ipv6_bytes"` + OutIpv6Packets int64 `mapstructure:"out_ipv6_packets"` + OutOtherL4Bytes int64 `mapstructure:"out_other_l4_bytes"` + OutOtherL4Packets int64 `mapstructure:"out_other_l4_packets"` + OutTcpBytes int64 `mapstructure:"out_tcp_bytes"` + OutTcpPackets int64 `mapstructure:"out_tcp_packets"` + OutUdpBytes int64 `mapstructure:"out_udp_bytes"` + OutUdpPackets int64 `mapstructure:"out_udp_packets"` + OutBytes int64 `mapstructure:"out_bytes"` + OutPackets int64 `mapstructure:"out_packets"` + TopInSrcIpsBytes []NameCount `mapstructure:"top_in_src_ips_bytes"` + TopInSrcIpsPackets []NameCount `mapstructure:"top_in_src_ips_packets"` + TopInSrcPortsBytes []NameCount `mapstructure:"top_in_src_ports_bytes"` + TopInSrcPortsPackets []NameCount `mapstructure:"top_in_src_ports_packets"` + TopInSrcIpsAndPortBytes []NameCount `mapstructure:"top_in_src_ips_and_port_bytes"` + TopInSrcIpsAndPortPackets []NameCount `mapstructure:"top_in_src_ips_and_port_packets"` + TopInDstIpsBytes []NameCount `mapstructure:"top_in_dst_ips_bytes"` + TopInDstIpsPackets []NameCount `mapstructure:"top_in_dst_ips_packets"` + TopInDstPortsBytes []NameCount `mapstructure:"top_in_dst_ports_bytes"` + TopInDstPortsPackets []NameCount `mapstructure:"top_in_dst_ports_packets"` + TopInDstIpsAndPortBytes []NameCount `mapstructure:"top_in_dst_ips_and_port_bytes"` + TopInDstIpsAndPortPackets []NameCount `mapstructure:"top_in_dst_ips_and_port_packets"` + TopOutSrcIpsBytes []NameCount `mapstructure:"top_out_src_ips_bytes"` + TopOutSrcIpsPackets []NameCount `mapstructure:"top_out_src_ips_packets"` + TopOutSrcPortsBytes []NameCount `mapstructure:"top_out_src_ports_bytes"` + TopOutSrcPortsPackets []NameCount `mapstructure:"top_out_src_ports_packets"` + TopOutSrcIpsAndPortBytes []NameCount `mapstructure:"top_out_src_ips_and_port_bytes"` + TopOutSrcIpsAndPortPackets []NameCount `mapstructure:"top_out_src_ips_and_port_packets"` + TopOutDstIpsBytes []NameCount `mapstructure:"top_out_dst_ips_bytes"` + TopOutDstIpsPackets []NameCount `mapstructure:"top_out_dst_ips_packets"` + TopOutDstPortsBytes []NameCount `mapstructure:"top_out_dst_ports_bytes"` + TopOutDstPortsPackets []NameCount `mapstructure:"top_out_dst_ports_packets"` + TopOutDstIpsAndPortBytes []NameCount `mapstructure:"top_out_dst_ips_and_port_bytes"` + TopOutDstIpsAndPortPackets []NameCount `mapstructure:"top_out_dst_ips_and_port_packets"` + TopConversationsBytes []NameCount `mapstructure:"top_conversations_bytes"` + TopConversationsPackets []NameCount `mapstructure:"top_conversations_packets"` + TopGeoLocBytes []NameCount `mapstructure:"top_geo_loc_bytes"` + TopGeoLocPackets []NameCount `mapstructure:"top_geo_loc_packets"` + TopAsnBytes []NameCount `mapstructure:"top_ASN_bytes"` + TopAsnPackets []NameCount `mapstructure:"top_ASN_packets"` + } `mapstructure:"interfaces"` + } `mapstructure:"devices"` + Period PeriodPayload `mapstructure:"period"` +} + +// StatSnapshot is a snapshot of a given period from pktvisord +type StatSnapshot 
struct { + DNS *DNSPayload `mapstructure:"DNS,omitempty"` + DHCP *DHCPPayload `mapstructure:"DHCP,omitempty"` + Packets *PacketPayload `mapstructure:"Packets,omitempty"` + Pcap *PcapPayload `mapstructure:"Pcap,omitempty"` + Flow *FlowPayload `mapstructure:"Flow,omitempty"` +} diff --git a/sinker/config_state_check.go b/sinker/config_state_check.go new file mode 100644 index 000000000..7a6301805 --- /dev/null +++ b/sinker/config_state_check.go @@ -0,0 +1,64 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ + +package sinker + +import ( + "time" + + "github.com/orb-community/orb/sinker/config" + "go.uber.org/zap" +) + +const ( + streamID = "orb.sinker" + streamLen = 1000 + CheckerFreq = 5 * time.Minute + DefaultTimeout = 5 * time.Minute +) + +// checkState walks every owner's sink configs and expires the idle ones. +func (svc *SinkerService) checkState(_ time.Time) { + owners, err := svc.sinkerCache.GetAllOwners() + if err != nil { + svc.logger.Error("failed to retrieve the list of owners") + return + } + + for _, ownerID := range owners { + configs, err := svc.sinkerCache.GetAll(ownerID) + if err != nil { + svc.logger.Error("unable to retrieve policy state", zap.Error(err)) + return + } + for _, cfg := range configs { + // Remove the sink config from the cache once it has gone longer than DefaultTimeout without a remote write + if cfg.LastRemoteWrite.Add(DefaultTimeout).Before(time.Now()) { + if cfg.State == config.Active { + if v, ok := cfg.Config["opentelemetry"]; !ok || v != "enabled" { + if err := svc.sinkerCache.Remove(cfg.OwnerID, cfg.SinkID); err != nil { + svc.logger.Error("error updating sink config cache", zap.Error(err)) + return + } + } + } + } + } + } +} + +func (svc *SinkerService) checkSinker() { + svc.checkState(time.Now()) + for { + select { + case <-svc.hbDone: + svc.otelMetricsCancelFunct() + svc.otelLogsCancelFunct() + svc.cancelAsyncContext() + return + case t := <-svc.hbTicker.C: + svc.checkState(t) + } + } +} diff --git a/sinker/message_handler.go b/sinker/message_handler.go new file mode 100644 index 000000000..7db500b25 --- /dev/null +++ b/sinker/message_handler.go @@ -0,0 +1,305 @@ +package sinker + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/google/uuid" + "github.com/mainflux/mainflux/pkg/messaging" + "github.com/orb-community/orb/fleet" + "github.com/orb-community/orb/fleet/pb" + "github.com/orb-community/orb/pkg/types" + pb2 "github.com/orb-community/orb/policies/pb" + "github.com/orb-community/orb/sinker/backend" + "github.com/orb-community/orb/sinker/config" + "github.com/orb-community/orb/sinker/prometheus" + pb3 "github.com/orb-community/orb/sinks/pb" + "go.uber.org/zap" +) + +func (svc SinkerService) remoteWriteToPrometheus(tsList prometheus.TSList, ownerID string, sinkID string) error { + cfgRepo, err := svc.sinkerCache.Get(ownerID, sinkID) + if err != nil { + svc.logger.Error("unable to retrieve the sink config", zap.Error(err)) + return err + } + ctx := context.Background() + otelMetadata, ok := cfgRepo.Config["opentelemetry"] + if ok && otelMetadata == "enabled" { + svc.logger.Info("deprecation warning: opentelemetry sink scraping legacy agent", zap.String("sink-ID", cfgRepo.SinkID)) + ctx = context.WithValue(ctx, "deprecation", "opentelemetry") + } + configMetadata := cfgRepo.Config.GetSubMetadata("exporter") + if configMetadata == nil { + svc.logger.Error("unable to find the exporter configuration for the sink", zap.String("sink_id", sinkID)) + return fmt.Errorf("exporter configuration not found for sink %s", sinkID) + } + cfg := prometheus.NewConfig( + prometheus.WriteURLOption(configMetadata["remote_host"].(string)), + ) + + promClient, err := prometheus.NewClient(cfg) + if err != nil { + svc.logger.Error("unable to construct client", zap.Error(err)) + return err + } + authMetadata := cfgRepo.Config.GetSubMetadata("authentication") + if authMetadata == nil { + svc.logger.Error("unable to find the authentication configuration for the sink", zap.String("sink_id", sinkID)) + return fmt.Errorf("authentication configuration not found for sink %s", sinkID) + } + var headers = make(map[string]string) + headers["Authorization"] = svc.encodeBase64(authMetadata["username"].(string), authMetadata["password"].(string)) + result, writeErr := promClient.WriteTimeSeries(ctx, tsList, prometheus.WriteOptions{Headers: headers}) + if err := error(writeErr); err != nil { + if cfgRepo.Msg != fmt.Sprint(err) { + cfgRepo.State = config.Error + cfgRepo.Msg = fmt.Sprint(err) + cfgRepo.LastRemoteWrite = time.Now() + err := svc.sinkerCache.Edit(cfgRepo) + if err != nil { + svc.logger.Error("error during update sink cache", zap.Error(err)) + return err + } + } + + svc.logger.Error("remote write error", zap.String("sink_id", sinkID), zap.Error(err)) + return err + } + + svc.logger.Debug("successful sink", zap.Int("payload_size_b", result.PayloadSize), + zap.String("sink_id", sinkID)) + + if cfgRepo.State != config.Active { + cfgRepo.State = config.Active + cfgRepo.Msg = "" + cfgRepo.LastRemoteWrite = time.Now() + err := svc.sinkerCache.Edit(cfgRepo) + if err != nil { + return err + } + } + + return nil +} + +func (svc SinkerService) encodeBase64(user string, password string) string { + defer func(t time.Time) { + svc.logger.Debug("encodeBase64 took", zap.String("execution", time.Since(t).String())) + }(time.Now()) + sEnc := base64.URLEncoding.EncodeToString([]byte(user + ":" + password)) + return fmt.Sprintf("Basic %s", sEnc) +} + +func (svc SinkerService) handleMetrics(ctx context.Context, agentID string, channelID string, subtopic string, payload []byte) error { + + // find backend to send it to + beName := strings.Split(subtopic, ".") + if len(beName) < 3 || beName[0] != "be" || beName[2] != "m" { + return fmt.Errorf("invalid subtopic, ignoring: %s", subtopic) + } + if !backend.HaveBackend(beName[1]) { + return fmt.Errorf("unknown agent backend, ignoring: %s", beName[1]) + } + be := backend.GetBackend(beName[1]) + + // unpack metrics RPC + var versionCheck fleet.SchemaVersionCheck + if err := json.Unmarshal(payload, &versionCheck); err != nil { + return fleet.ErrSchemaMalformed + } + if versionCheck.SchemaVersion != fleet.CurrentRPCSchemaVersion { + return fleet.ErrSchemaVersion + } + var rpc fleet.RPC + if err := json.Unmarshal(payload, &rpc); err != nil { + return fleet.ErrSchemaMalformed + } + if rpc.Func != fleet.AgentMetricsRPCFunc { + return fmt.Errorf("unexpected RPC function: %s", rpc.Func) + } + var metricsRPC fleet.AgentMetricsRPC + if err := json.Unmarshal(payload, &metricsRPC); err != nil { + return fleet.ErrSchemaMalformed + } + + agentPb, err := svc.ExtractAgent(ctx, channelID) + if err != nil { + return err + } + + agentName, err := types.NewIdentifier(agentPb.AgentName) + if err != nil { + return err + } + agent := fleet.Agent{ + Name: agentName, + MFOwnerID: agentPb.OwnerID, + MFThingID: agentID, + MFChannelID: channelID, + OrbTags: (*types.Tags)(&agentPb.OrbTags), + AgentTags: agentPb.AgentTags, + } + + for _, metricsPayload := range metricsRPC.Payload { + // this payload loop is per policy. 
each policy has a list of datasets it is associated with, and each dataset may contain multiple sinks + // however, per policy, we want a unique set of sink IDs as we don't want to send the same metrics twice to the same sink for the same policy + datasetSinkIDs := make(map[string]bool) + // first go through the datasets and gather the unique set of sinks we need for this particular policy + err = svc.GetSinks(agent, metricsPayload, datasetSinkIDs) + if err != nil { + return err + } + + // ensure there are sinks + if len(datasetSinkIDs) == 0 { + svc.logger.Error("unable to attach any sinks to policy", zap.String("policy_id", metricsPayload.PolicyID), zap.String("agent_id", agentID), zap.String("owner_id", agent.MFOwnerID)) + continue + } + + // now that we have the sinks, process the metrics for this policy + tsList, err := be.ProcessMetrics(agentPb, agentID, metricsPayload) + if err != nil { + svc.logger.Error("ProcessMetrics failed", zap.String("policy_id", metricsPayload.PolicyID), zap.String("agent_id", agentID), zap.String("owner_id", agent.MFOwnerID), zap.Error(err)) + continue + } + + // finally, sink this policy + svc.SinkPolicy(agent, metricsPayload, datasetSinkIDs, tsList) + } + + return nil +} + +func (svc SinkerService) ExtractAgent(ctx context.Context, channelID string) (*pb.AgentInfoRes, error) { + agentPb, err := svc.fleetClient.RetrieveAgentInfoByChannelID(ctx, &pb.AgentInfoByChannelIDReq{Channel: channelID}) + if err != nil { + return nil, err + } + return agentPb, nil +} + +func (svc SinkerService) SinkPolicy(agent fleet.Agent, metricsPayload fleet.AgentMetricsRPCPayload, datasetSinkIDs map[string]bool, tsList []prometheus.TimeSeries) { + sinkIDList := make([]string, len(datasetSinkIDs)) + i := 0 + for k := range datasetSinkIDs { + sinkIDList[i] = k + i++ + } + svc.logger.Info("sinking agent metric RPC", + zap.String("owner_id", agent.MFOwnerID), + zap.String("agent", agent.Name.String()), + zap.String("policy", metricsPayload.PolicyName), + zap.String("policy_id", metricsPayload.PolicyID), + zap.Strings("sinks", sinkIDList)) + + for _, id := range sinkIDList { + err := svc.remoteWriteToPrometheus(tsList, agent.MFOwnerID, id) + if err != nil { + svc.logger.Warn(fmt.Sprintf("unable to remote write to sinkID: %s", id), zap.String("policy_id", metricsPayload.PolicyID), zap.String("agent_id", agent.MFThingID), zap.String("owner_id", agent.MFOwnerID), zap.Error(err)) + } + + // send operational metrics + labels := []string{ + "method", "sinker_payload_size", + "agent_id", agent.MFThingID, + "agent", agent.Name.String(), + "policy_id", metricsPayload.PolicyID, + "policy", metricsPayload.PolicyName, + "sink_id", id, + "owner_id", agent.MFOwnerID, + } + svc.requestCounter.With(labels...).Add(1) + svc.requestGauge.With(labels...).Add(float64(len(metricsPayload.Data))) + } +} + +func (svc SinkerService) GetSinks(agent fleet.Agent, agentMetricsRPCPayload fleet.AgentMetricsRPCPayload, datasetSinkIDs map[string]bool) error { + for _, ds := range agentMetricsRPCPayload.Datasets { + if ds == "" { + svc.logger.Error("malformed agent RPC: empty dataset", zap.String("agent_id", agent.MFThingID), zap.String("owner_id", agent.MFOwnerID)) + continue + } + dataset, err := svc.policiesClient.RetrieveDataset(context.Background(), &pb2.DatasetByIDReq{ + DatasetID: ds, + OwnerID: agent.MFOwnerID, + }) + if err != nil { + svc.logger.Error("unable to retrieve dataset", zap.String("dataset_id", ds), zap.String("owner_id", agent.MFOwnerID), zap.Error(err)) + continue + } + for _, sid := range 
dataset.SinkIds { + if !svc.sinkerCache.Exists(agent.MFOwnerID, sid) { + // Use the retrieved sinkID to get the backend config + sink, err := svc.sinksClient.RetrieveSink(context.Background(), &pb3.SinkByIDReq{ + SinkID: sid, + OwnerID: agent.MFOwnerID, + }) + if err != nil { + return err + } + + var data config.SinkConfig + if err := json.Unmarshal(sink.Config, &data); err != nil { + return err + } + + data.SinkID = sid + data.OwnerID = agent.MFOwnerID + err = svc.sinkerCache.Add(data) + if err != nil { + return err + } + } + datasetSinkIDs[sid] = true + } + } + return nil +} + +func (svc SinkerService) handleMsgFromAgent(msg messaging.Message) error { + inputContext := context.WithValue(context.Background(), "trace-id", uuid.NewString()) + go func(ctx context.Context) { + defer func(t time.Time) { + svc.logger.Info("message consumption time", zap.String("execution", time.Since(t).String())) + }(time.Now()) + // NOTE: we need to consider ALL input from the agent as untrusted, the same as untrusted HTTP API would be + var payload map[string]interface{} + if err := json.Unmarshal(msg.Payload, &payload); err != nil { + svc.logger.Error("metrics processing failure", zap.Any("trace-id", ctx.Value("trace-id")), zap.Error(err)) + return + } + + svc.logger.Debug("received agent message", + zap.String("subtopic", msg.Subtopic), + zap.String("channel", msg.Channel), + zap.String("protocol", msg.Protocol), + zap.Int64("created", msg.Created), + zap.String("publisher", msg.Publisher)) + + labels := []string{ + "method", "handleMsgFromAgent", + "agent_id", msg.Publisher, + "subtopic", msg.Subtopic, + "channel", msg.Channel, + "protocol", msg.Protocol, + } + svc.messageInputCounter.With(labels...).Add(1) + + if len(msg.Payload) > MaxMsgPayloadSize { + svc.logger.Error("metrics processing failure", zap.Any("trace-id", ctx.Value("trace-id")), zap.Error(ErrPayloadTooBig)) + return + } + + if err := svc.handleMetrics(ctx, msg.Publisher, msg.Channel, msg.Subtopic, msg.Payload); err != nil { + svc.logger.Error("metrics processing failure", zap.Any("trace-id", ctx.Value("trace-id")), zap.Error(err)) + return + } + }(inputContext) + + return nil +} diff --git a/sinker/otel/bridgeservice/bridge.go b/sinker/otel/bridgeservice/bridge.go index bd348758e..db8053531 100644 --- a/sinker/otel/bridgeservice/bridge.go +++ b/sinker/otel/bridgeservice/bridge.go @@ -2,8 +2,9 @@ package bridgeservice import ( "context" + "encoding/json" "fmt" - "github.com/orb-community/orb/sinker/redis/producer" + "github.com/orb-community/orb/pkg/types" sinkspb "github.com/orb-community/orb/sinks/pb" "sort" "time" @@ -11,6 +12,7 @@ import ( "github.com/go-kit/kit/metrics" fleetpb "github.com/orb-community/orb/fleet/pb" policiespb "github.com/orb-community/orb/policies/pb" + "github.com/orb-community/orb/sinker/config" "github.com/patrickmn/go-cache" "go.uber.org/zap" ) @@ -26,7 +28,7 @@ type BridgeService interface { func NewBridgeService(logger *zap.Logger, defaultCacheExpiration time.Duration, - sinkActivity producer.SinkActivityProducer, + sinkerCache config.ConfigRepo, policiesClient policiespb.PolicyServiceClient, sinksClient sinkspb.SinkServiceClient, fleetClient fleetpb.FleetServiceClient, messageInputCounter metrics.Counter) SinkerOtelBridgeService { @@ -34,7 +36,7 @@ func NewBridgeService(logger *zap.Logger, defaultCacheExpiration: defaultCacheExpiration, inMemoryCache: *cache.New(defaultCacheExpiration, defaultCacheExpiration*2), logger: logger, - sinkerActivitySvc: sinkActivity, + sinkerCache: sinkerCache, policiesClient: 
policiesClient,
 		fleetClient:            fleetClient,
 		sinksClient:            sinksClient,
@@ -46,14 +48,14 @@ type SinkerOtelBridgeService struct {
 	inMemoryCache          cache.Cache
 	defaultCacheExpiration time.Duration
 	logger                 *zap.Logger
-	sinkerActivitySvc      producer.SinkActivityProducer
+	sinkerCache            config.ConfigRepo
 	policiesClient         policiespb.PolicyServiceClient
 	fleetClient            fleetpb.FleetServiceClient
 	sinksClient            sinkspb.SinkServiceClient
 	messageInputCounter    metrics.Counter
 }
 
-// IncrementMessageCounter add to our metrics the number of messages received
+// IncrementMessageCounter adds the number of received messages to our metrics
 func (bs *SinkerOtelBridgeService) IncrementMessageCounter(publisher, subtopic, channel, protocol string) {
 	labels := []string{
 		"method", "handleMsgFromAgent",
@@ -65,34 +67,71 @@ func (bs *SinkerOtelBridgeService) IncrementMessageCounter(publisher, subtopic,
 	bs.messageInputCounter.With(labels...).Add(1)
 }
 
-// NotifyActiveSink notify the sinker that a sink is active
-func (bs *SinkerOtelBridgeService) NotifyActiveSink(ctx context.Context, mfOwnerId, sinkId, size string) error {
-	cacheKey := fmt.Sprintf("active_sink-%s-%s", mfOwnerId, sinkId)
-	_, found := bs.inMemoryCache.Get(cacheKey)
-	if !found {
-		bs.logger.Debug("notifying active sink", zap.String("sink_id", sinkId), zap.String("owner_id", mfOwnerId),
-			zap.String("payload_size", size))
-		event := producer.SinkActivityEvent{
-			OwnerID:   mfOwnerId,
-			SinkID:    sinkId,
-			State:     "active",
-			Size:      size,
-			Timestamp: time.Now(),
+func (bs *SinkerOtelBridgeService) NotifyActiveSink(ctx context.Context, mfOwnerId, sinkId, newState, message string) error {
+	cfgRepo, err := bs.sinkerCache.Get(mfOwnerId, sinkId)
+	if err != nil {
+		bs.logger.Error("unable to retrieve the sink config", zap.Error(err))
+		sinkData, _ := bs.sinksClient.RetrieveSink(ctx, &sinkspb.SinkByIDReq{
+			SinkID:  sinkId,
+			OwnerID: mfOwnerId,
+		})
+		var metadata types.Metadata
+		_ = json.Unmarshal(sinkData.Config, &metadata)
+		cfgRepo = config.SinkConfig{
+			SinkID:  sinkId,
+			OwnerID: mfOwnerId,
+			Config:  metadata,
+			State:   config.Active,
+			Msg:     "",
 		}
-		err := bs.sinkerActivitySvc.PublishSinkActivity(ctx, event)
+		err = bs.sinkerCache.DeployCollector(ctx, cfgRepo)
 		if err != nil {
-			bs.logger.Error("error publishing sink activity", zap.Error(err))
+			bs.logger.Error("error during update sink cache", zap.String("sinkId", sinkId), zap.Error(err))
+			return err
+		}
+	}
+
+	// only update the sink state if the current status is Idle or Unknown
+	if cfgRepo.State == config.Idle || cfgRepo.State == config.Unknown {
+		cfgRepo.LastRemoteWrite = time.Now()
+		// only deploy a collector if the new state is "active" and the current state is not Active
+		if newState == "active" && cfgRepo.State != config.Active {
+			err = cfgRepo.State.SetFromString(newState)
+			if err != nil {
+				bs.logger.Error("unable to set state", zap.String("new_state", newState), zap.Error(err))
+				return err
+			}
+			err = bs.sinkerCache.AddActivity(mfOwnerId, sinkId)
+			if err != nil {
+				bs.logger.Error("error during update last remote write", zap.String("sinkId", sinkId), zap.Error(err))
+				return err
+			}
+			err = bs.sinkerCache.DeployCollector(ctx, cfgRepo)
+			if err != nil {
+				bs.logger.Error("error during update sink cache", zap.String("sinkId", sinkId), zap.Error(err))
+				return err
+			}
+			bs.logger.Info("waking up sink to active", zap.String("sinkID", sinkId), zap.String("newState", newState), zap.Any("currentState", cfgRepo.State))
+		} else {
+			err = bs.sinkerCache.AddActivity(mfOwnerId, sinkId)
+			if err != nil {
+				bs.logger.Error("error during update last remote write", zap.String("sinkId", sinkId),
zap.Error(err)) + return err + } + bs.logger.Info("registering sink activity", zap.String("sinkID", sinkId), zap.String("newState", newState), zap.Any("currentState", cfgRepo.State)) } - bs.inMemoryCache.Set(cacheKey, true, cache.DefaultExpiration) } else { - bs.logger.Debug("active sink already notified", zap.String("sink_id", sinkId), zap.String("owner_id", mfOwnerId), - zap.String("payload_size", size)) + err = bs.sinkerCache.AddActivity(mfOwnerId, sinkId) + if err != nil { + bs.logger.Error("error during update last remote write", zap.String("sinkId", sinkId), zap.Error(err)) + return err + } + bs.logger.Info("registering sink activity", zap.String("sinkID", sinkId), zap.String("newState", newState), zap.Any("currentState", cfgRepo.State)) } return nil } -// ExtractAgent retrieve agent info from fleet, or cache func (bs *SinkerOtelBridgeService) ExtractAgent(ctx context.Context, channelID string) (*fleetpb.AgentInfoRes, error) { cacheKey := fmt.Sprintf("agent-%s", channelID) value, found := bs.inMemoryCache.Get(cacheKey) @@ -107,7 +146,6 @@ func (bs *SinkerOtelBridgeService) ExtractAgent(ctx context.Context, channelID s return value.(*fleetpb.AgentInfoRes), nil } -// GetPolicyName retrieve policy info from policies service, or cache. func (bs *SinkerOtelBridgeService) GetPolicyName(ctx context.Context, policyId, ownerID string) (*policiespb.PolicyRes, error) { cacheKey := fmt.Sprintf("policy-%s", policyId) value, found := bs.inMemoryCache.Get(cacheKey) @@ -122,7 +160,6 @@ func (bs *SinkerOtelBridgeService) GetPolicyName(ctx context.Context, policyId, return value.(*policiespb.PolicyRes), nil } -// GetSinkIdsFromDatasetIDs retrieve sink_ids from datasets from policies service, or cache func (bs *SinkerOtelBridgeService) GetSinkIdsFromDatasetIDs(ctx context.Context, mfOwnerId string, datasetIDs []string) (map[string]string, error) { // Here needs to retrieve datasets mapSinkIdPolicy := make(map[string]string) diff --git a/sinker/otel/kafkafanoutexporter/kafka_exporter.go b/sinker/otel/kafkafanoutexporter/kafka_exporter.go index cfb252d51..d07474126 100644 --- a/sinker/otel/kafkafanoutexporter/kafka_exporter.go +++ b/sinker/otel/kafkafanoutexporter/kafka_exporter.go @@ -50,7 +50,7 @@ func (ke kafkaErrors) Error() string { func (e *kafkaTracesProducer) tracesPusher(ctx context.Context, td ptrace.Traces) error { sinkId := ctx.Value("sink_id").(string) topic := e.topic + "-" + sinkId - e.logger.Debug("Pushing traces to kafka topic = " + topic) + e.logger.Info("Pushing traces to kafka topic = " + topic) messages, err := e.marshaler.Marshal(td, topic) if err != nil { return consumererror.NewPermanent(err) @@ -83,7 +83,7 @@ type kafkaMetricsProducer struct { func (e *kafkaMetricsProducer) metricsDataPusher(ctx context.Context, md pmetric.Metrics) error { sinkId := ctx.Value("sink_id").(string) topic := e.topic + "-" + sinkId - e.logger.Debug("Pushing metrics to kafka topic = " + topic) + e.logger.Info("Pushing metrics to kafka topic = " + topic) messages, err := e.marshaler.Marshal(md, topic) if err != nil { return consumererror.NewPermanent(err) @@ -116,7 +116,7 @@ type kafkaLogsProducer struct { func (e *kafkaLogsProducer) logsDataPusher(ctx context.Context, ld plog.Logs) error { sinkId := ctx.Value("sink_id").(string) topic := e.topic + "-" + sinkId - e.logger.Debug("Pushing logs to kafka topic = " + topic) + e.logger.Info("Pushing logs to kafka topic = " + topic) messages, err := e.marshaler.Marshal(ld, topic) if err != nil { return consumererror.NewPermanent(err) diff --git 
a/sinker/otel/orbreceiver/logs.go b/sinker/otel/orbreceiver/logs.go index bff9a860b..86f844499 100644 --- a/sinker/otel/orbreceiver/logs.go +++ b/sinker/otel/orbreceiver/logs.go @@ -6,7 +6,6 @@ package orbreceiver import ( "context" - "strconv" "strings" "github.com/mainflux/mainflux/pkg/messaging" @@ -32,7 +31,6 @@ func (r *OrbReceiver) MessageLogsInbound(msg messaging.Message) error { zap.Int64("created", msg.Created), zap.String("publisher", msg.Publisher)) r.cfg.Logger.Info("received log message, pushing to kafka exporter") - size := len(msg.Payload) decompressedPayload := r.DecompressBrotli(msg.Payload) lr, err := r.encoder.unmarshalLogsRequest(decompressedPayload) if err != nil { @@ -49,13 +47,13 @@ func (r *OrbReceiver) MessageLogsInbound(msg messaging.Message) error { scopes := lr.Logs().ResourceLogs().At(0).ScopeLogs() for i := 0; i < scopes.Len(); i++ { - r.ProccessLogsContext(scopes.At(i), msg.Channel, size) + r.ProccessLogsContext(scopes.At(i), msg.Channel) } }() return nil } -func (r *OrbReceiver) ProccessLogsContext(scope plog.ScopeLogs, channel string, size int) { +func (r *OrbReceiver) ProccessLogsContext(scope plog.ScopeLogs, channel string) { // Extract Datasets attrDataset, ok := scope.Scope().Attributes().Get("dataset_ids") if !ok { @@ -109,6 +107,7 @@ func (r *OrbReceiver) ProccessLogsContext(scope plog.ScopeLogs, channel string, attributeCtx = context.WithValue(attributeCtx, "agent_groups", agentPb.AgentGroupIDs) attributeCtx = context.WithValue(attributeCtx, "agent_ownerID", agentPb.OwnerID) for sinkId := range sinkIds { + err := r.cfg.SinkerService.NotifyActiveSink(r.ctx, agentPb.OwnerID, sinkId, "active", "") if err != nil { r.cfg.Logger.Error("error notifying logs sink active, changing state, skipping sink", zap.String("sink-id", sinkId), zap.Error(err)) continue @@ -122,10 +121,8 @@ func (r *OrbReceiver) ProccessLogsContext(scope plog.ScopeLogs, channel string, _, err = r.exportLogs(attributeCtx, request) if err != nil { r.cfg.Logger.Error("error during logs export, skipping sink", zap.Error(err)) - _ = r.cfg.SinkerService.NotifyActiveSink(r.ctx, agentPb.OwnerID, sinkId, "0") + _ = r.cfg.SinkerService.NotifyActiveSink(r.ctx, agentPb.OwnerID, sinkId, "error", err.Error()) continue - } else { - _ = r.cfg.SinkerService.NotifyActiveSink(r.ctx, agentPb.OwnerID, sinkId, strconv.Itoa(size)) } } } diff --git a/sinker/otel/orbreceiver/metrics.go b/sinker/otel/orbreceiver/metrics.go index a847b11dd..823c61741 100644 --- a/sinker/otel/orbreceiver/metrics.go +++ b/sinker/otel/orbreceiver/metrics.go @@ -6,7 +6,6 @@ package orbreceiver import ( "context" - "strconv" "strings" "time" @@ -33,8 +32,7 @@ func (r *OrbReceiver) MessageMetricsInbound(msg messaging.Message) error { zap.String("protocol", msg.Protocol), zap.Int64("created", msg.Created), zap.String("publisher", msg.Publisher)) - r.cfg.Logger.Debug("received metric message, pushing to kafka exporter", zap.String("publisher", msg.Publisher)) - size := len(msg.Payload) + r.cfg.Logger.Info("received metric message, pushing to kafka exporter") decompressedPayload := r.DecompressBrotli(msg.Payload) mr, err := r.encoder.unmarshalMetricsRequest(decompressedPayload) if err != nil { @@ -51,13 +49,13 @@ func (r *OrbReceiver) MessageMetricsInbound(msg messaging.Message) error { scopes := mr.Metrics().ResourceMetrics().At(0).ScopeMetrics() for i := 0; i < scopes.Len(); i++ { - r.ProccessMetricsContext(scopes.At(i), msg.Channel, size) + r.ProccessMetricsContext(scopes.At(i), msg.Channel) } }() return nil } -func (r *OrbReceiver) 
ProccessMetricsContext(scope pmetric.ScopeMetrics, channel string, size int) { +func (r *OrbReceiver) ProccessMetricsContext(scope pmetric.ScopeMetrics, channel string) { // Extract Datasets attrDataset, ok := scope.Scope().Attributes().Get("dataset_ids") if !ok { @@ -112,9 +110,8 @@ func (r *OrbReceiver) ProccessMetricsContext(scope pmetric.ScopeMetrics, channel attributeCtx = context.WithValue(attributeCtx, "orb_tags", agentPb.OrbTags) attributeCtx = context.WithValue(attributeCtx, "agent_groups", agentPb.AgentGroupIDs) attributeCtx = context.WithValue(attributeCtx, "agent_ownerID", agentPb.OwnerID) - for sinkId := range sinkIds { - err := r.cfg.SinkerService.NotifyActiveSink(r.ctx, agentPb.OwnerID, sinkId, strconv.Itoa(size)) + err := r.cfg.SinkerService.NotifyActiveSink(r.ctx, agentPb.OwnerID, sinkId, "active", "") if err != nil { r.cfg.Logger.Error("error notifying metrics sink active, changing state, skipping sink", zap.String("sink-id", sinkId), zap.Error(err)) } @@ -127,6 +124,7 @@ func (r *OrbReceiver) ProccessMetricsContext(scope pmetric.ScopeMetrics, channel _, err = r.exportMetrics(attributeCtx, request) if err != nil { r.cfg.Logger.Error("error during metrics export, skipping sink", zap.Error(err)) + _ = r.cfg.SinkerService.NotifyActiveSink(r.ctx, agentPb.OwnerID, sinkId, "error", err.Error()) continue } } diff --git a/sinker/otel/orbreceiver/traces.go b/sinker/otel/orbreceiver/traces.go index af2bdbab3..13486c32d 100644 --- a/sinker/otel/orbreceiver/traces.go +++ b/sinker/otel/orbreceiver/traces.go @@ -6,7 +6,6 @@ package orbreceiver import ( "context" - "strconv" "strings" "github.com/mainflux/mainflux/pkg/messaging" @@ -32,7 +31,6 @@ func (r *OrbReceiver) MessageTracesInbound(msg messaging.Message) error { zap.Int64("created", msg.Created), zap.String("publisher", msg.Publisher)) r.cfg.Logger.Info("received trace message, pushing to kafka exporter") - size := len(msg.Payload) decompressedPayload := r.DecompressBrotli(msg.Payload) tr, err := r.encoder.unmarshalTracesRequest(decompressedPayload) if err != nil { @@ -49,13 +47,13 @@ func (r *OrbReceiver) MessageTracesInbound(msg messaging.Message) error { scopes := tr.Traces().ResourceSpans().At(0).ScopeSpans() for i := 0; i < scopes.Len(); i++ { - r.ProccessTracesContext(scopes.At(i), msg.Channel, size) + r.ProccessTracesContext(scopes.At(i), msg.Channel) } }() return nil } -func (r *OrbReceiver) ProccessTracesContext(scope ptrace.ScopeSpans, channel string, size int) { +func (r *OrbReceiver) ProccessTracesContext(scope ptrace.ScopeSpans, channel string) { // Extract Datasets attrDataset, ok := scope.Scope().Attributes().Get("dataset_ids") if !ok { @@ -108,9 +106,8 @@ func (r *OrbReceiver) ProccessTracesContext(scope ptrace.ScopeSpans, channel str attributeCtx = context.WithValue(attributeCtx, "orb_tags", agentPb.OrbTags) attributeCtx = context.WithValue(attributeCtx, "agent_groups", agentPb.AgentGroupIDs) attributeCtx = context.WithValue(attributeCtx, "agent_ownerID", agentPb.OwnerID) - for sinkId := range sinkIds { - err := r.cfg.SinkerService.NotifyActiveSink(r.ctx, agentPb.OwnerID, sinkId, strconv.Itoa(size)) + err := r.cfg.SinkerService.NotifyActiveSink(r.ctx, agentPb.OwnerID, sinkId, "active", "") if err != nil { r.cfg.Logger.Error("error notifying sink active, changing state, skipping sink", zap.String("sink-id", sinkId), zap.Error(err)) continue @@ -124,6 +121,7 @@ func (r *OrbReceiver) ProccessTracesContext(scope ptrace.ScopeSpans, channel str _, err = r.exportTraces(attributeCtx, request) if err != nil { 
r.cfg.Logger.Error("error during export, skipping sink", zap.Error(err)) + _ = r.cfg.SinkerService.NotifyActiveSink(r.ctx, agentPb.OwnerID, sinkId, "error", err.Error()) continue } } diff --git a/sinker/prometheus/client.go b/sinker/prometheus/client.go new file mode 100644 index 000000000..211224770 --- /dev/null +++ b/sinker/prometheus/client.go @@ -0,0 +1,301 @@ +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package prometheus + +import ( + "bytes" + "context" + "errors" + "fmt" + "io/ioutil" + "net/http" + "time" + + "github.com/golang/protobuf/proto" + "github.com/golang/snappy" + "github.com/prometheus/prometheus/prompb" +) + +const ( + defaulHTTPClientTimeout = 10 * time.Second + defaultUserAgent = "orb-promremote-go/1.0.0" +) + +// DefaultConfig represents the default configuration used to construct a client. +var DefaultConfig = Config{ + HTTPClientTimeout: defaulHTTPClientTimeout, + UserAgent: defaultUserAgent, +} + +// Label is a metric label. +type Label struct { + Name string + Value string +} + +// TimeSeries are made of labels and a datapoint. +type TimeSeries struct { + Labels []Label + Datapoint Datapoint +} + +// TSList is a slice of TimeSeries. +type TSList []TimeSeries + +// A Datapoint is a single data value reported at a given time. +type Datapoint struct { + Timestamp time.Time + Value float64 +} + +// Client is used to write timeseries data to a Prom remote write endpoint +type Client interface { + // WriteProto writes the Prom proto WriteRequest to the specified endpoint. + WriteProto( + ctx context.Context, + req *prompb.WriteRequest, + opts WriteOptions, + ) (WriteResult, WriteError) + + // WriteTimeSeries converts the []TimeSeries to Protobuf then writes it to the specified endpoint. + WriteTimeSeries( + ctx context.Context, + ts TSList, + opts WriteOptions, + ) (WriteResult, WriteError) +} + +// WriteOptions specifies additional write options. +type WriteOptions struct { + // Headers to append or override the outgoing headers. + Headers map[string]string +} + +// WriteResult returns the successful HTTP status code. +type WriteResult struct { + StatusCode int + PayloadSize int +} + +// WriteError is an error that can also return the HTTP status code +// if the response is what caused an error. +type WriteError interface { + error + StatusCode() int +} + +// Config defines the configuration used to construct a client. 
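+//
+// Fields are normally populated through NewConfig and the ConfigOption
+// helpers below rather than by filling in a Config literal directly.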
+type Config struct { + // WriteURL is the URL which the client uses to write to prometheus. + WriteURL string `yaml:"writeURL"` + + //HTTPClientTimeout is the timeout that is set for the client. + HTTPClientTimeout time.Duration `yaml:"httpClientTimeout"` + + // If not nil, http client is used instead of constructing one. + HTTPClient *http.Client + + // UserAgent is the `User-Agent` header in the request. + UserAgent string `yaml:"userAgent"` +} + +// ConfigOption defines a config option that can be used when constructing a client. +type ConfigOption func(*Config) + +// NewConfig creates a new Config struct based on options passed to the function. +func NewConfig(opts ...ConfigOption) Config { + cfg := DefaultConfig + for _, opt := range opts { + opt(&cfg) + } + + return cfg +} + +func (c Config) validate() error { + if c.HTTPClientTimeout <= 0 { + return fmt.Errorf("http client timeout should be greater than 0: %d", c.HTTPClientTimeout) + } + + if c.UserAgent == "" { + return errors.New("User-Agent should not be blank") + } + + return nil +} + +// WriteURLOption sets the URL which the client uses to write to prometheus. +func WriteURLOption(writeURL string) ConfigOption { + return func(c *Config) { + c.WriteURL = writeURL + } +} + +// HTTPClientTimeoutOption sets the timeout that is set for the client. +func HTTPClientTimeoutOption(httpClientTimeout time.Duration) ConfigOption { + return func(c *Config) { + c.HTTPClientTimeout = httpClientTimeout + } +} + +// HTTPClientOption sets the HTTP client that is set for the client. +func HTTPClientOption(httpClient *http.Client) ConfigOption { + return func(c *Config) { + c.HTTPClient = httpClient + } +} + +// UserAgent sets the `User-Agent` header in the request. +func UserAgent(userAgent string) ConfigOption { + return func(c *Config) { + c.UserAgent = userAgent + } +} + +type client struct { + writeURL string + httpClient *http.Client + userAgent string +} + +// NewClient creates a new remote write coordinator client. 
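+//
+// A minimal usage sketch (the write URL below is illustrative only, not an
+// endpoint defined in this repo; any remote-write-compatible endpoint works):
+//
+//	cfg := NewConfig(WriteURLOption("http://cortex:9009/api/v1/push"))
+//	c, err := NewClient(cfg)
+//	if err != nil {
+//		// handle invalid config (zero timeout, blank User-Agent)
+//	}
+//	res, werr := c.WriteTimeSeries(ctx, tsList, WriteOptions{})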
+func NewClient(c Config) (Client, error) { + if err := c.validate(); err != nil { + return nil, err + } + + httpClient := &http.Client{ + Timeout: c.HTTPClientTimeout, + } + + if c.HTTPClient != nil { + httpClient = c.HTTPClient + } + + return &client{ + writeURL: c.WriteURL, + httpClient: httpClient, + }, nil +} + +func (c *client) WriteTimeSeries( + ctx context.Context, + seriesList TSList, + opts WriteOptions, +) (WriteResult, WriteError) { + return c.WriteProto(ctx, seriesList.toPromWriteRequest(), opts) +} + +func (c *client) WriteProto( + ctx context.Context, + promWR *prompb.WriteRequest, + opts WriteOptions, +) (WriteResult, WriteError) { + var result WriteResult + data, err := proto.Marshal(promWR) + if err != nil { + return result, writeError{err: fmt.Errorf("unable to marshal protobuf: %v", err)} + } + + encoded := snappy.Encode(nil, data) + result.PayloadSize = len(encoded) + + body := bytes.NewReader(encoded) + req, err := http.NewRequest("POST", c.writeURL, body) + if err != nil { + return result, writeError{err: err} + } + + req.Header.Set("Content-Type", "application/x-protobuf") + req.Header.Set("Content-Encoding", "snappy") + req.Header.Set("User-Agent", c.userAgent) + req.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0") + if opts.Headers != nil { + for k, v := range opts.Headers { + req.Header.Set(k, v) + } + } + + resp, err := c.httpClient.Do(req.WithContext(ctx)) + if err != nil { + return result, writeError{err: err} + } + + result.StatusCode = resp.StatusCode + + defer resp.Body.Close() + + if result.StatusCode < 200 || result.StatusCode > 299 { + writeErr := writeError{ + err: fmt.Errorf("expected 2xx status code: actual=%d", resp.StatusCode), + code: result.StatusCode, + } + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + writeErr.err = fmt.Errorf("%v, body_read_error=%s", writeErr.err, err) + return result, writeErr + } + + writeErr.err = fmt.Errorf("%v, body=%s", writeErr.err, body) + return result, writeErr + } + + return result, nil +} + +// toPromWriteRequest converts a list of timeseries to a Prometheus proto write request. +func (t TSList) toPromWriteRequest() *prompb.WriteRequest { + promTS := make([]prompb.TimeSeries, len(t)) + + for i, ts := range t { + labels := make([]prompb.Label, len(ts.Labels)) + for j, label := range ts.Labels { + labels[j] = prompb.Label{Name: label.Name, Value: label.Value} + } + + sample := []prompb.Sample{prompb.Sample{ + // Timestamp is int milliseconds for remote write. + Timestamp: ts.Datapoint.Timestamp.UnixNano() / int64(time.Millisecond), + Value: ts.Datapoint.Value, + }} + promTS[i] = prompb.TimeSeries{Labels: labels, Samples: sample} + } + + return &prompb.WriteRequest{ + Timeseries: promTS, + } +} + +type writeError struct { + err error + code int +} + +func (e writeError) Error() string { + return e.err.Error() +} + +// StatusCode returns the HTTP status code of the error if error +// was caused by the response, otherwise it will be just zero. 
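+//
+// A zero code therefore means the request failed before an HTTP response was
+// received (marshalling or transport error) rather than via a non-2xx reply.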
+func (e writeError) StatusCode() int { + return e.code +} diff --git a/sinker/redis/consumer/docs.go b/sinker/redis/consumer/docs.go new file mode 100644 index 000000000..b78b46cb0 --- /dev/null +++ b/sinker/redis/consumer/docs.go @@ -0,0 +1 @@ +package consumer diff --git a/sinker/redis/consumer/events.go b/sinker/redis/consumer/events.go new file mode 100644 index 000000000..9d1639e90 --- /dev/null +++ b/sinker/redis/consumer/events.go @@ -0,0 +1,22 @@ +// Copyright (c) Mainflux +// SPDX-License-Identifier: Apache-2.0 + +// Adapted for Orb project, modifications licensed under MPL v. 2.0: +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ + +package consumer + +import ( + "time" + + "github.com/orb-community/orb/pkg/types" +) + +type UpdateSinkEvent struct { + SinkID string + Owner string + Config types.Metadata + Timestamp time.Time +} diff --git a/sinker/redis/consumer/sink_key_expire.go b/sinker/redis/consumer/sink_key_expire.go deleted file mode 100644 index 8e2318c27..000000000 --- a/sinker/redis/consumer/sink_key_expire.go +++ /dev/null @@ -1,73 +0,0 @@ -package consumer - -import ( - "context" - "strconv" - - "github.com/go-redis/redis/v8" - "github.com/orb-community/orb/sinker/redis/producer" - "go.uber.org/zap" -) - -type SinkerKeyExpirationListener interface { - // SubscribeToKeyExpiration Listen to the sinker key expiration - SubscribeToKeyExpiration(ctx context.Context) error - // ReceiveMessage to be used to receive the message from the sinker key expiration, async - ReceiveMessage(ctx context.Context, message string) error -} - -type sinkerKeyExpirationListener struct { - logger *zap.Logger - cacheRedisClient *redis.Client - idleProducer producer.SinkIdleProducer -} - -func NewSinkerKeyExpirationListener(l *zap.Logger, cacheRedisClient *redis.Client, idleProducer producer.SinkIdleProducer) SinkerKeyExpirationListener { - logger := l.Named("sinker_key_expiration_listener") - return &sinkerKeyExpirationListener{logger: logger, cacheRedisClient: cacheRedisClient, idleProducer: idleProducer} -} - -// SubscribeToKeyExpiration to be used to subscribe to the sinker key expiration -func (s *sinkerKeyExpirationListener) SubscribeToKeyExpiration(ctx context.Context) error { - go func() { - redisDB := strconv.Itoa(s.cacheRedisClient.Options().DB) - pubsub := s.cacheRedisClient.PSubscribe(ctx, "__keyevent@"+redisDB+"__:expired") - defer func(pubsub *redis.PubSub) { - _ = pubsub.Close() - }(pubsub) - for { - select { - case <-ctx.Done(): - return - default: - msg, _ := pubsub.ReceiveMessage(ctx) - s.logger.Info("key expired", zap.String("key", msg.Payload)) - subCtx := context.WithValue(ctx, "msg", msg.Payload) - err := s.ReceiveMessage(subCtx, msg.Payload) - if err != nil { - s.logger.Error("error receiving message", zap.Error(err)) - return - } - } - } - }() - return nil -} - -// ReceiveMessage to be used to receive the message from the sinker key expiration -func (s *sinkerKeyExpirationListener) ReceiveMessage(ctx context.Context, message string) error { - // goroutine - go func(msg string) { - ownerID := message[15:51] - sinkID := message[52:] - event := producer.SinkIdleEvent{ - OwnerID: ownerID, - SinkID: sinkID, - State: "idle", - Size: "0", - } - s.logger.Info("publishing sink idle event", zap.Any("event", event)) - _ = s.idleProducer.PublishSinkIdle(ctx, event) - }(message) - return nil -} diff --git 
a/sinker/redis/consumer/streams.go b/sinker/redis/consumer/streams.go new file mode 100644 index 000000000..2faae6d84 --- /dev/null +++ b/sinker/redis/consumer/streams.go @@ -0,0 +1,222 @@ +package consumer + +import ( + "context" + "encoding/json" + "fmt" + "github.com/orb-community/orb/pkg/errors" + "time" + + "github.com/go-redis/redis/v8" + "github.com/orb-community/orb/pkg/types" + "github.com/orb-community/orb/sinker" + "github.com/orb-community/orb/sinker/config" + "go.uber.org/zap" +) + +const ( + stream = "orb.sinks" + group = "orb.sinker" + + sinksPrefix = "sinks." + sinksUpdate = sinksPrefix + "update" + sinksCreate = sinksPrefix + "create" + sinksDelete = sinksPrefix + "remove" + exists = "BUSYGROUP Consumer Group name already exists" +) + +type Subscriber interface { + Subscribe(context context.Context) error +} + +type eventStore struct { + otelEnabled bool + sinkerService sinker.Service + configRepo config.ConfigRepo + client *redis.Client + esconsumer string + logger *zap.Logger +} + +func (es eventStore) Subscribe(context context.Context) error { + subGroup := group + if es.otelEnabled { + subGroup = group + ".otel" + } + err := es.client.XGroupCreateMkStream(context, stream, subGroup, "$").Err() + if err != nil && err.Error() != exists { + return err + } + + for { + streams, err := es.client.XReadGroup(context, &redis.XReadGroupArgs{ + Group: subGroup, + Consumer: es.esconsumer, + Streams: []string{stream, ">"}, + Count: 100, + }).Result() + if err != nil || len(streams) == 0 { + continue + } + + for _, msg := range streams[0].Messages { + event := msg.Values + + var err error + switch event["operation"] { + case sinksCreate: + rte, derr := decodeSinksCreate(event) + if derr != nil { + err = derr + break + } + err = es.handleSinksCreate(context, rte) + case sinksUpdate: + rte, derr := decodeSinksUpdate(event) + if derr != nil { + err = derr + break + } + err = es.handleSinksUpdate(context, rte) + case sinksDelete: + rte, derr := decodeSinksRemove(event) + if derr != nil { + err = derr + break + } + err = es.handleSinksRemove(context, rte) + } + if err != nil { + es.logger.Error("Failed to handle event", zap.String("operation", event["operation"].(string)), zap.Error(err)) + continue + } + es.client.XAck(context, stream, subGroup, msg.ID) + } + } +} + +// NewEventStore returns new event store instance. 
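+//
+// Typical wiring (sketch; the service, repo, client, and consumer name are
+// placeholders for whatever the sinker bootstrap wires up):
+//
+//	sub := NewEventStore(svc, repo, redisClient, "sinker-consumer-1", logger)
+//	go func() {
+//		if err := sub.Subscribe(ctx); err != nil {
+//			logger.Error("sinks event subscriber exited", zap.Error(err))
+//		}
+//	}()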
+func NewEventStore(sinkerService sinker.Service, configRepo config.ConfigRepo, client *redis.Client, esconsumer string, log *zap.Logger) Subscriber { + return eventStore{ + sinkerService: sinkerService, + configRepo: configRepo, + client: client, + esconsumer: esconsumer, + logger: log, + } +} + +func decodeSinksCreate(event map[string]interface{}) (UpdateSinkEvent, error) { + val := UpdateSinkEvent{ + SinkID: read(event, "sink_id", ""), + Owner: read(event, "owner", ""), + Config: readMetadata(event, "config"), + Timestamp: time.Now(), + } + var metadata types.Metadata + if err := json.Unmarshal([]byte(read(event, "config", "")), &metadata); err != nil { + return UpdateSinkEvent{}, err + } + val.Config = metadata + return val, nil +} + +func decodeSinksUpdate(event map[string]interface{}) (UpdateSinkEvent, error) { + val := UpdateSinkEvent{ + SinkID: read(event, "sink_id", ""), + Owner: read(event, "owner", ""), + Timestamp: time.Now(), + } + var metadata types.Metadata + if err := json.Unmarshal([]byte(read(event, "config", "")), &metadata); err != nil { + return UpdateSinkEvent{}, err + } + val.Config = metadata + return val, nil +} + +func decodeSinksRemove(event map[string]interface{}) (UpdateSinkEvent, error) { + val := UpdateSinkEvent{ + SinkID: read(event, "sink_id", ""), + Owner: read(event, "owner", ""), + Timestamp: time.Now(), + } + return val, nil +} + +func (es eventStore) handleSinksRemove(_ context.Context, e UpdateSinkEvent) error { + if ok := es.configRepo.Exists(e.Owner, e.SinkID); ok { + err := es.configRepo.Remove(e.Owner, e.SinkID) + if err != nil { + es.logger.Error("error during remove sinker cache entry", zap.Error(err)) + return err + } + } else { + es.logger.Error("did not find any sinker cache entry for removal", + zap.String("key", fmt.Sprintf("sinker_key-%s-%s", e.Owner, e.SinkID))) + return errors.New("did not find any sinker cache entry for removal") + } + return nil +} + +func (es eventStore) handleSinksUpdate(_ context.Context, e UpdateSinkEvent) error { + var cfg config.SinkConfig + cfg.Config = types.FromMap(e.Config) + cfg.SinkID = e.SinkID + cfg.OwnerID = e.Owner + cfg.State = config.Unknown + if ok := es.configRepo.Exists(e.Owner, e.SinkID); ok { + sinkConfig, err := es.configRepo.Get(e.Owner, e.SinkID) + if err != nil { + return err + } + sinkConfig.Config = cfg.Config + if sinkConfig.OwnerID == "" { + sinkConfig.OwnerID = e.Owner + } + if sinkConfig.SinkID == "" { + sinkConfig.SinkID = e.SinkID + } + err = es.configRepo.Edit(sinkConfig) + if err != nil { + return err + } + } else { + err := es.configRepo.Add(cfg) + if err != nil { + return err + } + } + return nil +} + +func (es eventStore) handleSinksCreate(_ context.Context, e UpdateSinkEvent) error { + var cfg config.SinkConfig + cfg.Config = types.FromMap(e.Config) + cfg.SinkID = e.SinkID + cfg.OwnerID = e.Owner + cfg.State = config.Unknown + err := es.configRepo.Add(cfg) + if err != nil { + return err + } + + return nil +} + +func read(event map[string]interface{}, key, def string) string { + val, ok := event[key].(string) + if !ok { + return def + } + return val +} + +func readMetadata(event map[string]interface{}, key string) types.Metadata { + val, ok := event[key].(types.Metadata) + if !ok { + return types.Metadata{} + } + + return val +} diff --git a/sinker/redis/producer/docs.go b/sinker/redis/producer/docs.go new file mode 100644 index 000000000..30f1d3d99 --- /dev/null +++ b/sinker/redis/producer/docs.go @@ -0,0 +1 @@ +package producer diff --git a/sinker/redis/producer/events.go 
b/sinker/redis/producer/events.go new file mode 100644 index 000000000..79ead9a3d --- /dev/null +++ b/sinker/redis/producer/events.go @@ -0,0 +1,37 @@ +package producer + +import ( + "time" +) + +const ( + SinkerPrefix = "sinker." + SinkerUpdate = SinkerPrefix + "update" +) + +type event interface { + Encode() map[string]interface{} +} + +var ( + _ event = (*SinkerUpdateEvent)(nil) +) + +type SinkerUpdateEvent struct { + SinkID string + Owner string + State string + Msg string + Timestamp time.Time +} + +func (cse SinkerUpdateEvent) Encode() map[string]interface{} { + return map[string]interface{}{ + "sink_id": cse.SinkID, + "owner": cse.Owner, + "state": cse.State, + "msg": cse.Msg, + "timestamp": cse.Timestamp.Unix(), + "operation": SinkerUpdate, + } +} diff --git a/sinker/redis/producer/sink_ttl.go b/sinker/redis/producer/sink_ttl.go deleted file mode 100644 index 959e247ff..000000000 --- a/sinker/redis/producer/sink_ttl.go +++ /dev/null @@ -1,86 +0,0 @@ -package producer - -import ( - "context" - "fmt" - "github.com/go-redis/redis/v8" - "go.uber.org/zap" - "time" -) - -type SinkerKey struct { - OwnerID string - SinkID string - Size string - LastActivity time.Time -} - -func (s *SinkerKey) Encode() map[string]interface{} { - return map[string]interface{}{ - "owner_id": s.OwnerID, - "sink_id": s.SinkID, - "size": s.Size, - "last_activity": s.LastActivity.Format(time.RFC3339), - } -} - -const DefaultExpiration = 5 * time.Minute - -type SinkerKeyService interface { - // AddNewSinkerKey Add New Sinker Key with default Expiration of 5 minutes - AddNewSinkerKey(ctx context.Context, key SinkerKey) error - // RenewSinkerKey Increment Expiration of Sinker Key - RenewSinkerKey(ctx context.Context, key SinkerKey) error - // RenewSinkerKeyInternal Increment Expiration of Sinker Key - RenewSinkerKeyInternal(ctx context.Context, sink SinkerKey, expiration time.Duration) error -} - -type sinkerKeyService struct { - logger *zap.Logger - cacheRepository *redis.Client -} - -func NewSinkerKeyService(l *zap.Logger, cacheRepository *redis.Client) SinkerKeyService { - logger := l.Named("sinker_key_service") - return &sinkerKeyService{logger: logger, cacheRepository: cacheRepository} -} - -// RenewSinkerKey Increment Expiration of Sinker Key -func (s *sinkerKeyService) RenewSinkerKey(ctx context.Context, sink SinkerKey) error { - // If key does not exist, create new entry - key := fmt.Sprintf("orb.sinker.key-%s:%s", sink.OwnerID, sink.SinkID) - cmd := s.cacheRepository.Expire(ctx, key, DefaultExpiration) - if cmd.Err() != nil { - s.logger.Error("error sending event to sinker event store", zap.Error(cmd.Err())) - return cmd.Err() - } - return nil -} - -// RenewSinkerKeyInternal Increment Expiration of Sinker Key using custom expiration -func (s *sinkerKeyService) RenewSinkerKeyInternal(ctx context.Context, sink SinkerKey, expiration time.Duration) error { - // If key does not exist, create new entry - key := fmt.Sprintf("orb.sinker.key-%s:%s", sink.OwnerID, sink.SinkID) - cmd := s.cacheRepository.Expire(ctx, key, expiration) - if cmd.Err() != nil { - s.logger.Error("error sending event to sinker event store", zap.Error(cmd.Err())) - return cmd.Err() - } - return nil -} - -func (s *sinkerKeyService) AddNewSinkerKey(ctx context.Context, sink SinkerKey) error { - // Create sinker key in redis Hashset with default expiration of 5 minutes - key := fmt.Sprintf("orb.sinker.key-%s:%s", sink.OwnerID, sink.SinkID) - cmd := s.cacheRepository.HSet(ctx, key, sink.Encode()) - if cmd.Err() != nil { - s.logger.Error("error 
sending event to sinker event store", zap.Error(cmd.Err())) - return cmd.Err() - } - err := s.RenewSinkerKey(ctx, sink) - if err != nil { - s.logger.Error("error setting expiration to sinker event store", zap.Error(cmd.Err())) - return cmd.Err() - } - return nil -} diff --git a/sinker/redis/producer/sinker_activity.go b/sinker/redis/producer/sinker_activity.go deleted file mode 100644 index 545051674..000000000 --- a/sinker/redis/producer/sinker_activity.go +++ /dev/null @@ -1,66 +0,0 @@ -package producer - -import ( - "context" - "github.com/go-redis/redis/v8" - "go.uber.org/zap" - "time" -) - -type SinkActivityProducer interface { - // PublishSinkActivity to be used to publish the sink activity to the sinker, mainly used by Otel Bridge Service - PublishSinkActivity(ctx context.Context, event SinkActivityEvent) error -} - -type SinkActivityEvent struct { - OwnerID string - SinkID string - State string - Size string - Timestamp time.Time -} - -func (s *SinkActivityEvent) Encode() map[string]interface{} { - return map[string]interface{}{ - "owner_id": s.OwnerID, - "sink_id": s.SinkID, - "state": s.State, - "size": s.Size, - "timestamp": s.Timestamp.Format(time.RFC3339), - } -} - -var _ SinkActivityProducer = (*sinkActivityProducer)(nil) - -type sinkActivityProducer struct { - logger *zap.Logger - redisStreamClient *redis.Client - sinkTTL SinkerKeyService -} - -func NewSinkActivityProducer(l *zap.Logger, redisStreamClient *redis.Client, sinkTTL SinkerKeyService) SinkActivityProducer { - logger := l.Named("sink_activity_producer") - return &sinkActivityProducer{logger: logger, redisStreamClient: redisStreamClient, sinkTTL: sinkTTL} -} - -// PublishSinkActivity BridgeService will notify stream of sink activity -func (sp *sinkActivityProducer) PublishSinkActivity(ctx context.Context, event SinkActivityEvent) error { - const maxLen = 1000 - record := &redis.XAddArgs{ - Stream: "orb.sink_activity", - Values: event.Encode(), - MaxLen: maxLen, - Approx: true, - } - err := sp.redisStreamClient.XAdd(ctx, record).Err() - if err != nil { - sp.logger.Error("error sending event to sinker event store", zap.Error(err)) - } - err = sp.sinkTTL.AddNewSinkerKey(ctx, SinkerKey{ - OwnerID: event.OwnerID, - SinkID: event.SinkID, - Size: event.Size, - LastActivity: event.Timestamp, - }) - return err -} diff --git a/sinker/redis/producer/sinker_idle.go b/sinker/redis/producer/sinker_idle.go deleted file mode 100644 index 9ca951850..000000000 --- a/sinker/redis/producer/sinker_idle.go +++ /dev/null @@ -1,58 +0,0 @@ -package producer - -import ( - "context" - "github.com/go-redis/redis/v8" - "go.uber.org/zap" - "time" -) - -type SinkIdleEvent struct { - OwnerID string - SinkID string - State string - Size string - Timestamp time.Time -} - -func (s *SinkIdleEvent) Encode() map[string]interface{} { - return map[string]interface{}{ - "owner_id": s.OwnerID, - "sink_id": s.SinkID, - "state": s.State, - "size": s.Size, - "timestamp": s.Timestamp.Format(time.RFC3339), - } -} - -type SinkIdleProducer interface { - // PublishSinkIdle to be used to publish the sink activity to the sinker, mainly used by Otel Bridge Service - PublishSinkIdle(ctx context.Context, event SinkIdleEvent) error -} - -var _ SinkIdleProducer = (*sinkIdleProducer)(nil) - -type sinkIdleProducer struct { - logger *zap.Logger - redisStreamClient *redis.Client -} - -func NewSinkIdleProducer(l *zap.Logger, redisStreamClient *redis.Client) SinkIdleProducer { - logger := l.Named("sink_idle_producer") - return &sinkIdleProducer{logger: logger, 
redisStreamClient: redisStreamClient} -} - -func (s *sinkIdleProducer) PublishSinkIdle(ctx context.Context, event SinkIdleEvent) error { - const maxLen = 1000 - record := &redis.XAddArgs{ - Stream: "orb.sink_idle", - Values: event.Encode(), - MaxLen: maxLen, - Approx: true, - } - err := s.redisStreamClient.XAdd(ctx, record).Err() - if err != nil { - s.logger.Error("error sending event to sinker event store", zap.Error(err)) - } - return err -} diff --git a/sinker/redis/producer/streams.go b/sinker/redis/producer/streams.go new file mode 100644 index 000000000..39c14fe1c --- /dev/null +++ b/sinker/redis/producer/streams.go @@ -0,0 +1,160 @@ +package producer + +import ( + "context" + "time" + + "github.com/go-redis/redis/v8" + "github.com/orb-community/orb/sinker/config" + "go.uber.org/zap" +) + +const ( + streamID = "orb.sinker" + streamLen = 1000 +) + +var _ config.ConfigRepo = (*eventStore)(nil) + +type eventStore struct { + sinkCache config.ConfigRepo + client *redis.Client + logger *zap.Logger +} + +// DeployCollector only used in maestro +func (e eventStore) DeployCollector(ctx context.Context, config config.SinkConfig) error { + err := e.sinkCache.Edit(config) + if err != nil { + return err + } + + eventToSink := SinkerUpdateEvent{ + SinkID: config.SinkID, + Owner: config.OwnerID, + State: config.State.String(), + Msg: config.Msg, + Timestamp: time.Now(), + } + recordToSink := &redis.XAddArgs{ + Stream: streamID, + Values: eventToSink.Encode(), + MaxLen: streamLen, + Approx: true, + } + err = e.client.XAdd(ctx, recordToSink).Err() + if err != nil { + e.logger.Error("error sending event to sinker event store", zap.Error(err)) + } + + return nil +} + +func (e eventStore) Exists(ownerID string, sinkID string) bool { + return e.sinkCache.Exists(ownerID, sinkID) +} + +func (e eventStore) Add(config config.SinkConfig) error { + err := e.sinkCache.Add(config) + if err != nil { + return err + } + + event := SinkerUpdateEvent{ + SinkID: config.SinkID, + Owner: config.OwnerID, + State: config.State.String(), + Msg: config.Msg, + Timestamp: time.Now(), + } + record := &redis.XAddArgs{ + Stream: streamID, + Values: event.Encode(), + MaxLen: streamLen, + Approx: true, + } + err = e.client.XAdd(context.Background(), record).Err() + if err != nil { + e.logger.Error("error sending event to event store", zap.Error(err)) + } + return nil +} + +func (e eventStore) Remove(ownerID string, sinkID string) error { + err := e.sinkCache.Remove(ownerID, sinkID) + if err != nil { + return err + } + + event := SinkerUpdateEvent{ + SinkID: sinkID, + Owner: ownerID, + State: config.Idle.String(), + Timestamp: time.Now(), + } + record := &redis.XAddArgs{ + Stream: streamID, + Values: event.Encode(), + MaxLen: streamLen, + Approx: true, + } + err = e.client.XAdd(context.Background(), record).Err() + if err != nil { + e.logger.Error("error sending event to event store", zap.Error(err)) + } + return nil +} + +func (e eventStore) Get(ownerID string, sinkID string) (config.SinkConfig, error) { + return e.sinkCache.Get(ownerID, sinkID) +} + +func (e eventStore) Edit(config config.SinkConfig) error { + err := e.sinkCache.Edit(config) + if err != nil { + return err + } + + event := SinkerUpdateEvent{ + SinkID: config.SinkID, + Owner: config.OwnerID, + State: config.State.String(), + Msg: config.Msg, + Timestamp: time.Now(), + } + record := &redis.XAddArgs{ + Stream: streamID, + Values: event.Encode(), + MaxLen: streamLen, + Approx: true, + } + err = e.client.XAdd(context.Background(), record).Err() + if err != nil { + 
e.logger.Error("error sending event to event store", zap.Error(err))
+	}
+	return nil
+}
+
+func (e eventStore) GetActivity(ownerID string, sinkID string) (int64, error) {
+	return e.sinkCache.GetActivity(ownerID, sinkID)
+}
+
+func (e eventStore) AddActivity(ownerID string, sinkID string) error {
+	return e.sinkCache.AddActivity(ownerID, sinkID)
+}
+
+func (e eventStore) GetAll(ownerID string) ([]config.SinkConfig, error) {
+	return e.sinkCache.GetAll(ownerID)
+}
+
+func (e eventStore) GetAllOwners() ([]string, error) {
+	return e.sinkCache.GetAllOwners()
+}
+
+func NewEventStoreMiddleware(repo config.ConfigRepo, client *redis.Client, logger *zap.Logger) config.ConfigRepo {
+	return eventStore{
+		sinkCache: repo,
+		client:    client,
+		logger:    logger,
+	}
+}
diff --git a/sinker/redis/setup_test.go b/sinker/redis/setup_test.go
index 197abab9a..292b51f89 100644
--- a/sinker/redis/setup_test.go
+++ b/sinker/redis/setup_test.go
@@ -34,7 +34,7 @@ func TestMain(m *testing.M) {
 		})
 		return redisClient.Ping(context.Background()).Err()
 	}); err != nil {
-		logger.Fatal("could not connect to docker: %s", zap.Error(err))
+		logger.Fatal("could not connect to docker: %s", zap.Error(err))
 	}
 
 	code := m.Run()
@@ -45,38 +45,3 @@
 
 	os.Exit(code)
 }
-
-func OnceReceiver(ctx context.Context, streamID string) error {
-	go func() {
-		count := 0
-		err := redisClient.XGroupCreateMkStream(ctx, streamID, "unit-test", "$").Err()
-		if err != nil {
-			logger.Warn("error during create group", zap.Error(err))
-		}
-		for {
-			// Redis Subscribe to stream
-			if redisClient != nil {
-				// create the group, or ignore if it already exists
-				streams, err := redisClient.XReadGroup(ctx, &redis.XReadGroupArgs{
-					Consumer: "test_consumer",
-					Group:    "unit-test",
-					Streams:  []string{streamID, ">"},
-					Count:    10,
-				}).Result()
-				if err != nil || len(streams) == 0 {
-					continue
-				}
-				for _, stream := range streams {
-					for _, msg := range stream.Messages {
-						logger.Info("received message", zap.Any("message", msg.Values))
-						count++
-					}
-				}
-				if count > 0 {
-					return
-				}
-			}
-		}
-	}()
-	return nil
-}
diff --git a/sinker/redis/sinker.go b/sinker/redis/sinker.go
new file mode 100644
index 000000000..d180f61e4
--- /dev/null
+++ b/sinker/redis/sinker.go
@@ -0,0 +1,185 @@
+package redis
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/orb-community/orb/sinker/redis/producer"
+
+	"github.com/go-redis/redis/v8"
+	"github.com/orb-community/orb/sinker"
+	sinkerconfig "github.com/orb-community/orb/sinker/config"
+	"go.uber.org/zap"
+)
+
+const (
+	keyPrefix      = "sinker_key"
+	activityPrefix = "sinker_activity"
+	idPrefix       = "orb.maestro"
+	streamLen      = 1000
+)
+
+var _ sinkerconfig.ConfigRepo = (*sinkerCache)(nil)
+
+type sinkerCache struct {
+	client *redis.Client
+	logger *zap.Logger
+}
+
+func NewSinkerCache(client *redis.Client, logger *zap.Logger) sinkerconfig.ConfigRepo {
+	return &sinkerCache{client: client, logger: logger}
+}
+
+func (s *sinkerCache) Exists(ownerID string, sinkID string) bool {
+	sinkConfig, err := s.Get(ownerID, sinkID)
+	if err != nil {
+		return false
+	}
+	if sinkConfig.SinkID != "" {
+		return true
+	}
+	return false
+}
+
+func (s *sinkerCache) Add(config sinkerconfig.SinkConfig) error {
+	skey := fmt.Sprintf("%s-%s:%s", keyPrefix, config.OwnerID, config.SinkID)
+	bytes, err := json.Marshal(config)
+	if err != nil {
+		return err
+	}
+	if err = s.client.Set(context.Background(), skey, bytes, 0).Err(); err != nil {
+		return err
+	}
+	return nil
+}
+
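+// Remove deletes the cached config entry for the given owner and sink IDs;
+// a later Get on the same pair returns an error until the sink is re-added.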
+func (s *sinkerCache) Remove(ownerID string, sinkID string) error { + skey := fmt.Sprintf("%s-%s:%s", keyPrefix, ownerID, sinkID) + if err := s.client.Del(context.Background(), skey).Err(); err != nil { + return err + } + return nil +} + +func (s *sinkerCache) Get(ownerID string, sinkID string) (sinkerconfig.SinkConfig, error) { + if ownerID == "" || sinkID == "" { + return sinkerconfig.SinkConfig{}, sinker.ErrNotFound + } + skey := fmt.Sprintf("%s-%s:%s", keyPrefix, ownerID, sinkID) + cachedConfig, err := s.client.Get(context.Background(), skey).Result() + if err != nil { + return sinkerconfig.SinkConfig{}, err + } + var cfgSinker sinkerconfig.SinkConfig + if err := json.Unmarshal([]byte(cachedConfig), &cfgSinker); err != nil { + return sinkerconfig.SinkConfig{}, err + } + return cfgSinker, nil +} + +func (s *sinkerCache) Edit(config sinkerconfig.SinkConfig) error { + if err := s.Remove(config.OwnerID, config.SinkID); err != nil { + return err + } + if err := s.Add(config); err != nil { + return err + } + return nil +} + +// check collector activity + +func (s *sinkerCache) GetActivity(ownerID string, sinkID string) (int64, error) { + if ownerID == "" || sinkID == "" { + return 0, errors.New("invalid parameters") + } + skey := fmt.Sprintf("%s:%s", activityPrefix, sinkID) + secs, err := s.client.Get(context.Background(), skey).Result() + if err != nil { + return 0, err + } + lastActivity, _ := strconv.ParseInt(secs, 10, 64) + return lastActivity, nil +} + +func (s *sinkerCache) AddActivity(ownerID string, sinkID string) error { + if ownerID == "" || sinkID == "" { + return errors.New("invalid parameters") + } + defaultExpiration := time.Duration(10) * time.Minute + skey := fmt.Sprintf("%s:%s", activityPrefix, sinkID) + lastActivity := strconv.FormatInt(time.Now().Unix(), 10) + if err := s.client.Set(context.Background(), skey, lastActivity, defaultExpiration).Err(); err != nil { + return err + } + s.logger.Info("added activity for owner and sink ids", zap.String("owner", ownerID), zap.String("sinkID", sinkID)) + return nil +} + +// + +func (s *sinkerCache) DeployCollector(ctx context.Context, config sinkerconfig.SinkConfig) error { + event := producer.SinkerUpdateEvent{ + SinkID: config.SinkID, + Owner: config.OwnerID, + State: config.State.String(), + Msg: config.Msg, + Timestamp: time.Now(), + } + encodeEvent := redis.XAddArgs{ + ID: config.SinkID, + Stream: idPrefix, + Values: event, + MaxLen: streamLen, + Approx: true, + } + if cmd := s.client.XAdd(ctx, &encodeEvent); cmd.Err() != nil { + return cmd.Err() + } + return nil +} + +func (s *sinkerCache) GetAllOwners() ([]string, error) { + iter := s.client.Scan(context.Background(), 0, fmt.Sprintf("%s-*", keyPrefix), 0).Iterator() + var owners []string + for iter.Next(context.Background()) { + keys := strings.Split(strings.TrimPrefix(iter.Val(), fmt.Sprintf("%s-", keyPrefix)), ":") + if len(keys) > 1 { + owners = append(owners, keys[0]) + } + } + if err := iter.Err(); err != nil { + s.logger.Error("failed to retrieve config", zap.Error(err)) + return owners, err + } + return owners, nil +} + +func (s *sinkerCache) GetAll(ownerID string) ([]sinkerconfig.SinkConfig, error) { + iter := s.client.Scan(context.Background(), 0, fmt.Sprintf("%s-%s:*", keyPrefix, ownerID), 0).Iterator() + var configs []sinkerconfig.SinkConfig + for iter.Next(context.Background()) { + keys := strings.Split(strings.TrimPrefix(iter.Val(), fmt.Sprintf("%s-", keyPrefix)), ":") + sinkID := "" + if len(keys) > 1 { + sinkID = keys[1] + } + cfg, err := s.Get(ownerID, 
sinkID) + if err != nil { + s.logger.Error("failed to retrieve config", zap.Error(err)) + continue + } + configs = append(configs, cfg) + } + if err := iter.Err(); err != nil { + s.logger.Error("failed to retrieve config", zap.Error(err)) + return configs, err + } + + return configs, nil +} diff --git a/sinker/redis/sinker_test.go b/sinker/redis/sinker_test.go index 09b39903b..2088cc9e7 100644 --- a/sinker/redis/sinker_test.go +++ b/sinker/redis/sinker_test.go @@ -1,60 +1,208 @@ package redis_test import ( - "context" "fmt" - + "github.com/orb-community/orb/pkg/types" "testing" "time" - "github.com/orb-community/orb/sinker/redis/producer" - + "github.com/mainflux/mainflux/pkg/uuid" + "github.com/orb-community/orb/pkg/errors" + config2 "github.com/orb-community/orb/sinker/config" + "github.com/orb-community/orb/sinker/redis" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestSinkActivityStoreAndMessage(t *testing.T) { - // Create SinkActivityService - sinkTTLSvc := producer.NewSinkerKeyService(logger, redisClient) - sinkActivitySvc := producer.NewSinkActivityProducer(logger, redisClient, sinkTTLSvc) - args := []struct { - testCase string - event producer.SinkActivityEvent +var idProvider = uuid.New() + +func TestSinkerConfigSave(t *testing.T) { + sinkerCache := redis.NewSinkerCache(redisClient, logger) + var config config2.SinkConfig + config.SinkID = "123" + config.OwnerID = "test" + config.Config = types.Metadata{ + "authentication": types.Metadata{ + "password": "password", + "type": "basicauth", + "username": "user", + }, + "exporter": types.Metadata{ + "headers": map[string]string{ + "X-Tenant": "MY_TENANT_1", + }, + "remote_host": "localhost", + }, + "opentelemetry": "enabled", + } + + config.State = 0 + config.Msg = "" + config.LastRemoteWrite = time.Time{} + + err := sinkerCache.Add(config) + require.Nil(t, err, fmt.Sprintf("save sinker config to cache: expected nil got %s", err)) + + cases := map[string]struct { + config config2.SinkConfig + err error + }{ + "Save sinker to cache": { + config: config2.SinkConfig{ + SinkID: "124", + OwnerID: "test", + Config: config.Config, + State: 0, + Msg: "", + LastRemoteWrite: time.Time{}, + }, + err: nil, + }, + "Save already cached sinker config to cache": { + config: config, + err: nil, + }, + } + + for desc, tc := range cases { + t.Run(desc, func(t *testing.T) { + err := sinkerCache.Add(tc.config) + assert.Nil(t, err, fmt.Sprintf("%s: expected %s got %s", desc, tc.err, err)) + }) + } +} + +func TestGetSinkerConfig(t *testing.T) { + sinkerCache := redis.NewSinkerCache(redisClient, logger) + var config config2.SinkConfig + config.SinkID = "123" + config.OwnerID = "test" + config.Config = types.Metadata{ + "authentication": types.Metadata{ + "password": "password", + "type": "basicauth", + "username": "user", + }, + "exporter": types.Metadata{ + "headers": map[string]string{ + "X-Tenant": "MY_TENANT_1", + }, + "remote_host": "localhost", + }, + "opentelemetry": "enabled", + } + config.State = 0 + config.Msg = "" + config.LastRemoteWrite = time.Time{} + + err := sinkerCache.Add(config) + require.Nil(t, err, fmt.Sprintf("save sinker config to cache: expected nil got %s", err)) + + cases := map[string]struct { + sinkID string + config config2.SinkConfig + err error }{ - { - testCase: "sink activity for new sink", - event: producer.SinkActivityEvent{ - OwnerID: "1", - SinkID: "1", - State: "active", - Size: "40", - Timestamp: time.Now(), + "Get Config by existing sinker-key": { + sinkID: "123", + config: 
config, + err: nil, + }, + "Get Config by non-existing sinker-key": { + sinkID: "000", + config: config2.SinkConfig{}, + err: errors.ErrNotFound, + }, + } + + for desc, tc := range cases { + t.Run(desc, func(t *testing.T) { + sinkConfig, err := sinkerCache.Get(tc.config.OwnerID, tc.sinkID) + assert.Equal(t, tc.config.SinkID, sinkConfig.SinkID, fmt.Sprintf("%s: expected %s got %s", desc, tc.config.SinkID, sinkConfig.SinkID)) + assert.Equal(t, tc.config.State, sinkConfig.State, fmt.Sprintf("%s: expected %s got %s", desc, tc.config.State, sinkConfig.State)) + assert.Equal(t, tc.config.OwnerID, sinkConfig.OwnerID, fmt.Sprintf("%s: expected %s got %s", desc, tc.config.OwnerID, sinkConfig.OwnerID)) + assert.Equal(t, tc.config.Msg, sinkConfig.Msg, fmt.Sprintf("%s: expected %s got %s", desc, tc.config.Msg, sinkConfig.Msg)) + assert.Equal(t, tc.config.LastRemoteWrite, sinkConfig.LastRemoteWrite, fmt.Sprintf("%s: expected %s got %s", desc, tc.config.LastRemoteWrite, sinkConfig.LastRemoteWrite)) + if tc.config.Config != nil { + _, ok := sinkConfig.Config["authentication"] + assert.True(t, ok, fmt.Sprintf("%s: should contain authentication metadata", desc)) + _, ok = sinkConfig.Config["exporter"] + assert.True(t, ok, fmt.Sprintf("%s: should contain exporter metadata", desc)) + } + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s", desc, tc.err, err)) + }) + } +} + +func TestGetAllSinkerConfig(t *testing.T) { + sinkerCache := redis.NewSinkerCache(redisClient, logger) + var config config2.SinkConfig + config.SinkID = "123" + config.OwnerID = "test" + config.State = 0 + config.Msg = "" + config.Config = types.Metadata{ + "authentication": types.Metadata{ + "password": "password", + "type": "basicauth", + "username": "user", + }, + "exporter": types.Metadata{ + "headers": map[string]string{ + "X-Tenant": "MY_TENANT_1", }, + "remote_host": "localhost", }, - { - testCase: "sink activity for existing sink", - event: producer.SinkActivityEvent{ - OwnerID: "1", - SinkID: "1", - State: "active", - Size: "55", - Timestamp: time.Now(), + "opentelemetry": "enabled", + } + config.LastRemoteWrite = time.Time{} + sinksConfig := map[string]struct { + config config2.SinkConfig + }{ + "config 1": { + config: config2.SinkConfig{ + SinkID: "123", + OwnerID: "test", + Config: config.Config, + State: 0, + Msg: "", + LastRemoteWrite: time.Time{}, }, }, - { - testCase: "sink activity for another new sink", - event: producer.SinkActivityEvent{ - OwnerID: "2", - SinkID: "1", - State: "active", - Size: "37", - Timestamp: time.Now(), + "config 2": { + config: config2.SinkConfig{ + SinkID: "134", + OwnerID: "test", + Config: config.Config, + State: 0, + Msg: "", + LastRemoteWrite: time.Time{}, }, }, } - for _, tt := range args { - ctx := context.WithValue(context.Background(), "test_case", tt.testCase) - err := sinkActivitySvc.PublishSinkActivity(ctx, tt.event) - require.NoError(t, err, fmt.Sprintf("%s: unexpected error: %s", tt.testCase, err)) + + for _, val := range sinksConfig { + err := sinkerCache.Add(val.config) + require.Nil(t, err, fmt.Sprintf("save sinker config to cache: expected nil got %s", err)) + } + + cases := map[string]struct { + size int + ownerID string + err error + }{ + "Get Config by existing sinker-key": { + size: 2, + ownerID: "test", + err: nil, + }, + } + + for desc, tc := range cases { + t.Run(desc, func(t *testing.T) { + sinksConfig, err := sinkerCache.GetAll(tc.ownerID) + assert.Nil(t, err, fmt.Sprintf("%s: unexpected error: %s", desc, err)) + assert.GreaterOrEqual(t, 
len(sinksConfig), tc.size, fmt.Sprintf("%s: expected %d got %d", desc, tc.size, len(sinksConfig))) + }) } - logger.Debug("debugging breakpoint") } diff --git a/sinker/service.go b/sinker/service.go index adb0876a8..db30bd2b8 100644 --- a/sinker/service.go +++ b/sinker/service.go @@ -6,25 +6,33 @@ package sinker import ( "context" + "errors" "fmt" "time" - "github.com/orb-community/orb/sinker/redis/consumer" - "github.com/orb-community/orb/sinker/redis/producer" - "github.com/go-kit/kit/metrics" "github.com/go-redis/redis/v8" mfnats "github.com/mainflux/mainflux/pkg/messaging/nats" fleetpb "github.com/orb-community/orb/fleet/pb" policiespb "github.com/orb-community/orb/policies/pb" + "github.com/orb-community/orb/sinker/backend/pktvisor" + "github.com/orb-community/orb/sinker/config" "github.com/orb-community/orb/sinker/otel" "github.com/orb-community/orb/sinker/otel/bridgeservice" + "github.com/orb-community/orb/sinker/prometheus" sinkspb "github.com/orb-community/orb/sinks/pb" "go.uber.org/zap" ) const ( - OtelMetricsTopic = "otlp.*.m.>" + BackendMetricsTopic = "be.*.m.>" + OtelMetricsTopic = "otlp.*.m.>" + MaxMsgPayloadSize = 1048 * 1000 +) + +var ( + ErrPayloadTooBig = errors.New("payload too big") + ErrNotFound = errors.New("non-existent entity") ) type Service interface { @@ -41,16 +49,16 @@ type SinkerService struct { otelLogsCancelFunct context.CancelFunc otelKafkaUrl string + sinkerCache config.ConfigRepo inMemoryCacheExpiration time.Duration - streamClient *redis.Client - cacheClient *redis.Client - sinkTTLSvc producer.SinkerKeyService - sinkActivitySvc producer.SinkActivityProducer + esclient *redis.Client logger *zap.Logger hbTicker *time.Ticker hbDone chan bool + promClient prometheus.Client + policiesClient policiespb.PolicyServiceClient fleetClient fleetpb.FleetServiceClient sinksClient sinkspb.SinkServiceClient @@ -67,25 +75,21 @@ func (svc SinkerService) Start() error { ctx := context.WithValue(context.Background(), "routine", "async") ctx = context.WithValue(ctx, "cache_expiry", svc.inMemoryCacheExpiration) svc.asyncContext, svc.cancelAsyncContext = context.WithCancel(ctx) - - svc.sinkTTLSvc = producer.NewSinkerKeyService(svc.logger, svc.cacheClient) - svc.sinkActivitySvc = producer.NewSinkActivityProducer(svc.logger, svc.streamClient, svc.sinkTTLSvc) - // Create Handle and Listener to Redis Key Events - sinkerIdleProducer := producer.NewSinkIdleProducer(svc.logger, svc.streamClient) - sinkerKeyExpirationListener := consumer.NewSinkerKeyExpirationListener(svc.logger, svc.cacheClient, sinkerIdleProducer) - err := sinkerKeyExpirationListener.SubscribeToKeyExpiration(svc.asyncContext) - if err != nil { - svc.logger.Error("error on starting otel, exiting") - ctx.Done() - svc.cancelAsyncContext() - return err + if !svc.otel { + topic := fmt.Sprintf("channels.*.%s", BackendMetricsTopic) + if err := svc.pubSub.Subscribe(topic, svc.handleMsgFromAgent); err != nil { + return err + } + svc.logger.Info("started metrics consumer", zap.String("topic", topic)) } - err = svc.startOtel(svc.asyncContext) + svc.hbTicker = time.NewTicker(CheckerFreq) + svc.hbDone = make(chan bool) + go svc.checkSinker() + + err := svc.startOtel(svc.asyncContext) if err != nil { svc.logger.Error("error on starting otel, exiting") - ctx.Done() - svc.cancelAsyncContext() return err } @@ -96,7 +100,7 @@ func (svc SinkerService) startOtel(ctx context.Context) error { if svc.otel { var err error - bridgeService := bridgeservice.NewBridgeService(svc.logger, svc.inMemoryCacheExpiration, svc.sinkActivitySvc, + 
bridgeService := bridgeservice.NewBridgeService(svc.logger, svc.inMemoryCacheExpiration, svc.sinkerCache, svc.policiesClient, svc.sinksClient, svc.fleetClient, svc.messageInputCounter) svc.otelMetricsCancelFunct, err = otel.StartOtelMetricsComponents(ctx, &bridgeService, svc.logger, svc.otelKafkaUrl, svc.pubSub) @@ -112,9 +116,16 @@ func (svc SinkerService) startOtel(ctx context.Context) error { } func (svc SinkerService) Stop() error { - otelTopic := fmt.Sprintf("channels.*.%s", OtelMetricsTopic) - if err := svc.pubSub.Unsubscribe(otelTopic); err != nil { - return err + if svc.otel { + otelTopic := fmt.Sprintf("channels.*.%s", OtelMetricsTopic) + if err := svc.pubSub.Unsubscribe(otelTopic); err != nil { + return err + } + } else { + topic := fmt.Sprintf("channels.*.%s", BackendMetricsTopic) + if err := svc.pubSub.Unsubscribe(topic); err != nil { + return err + } } svc.logger.Info("unsubscribed from agent metrics") @@ -129,8 +140,8 @@ func (svc SinkerService) Stop() error { // New instantiates the sinker service implementation. func New(logger *zap.Logger, pubSub mfnats.PubSub, - streamsClient *redis.Client, - cacheClient *redis.Client, + esclient *redis.Client, + configRepo config.ConfigRepo, policiesClient policiespb.PolicyServiceClient, fleetClient fleetpb.FleetServiceClient, sinksClient sinkspb.SinkServiceClient, @@ -141,12 +152,14 @@ func New(logger *zap.Logger, inputCounter metrics.Counter, defaultCacheExpiration time.Duration, ) Service { + + pktvisor.Register(logger) return &SinkerService{ inMemoryCacheExpiration: defaultCacheExpiration, logger: logger, pubSub: pubSub, - streamClient: streamsClient, - cacheClient: cacheClient, + esclient: esclient, + sinkerCache: configRepo, policiesClient: policiesClient, fleetClient: fleetClient, sinksClient: sinksClient, diff --git a/sinks/api/http/logging.go b/sinks/api/http/logging.go index 2bde997f0..42256b88c 100644 --- a/sinks/api/http/logging.go +++ b/sinks/api/http/logging.go @@ -6,12 +6,11 @@ package http import ( "context" - "time" - "github.com/orb-community/orb/sinks" "github.com/orb-community/orb/sinks/authentication_type" "github.com/orb-community/orb/sinks/backend" "go.uber.org/zap" + "time" ) var _ sinks.SinkService = (*loggingMiddleware)(nil) @@ -28,7 +27,7 @@ func (l loggingMiddleware) ListSinksInternal(ctx context.Context, filter sinks.F zap.Error(err), zap.Duration("duration", time.Since(begin))) } else { - l.logger.Debug("method call: list_sinks_internal", + l.logger.Info("method call: list_sinks_internal", zap.Duration("duration", time.Since(begin))) } }(time.Now()) @@ -42,7 +41,7 @@ func (l loggingMiddleware) ChangeSinkStateInternal(ctx context.Context, sinkID s zap.Error(err), zap.Duration("duration", time.Since(begin))) } else { - l.logger.Debug("method call: change_sink_state_internal", + l.logger.Info("method call: change_sink_state_internal", zap.Duration("duration", time.Since(begin))) } }(time.Now()) @@ -56,7 +55,7 @@ func (l loggingMiddleware) CreateSink(ctx context.Context, token string, s sinks zap.Error(err), zap.Duration("duration", time.Since(begin))) } else { - l.logger.Debug("method call: create_sink", + l.logger.Info("method call: create_sink", zap.Duration("duration", time.Since(begin))) } }(time.Now()) @@ -70,7 +69,7 @@ func (l loggingMiddleware) UpdateSink(ctx context.Context, token string, s sinks zap.Error(err), zap.Duration("duration", time.Since(begin))) } else { - l.logger.Debug("method call: edit_sink", + l.logger.Info("method call: edit_sink", zap.Duration("duration", time.Since(begin))) } 
}(time.Now()) @@ -84,7 +83,7 @@ func (l loggingMiddleware) UpdateSinkInternal(ctx context.Context, s sinks.Sink) zap.Error(err), zap.Duration("duration", time.Since(begin))) } else { - l.logger.Debug("method call: edit_internal_sink", + l.logger.Info("method call: edit_internal_sink", zap.Duration("duration", time.Since(begin))) } }(time.Now()) @@ -98,7 +97,7 @@ func (l loggingMiddleware) ListSinks(ctx context.Context, token string, pm sinks zap.Error(err), zap.Duration("duration", time.Since(begin))) } else { - l.logger.Debug("method call: list_sinks", + l.logger.Info("method call: list_sinks", zap.Duration("duration", time.Since(begin))) } }(time.Now()) @@ -112,7 +111,7 @@ func (l loggingMiddleware) ListBackends(ctx context.Context, token string) (_ [] zap.Error(err), zap.Duration("duration", time.Since(begin))) } else { - l.logger.Debug("method call: list_backends", + l.logger.Info("method call: list_backends", zap.Duration("duration", time.Since(begin))) } }(time.Now()) @@ -126,7 +125,7 @@ func (l loggingMiddleware) ViewBackend(ctx context.Context, token string, key st zap.Error(err), zap.Duration("duration", time.Since(begin))) } else { - l.logger.Debug("method call: view_backend", + l.logger.Info("method call: view_backend", zap.Duration("duration", time.Since(begin))) } }(time.Now()) @@ -140,7 +139,7 @@ func (l loggingMiddleware) ViewSink(ctx context.Context, token string, key strin zap.Error(err), zap.Duration("duration", time.Since(begin))) } else { - l.logger.Debug("method call: view_sink", + l.logger.Warn("method call: view_sink", zap.Duration("duration", time.Since(begin))) } }(time.Now()) @@ -154,7 +153,7 @@ func (l loggingMiddleware) ViewSinkInternal(ctx context.Context, ownerID string, zap.Error(err), zap.Duration("duration", time.Since(begin))) } else { - l.logger.Debug("method call: view_sink_internal", + l.logger.Warn("method call: view_sink_internal", zap.Duration("duration", time.Since(begin))) } }(time.Now()) @@ -168,7 +167,7 @@ func (l loggingMiddleware) DeleteSink(ctx context.Context, token string, key str zap.Error(err), zap.Duration("duration", time.Since(begin))) } else { - l.logger.Debug("method call: delete_sink", + l.logger.Warn("method call: delete_sink", zap.Duration("duration", time.Since(begin))) } }(time.Now()) @@ -182,7 +181,7 @@ func (l loggingMiddleware) ValidateSink(ctx context.Context, token string, s sin zap.Error(err), zap.Duration("duration", time.Since(begin))) } else { - l.logger.Debug("method call: validate_sink", + l.logger.Info("method call: validate_sink", zap.Duration("duration", time.Since(begin))) } }(time.Now()) diff --git a/sinks/api/http/metrics.go b/sinks/api/http/metrics.go index 7ef0edcfb..51bac1ee3 100644 --- a/sinks/api/http/metrics.go +++ b/sinks/api/http/metrics.go @@ -6,8 +6,6 @@ package http import ( "context" - "time" - "github.com/go-kit/kit/metrics" "github.com/mainflux/mainflux" "github.com/orb-community/orb/pkg/errors" @@ -15,6 +13,7 @@ import ( "github.com/orb-community/orb/sinks/authentication_type" "github.com/orb-community/orb/sinks/backend" "go.uber.org/zap" + "time" ) var _ sinks.SinkService = (*metricsMiddleware)(nil) diff --git a/sinks/postgres/init.go b/sinks/postgres/init.go index 90588b5b7..cab3b5390 100644 --- a/sinks/postgres/init.go +++ b/sinks/postgres/init.go @@ -90,19 +90,6 @@ func migrateDB(db *sqlx.DB) error { "DROP TABLE current_version", }, }, - { - Id: "sinks_4", - Up: []string{ - `ALTER TYPE public.sinks_state ADD VALUE IF NOT EXISTS 'warning';`, - `ALTER TYPE public.sinks_state ADD VALUE IF NOT 
EXISTS 'provisioning';`, - `ALTER TYPE public.sinks_state ADD VALUE IF NOT EXISTS 'provisioning_error';`, - }, - Down: []string{ - `ALTER TYPE public.sinks_state DROP VALUE IF EXISTS 'warning';`, - `ALTER TYPE public.sinks_state DROP VALUE IF EXISTS 'provisioning';`, - `ALTER TYPE public.sinks_state DROP VALUE IF EXISTS 'provisioning_error';`, - }, - }, }, } diff --git a/sinks/postgres/sinks_test.go b/sinks/postgres/sinks_test.go index bdb4788d6..974d55e42 100644 --- a/sinks/postgres/sinks_test.go +++ b/sinks/postgres/sinks_test.go @@ -580,16 +580,8 @@ func TestUpdateSinkState(t *testing.T) { for desc, tc := range cases { t.Run(desc, func(t *testing.T) { - ctx := context.WithValue(context.Background(), "test", desc) err := sinkRepo.UpdateSinkState(context.Background(), tc.sinkID, tc.msg, tc.ownerID, tc.state) assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", desc, tc.err, err)) - // only validate success scenarios - if tc.err == nil { - got, err := sinkRepo.RetrieveById(ctx, sinkID) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", desc, tc.err, err)) - assert.Equal(t, tc.state, got.State, fmt.Sprintf("%s: expected state %d got %d", desc, tc.state, got.State)) - assert.Equal(t, tc.msg, got.Error, fmt.Sprintf("%s: expected msg %s got %s", desc, tc.msg, got.Error)) - } }) } diff --git a/sinks/redis/consumer/events.go b/sinks/redis/consumer/events.go new file mode 100644 index 000000000..a7702da0f --- /dev/null +++ b/sinks/redis/consumer/events.go @@ -0,0 +1,21 @@ +// Copyright (c) Mainflux +// SPDX-License-Identifier: Apache-2.0 + +// Adapted for Orb project, modifications licensed under MPL v. 2.0: +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. 
*/ +package consumer + +import ( + "github.com/orb-community/orb/sinks" + "time" +) + +type stateUpdateEvent struct { + ownerID string + sinkID string + state sinks.State + msg string + timestamp time.Time +} diff --git a/sinks/redis/consumer/sink_status_listener.go b/sinks/redis/consumer/sink_status_listener.go deleted file mode 100644 index 55fe32730..000000000 --- a/sinks/redis/consumer/sink_status_listener.go +++ /dev/null @@ -1,110 +0,0 @@ -package consumer - -import ( - "context" - "fmt" - - "github.com/go-redis/redis/v8" - "github.com/orb-community/orb/sinks" - redis2 "github.com/orb-community/orb/sinks/redis" - - "go.uber.org/zap" -) - -type SinkStatusListener interface { - SubscribeToMaestroSinkStatus(ctx context.Context) error - ReceiveMessage(ctx context.Context, message redis.XMessage) error -} - -type sinkStatusListener struct { - logger *zap.Logger - streamClient *redis.Client - sinkService sinks.SinkService -} - -func NewSinkStatusListener(l *zap.Logger, streamClient *redis.Client, sinkService sinks.SinkService) SinkStatusListener { - logger := l.Named("sink_status_listener") - return &sinkStatusListener{ - logger: logger, - streamClient: streamClient, - sinkService: sinkService, - } -} - -func (s *sinkStatusListener) SubscribeToMaestroSinkStatus(ctx context.Context) error { - // First will create consumer group - groupName := "orb.sinks" - streamName := "orb.maestro.sink_status" - consumerName := "sinks_consumer" - err := s.streamClient.XGroupCreateMkStream(ctx, streamName, groupName, "$").Err() - if err != nil && err.Error() != redis2.Exists { - s.logger.Error("failed to create group", zap.Error(err)) - return err - } - go func(rLogger *zap.Logger) { - for { - select { - case <-ctx.Done(): - rLogger.Info("closing sink_status_listener routine") - return - default: - streams, err := s.streamClient.XReadGroup(ctx, &redis.XReadGroupArgs{ - Group: groupName, - Consumer: consumerName, - Streams: []string{streamName, ">"}, - Count: 1000, - }).Result() - if err != nil || len(streams) == 0 { - if err != nil { - rLogger.Error("failed to read group", zap.Error(err)) - } - continue - } - for _, msg := range streams[0].Messages { - err = s.ReceiveMessage(ctx, msg) - if err != nil { - rLogger.Error("failed to process message", zap.Error(err)) - } - } - } - } - }(s.logger.Named("goroutine_sink_status_listener")) - return nil -} - -func (s *sinkStatusListener) ReceiveMessage(ctx context.Context, message redis.XMessage) error { - logger := s.logger.Named(fmt.Sprintf("sink_status_msg:%s", message.ID)) - go func(ctx context.Context, logger *zap.Logger, message redis.XMessage) { - event := s.decodeMessage(message.Values) - logger.Info("received message from maestro", zap.String("owner_id", event.OwnerID), - zap.String("sink_id", event.SinkID), zap.String("state", event.State), zap.String("msg", event.Msg)) - gotSink, err := s.sinkService.ViewSinkInternal(ctx, event.OwnerID, event.SinkID) - if err != nil { - logger.Error("failed to get sink for sink_id from message", zap.String("owner_id", event.OwnerID), - zap.String("sink_id", event.SinkID), zap.Error(err)) - return - } - newState := sinks.NewStateFromString(event.State) - if newState == sinks.Error || newState == sinks.ProvisioningError || newState == sinks.Warning { - gotSink.Error = event.Msg - } - gotSink.State = newState - err = s.sinkService.ChangeSinkStateInternal(ctx, gotSink.ID, gotSink.Error, gotSink.MFOwnerID, gotSink.State) - if err != nil { - logger.Error("failed to update sink", zap.String("owner_id", event.OwnerID), - 
zap.String("sink_id", event.SinkID), zap.Error(err)) - return - } - }(ctx, logger, message) - return nil -} - -// func (es eventStore) decodeSinkerStateUpdate(event map[string]interface{}) *sinks.SinkerStateUpdate { -func (s *sinkStatusListener) decodeMessage(content map[string]interface{}) redis2.StateUpdateEvent { - return redis2.StateUpdateEvent{ - OwnerID: content["owner_id"].(string), - SinkID: content["sink_id"].(string), - State: content["status"].(string), - Msg: content["error_message"].(string), - } -} diff --git a/sinks/redis/consumer/streams.go b/sinks/redis/consumer/streams.go index 27053c941..4a5790d4f 100644 --- a/sinks/redis/consumer/streams.go +++ b/sinks/redis/consumer/streams.go @@ -2,7 +2,6 @@ package consumer import ( "context" - redis2 "github.com/orb-community/orb/sinks/redis" "time" "github.com/go-redis/redis/v8" @@ -75,23 +74,26 @@ func (es eventStore) Subscribe(context context.Context) error { } } -func (es eventStore) handleSinkerStateUpdate(ctx context.Context, event redis2.StateUpdateEvent) error { - state := sinks.NewStateFromString(event.State) - err := es.sinkService.ChangeSinkStateInternal(ctx, event.SinkID, event.Msg, event.OwnerID, state) +func (es eventStore) handleSinkerStateUpdate(ctx context.Context, event stateUpdateEvent) error { + err := es.sinkService.ChangeSinkStateInternal(ctx, event.sinkID, event.msg, event.ownerID, event.state) if err != nil { return err } return nil } -func (es eventStore) decodeSinkerStateUpdate(event map[string]interface{}) redis2.StateUpdateEvent { - val := redis2.StateUpdateEvent{ - OwnerID: read(event, "owner", ""), - SinkID: read(event, "sink_id", ""), - Msg: read(event, "msg", ""), - Timestamp: time.Time{}, +func (es eventStore) decodeSinkerStateUpdate(event map[string]interface{}) stateUpdateEvent { + val := stateUpdateEvent{ + ownerID: read(event, "owner", ""), + sinkID: read(event, "sink_id", ""), + msg: read(event, "msg", ""), + timestamp: time.Time{}, + } + err := val.state.Scan(event["state"]) + if err != nil { + es.logger.Error("error parsing the state", zap.Error(err)) + return stateUpdateEvent{} } - val.State = event["state"].(string) return val } diff --git a/sinks/redis/events.go b/sinks/redis/events.go deleted file mode 100644 index 3f6f3cc91..000000000 --- a/sinks/redis/events.go +++ /dev/null @@ -1,64 +0,0 @@ -package redis - -import ( - "encoding/json" - "github.com/orb-community/orb/maestro/redis" - "github.com/orb-community/orb/pkg/types" - "time" -) - -const ( - SinkPrefix = "sinks." 
- SinkCreate = SinkPrefix + "create" - SinkDelete = SinkPrefix + "remove" - SinkUpdate = SinkPrefix + "update" - StreamSinks = "orb.sinks" - GroupMaestro = "orb.maestro" - Exists = "BUSYGROUP Consumer Group name already exists" -) - -type StateUpdateEvent struct { - OwnerID string - SinkID string - State string - Msg string - Timestamp time.Time -} - -func DecodeSinksEvent(event map[string]interface{}, operation string) (redis.SinksUpdateEvent, error) { - val := redis.SinksUpdateEvent{ - SinkID: read(event, "sink_id", ""), - Owner: read(event, "owner", ""), - Backend: read(event, "backend", ""), - Config: readMetadata(event, "config"), - Timestamp: time.Now(), - } - if operation != SinkDelete { - var metadata types.Metadata - if err := json.Unmarshal([]byte(read(event, "config", "")), &metadata); err != nil { - return redis.SinksUpdateEvent{}, err - } - val.Config = metadata - return val, nil - } - - return val, nil -} - -func read(event map[string]interface{}, key, def string) string { - val, ok := event[key].(string) - if !ok { - return def - } - - return val -} - -func readMetadata(event map[string]interface{}, key string) types.Metadata { - val, ok := event[key].(types.Metadata) - if !ok { - return types.Metadata{} - } - - return val -} diff --git a/sinks/redis/producer/events.go b/sinks/redis/producer/events.go index 0e0306d53..acec59a7c 100644 --- a/sinks/redis/producer/events.go +++ b/sinks/redis/producer/events.go @@ -33,7 +33,6 @@ var ( type createSinkEvent struct { sinkID string owner string - backend string config types.Metadata timestamp time.Time } @@ -46,7 +45,6 @@ func (cce createSinkEvent) Encode() (map[string]interface{}, error) { return map[string]interface{}{ "sink_id": cce.sinkID, "owner": cce.owner, - "backend": cce.backend, "config": config, "timestamp": cce.timestamp.Unix(), "operation": SinkCreate, @@ -70,7 +68,6 @@ type updateSinkEvent struct { sinkID string owner string config types.Metadata - backend string timestamp time.Time } @@ -83,7 +80,6 @@ func (cce updateSinkEvent) Encode() (map[string]interface{}, error) { "sink_id": cce.sinkID, "owner": cce.owner, "config": config, - "backend": cce.backend, "timestamp": cce.timestamp.Unix(), "operation": SinkUpdate, }, nil diff --git a/sinks/redis/producer/streams.go b/sinks/redis/producer/streams.go index 01b68dff4..7086deb19 100644 --- a/sinks/redis/producer/streams.go +++ b/sinks/redis/producer/streams.go @@ -10,7 +10,6 @@ package producer import ( "context" - "github.com/orb-community/orb/sinks/authentication_type" "github.com/go-redis/redis/v8" @@ -24,34 +23,33 @@ const ( streamLen = 1000 ) -var _ sinks.SinkService = (*sinksStreamProducer)(nil) +var _ sinks.SinkService = (*eventStore)(nil) -type sinksStreamProducer struct { +type eventStore struct { svc sinks.SinkService client *redis.Client logger *zap.Logger } // ListSinksInternal will only call following service -func (es sinksStreamProducer) ListSinksInternal(ctx context.Context, filter sinks.Filter) ([]sinks.Sink, error) { +func (es eventStore) ListSinksInternal(ctx context.Context, filter sinks.Filter) ([]sinks.Sink, error) { return es.svc.ListSinksInternal(ctx, filter) } -func (es sinksStreamProducer) ChangeSinkStateInternal(ctx context.Context, sinkID string, msg string, ownerID string, state sinks.State) error { +func (es eventStore) ChangeSinkStateInternal(ctx context.Context, sinkID string, msg string, ownerID string, state sinks.State) error { return es.svc.ChangeSinkStateInternal(ctx, sinkID, msg, ownerID, state) } -func (es sinksStreamProducer) 
ViewSinkInternal(ctx context.Context, ownerID string, key string) (sinks.Sink, error) { +func (es eventStore) ViewSinkInternal(ctx context.Context, ownerID string, key string) (sinks.Sink, error) { return es.svc.ViewSinkInternal(ctx, ownerID, key) } -func (es sinksStreamProducer) CreateSink(ctx context.Context, token string, s sinks.Sink) (sink sinks.Sink, err error) { +func (es eventStore) CreateSink(ctx context.Context, token string, s sinks.Sink) (sink sinks.Sink, err error) { defer func() { event := createSinkEvent{ - sinkID: sink.ID, - owner: sink.MFOwnerID, - config: sink.Config, - backend: sink.Backend, + sinkID: sink.ID, + owner: sink.MFOwnerID, + config: sink.Config, } encode, err := event.Encode() @@ -76,13 +74,12 @@ func (es sinksStreamProducer) CreateSink(ctx context.Context, token string, s si return es.svc.CreateSink(ctx, token, s) } -func (es sinksStreamProducer) UpdateSinkInternal(ctx context.Context, s sinks.Sink) (sink sinks.Sink, err error) { +func (es eventStore) UpdateSinkInternal(ctx context.Context, s sinks.Sink) (sink sinks.Sink, err error) { defer func() { event := updateSinkEvent{ - sinkID: sink.ID, - owner: sink.MFOwnerID, - config: sink.Config, - backend: sink.Backend, + sinkID: sink.ID, + owner: sink.MFOwnerID, + config: sink.Config, } encode, err := event.Encode() @@ -105,7 +102,7 @@ func (es sinksStreamProducer) UpdateSinkInternal(ctx context.Context, s sinks.Si return es.svc.UpdateSinkInternal(ctx, s) } -func (es sinksStreamProducer) UpdateSink(ctx context.Context, token string, s sinks.Sink) (sink sinks.Sink, err error) { +func (es eventStore) UpdateSink(ctx context.Context, token string, s sinks.Sink) (sink sinks.Sink, err error) { defer func() { event := updateSinkEvent{ sinkID: sink.ID, @@ -133,35 +130,35 @@ func (es sinksStreamProducer) UpdateSink(ctx context.Context, token string, s si return es.svc.UpdateSink(ctx, token, s) } -func (es sinksStreamProducer) ListSinks(ctx context.Context, token string, pm sinks.PageMetadata) (sinks.Page, error) { +func (es eventStore) ListSinks(ctx context.Context, token string, pm sinks.PageMetadata) (sinks.Page, error) { return es.svc.ListSinks(ctx, token, pm) } -func (es sinksStreamProducer) ListAuthenticationTypes(ctx context.Context, token string) ([]authentication_type.AuthenticationTypeConfig, error) { +func (es eventStore) ListAuthenticationTypes(ctx context.Context, token string) ([]authentication_type.AuthenticationTypeConfig, error) { return es.svc.ListAuthenticationTypes(ctx, token) } -func (es sinksStreamProducer) ViewAuthenticationType(ctx context.Context, token string, key string) (authentication_type.AuthenticationTypeConfig, error) { +func (es eventStore) ViewAuthenticationType(ctx context.Context, token string, key string) (authentication_type.AuthenticationTypeConfig, error) { return es.svc.ViewAuthenticationType(ctx, token, key) } -func (es sinksStreamProducer) ListBackends(ctx context.Context, token string) (_ []string, err error) { +func (es eventStore) ListBackends(ctx context.Context, token string) (_ []string, err error) { return es.svc.ListBackends(ctx, token) } -func (es sinksStreamProducer) ViewBackend(ctx context.Context, token string, key string) (_ backend.Backend, err error) { +func (es eventStore) ViewBackend(ctx context.Context, token string, key string) (_ backend.Backend, err error) { return es.svc.ViewBackend(ctx, token, key) } -func (es sinksStreamProducer) ViewSink(ctx context.Context, token string, key string) (_ sinks.Sink, err error) { +func (es eventStore) ViewSink(ctx 
context.Context, token string, key string) (_ sinks.Sink, err error) { return es.svc.ViewSink(ctx, token, key) } -func (es sinksStreamProducer) GetLogger() *zap.Logger { +func (es eventStore) GetLogger() *zap.Logger { return es.logger } -func (es sinksStreamProducer) DeleteSink(ctx context.Context, token, id string) (err error) { +func (es eventStore) DeleteSink(ctx context.Context, token, id string) (err error) { sink, err := es.svc.ViewSink(ctx, token, id) if err != nil { return err @@ -196,14 +193,14 @@ func (es sinksStreamProducer) DeleteSink(ctx context.Context, token, id string) return nil } -func (es sinksStreamProducer) ValidateSink(ctx context.Context, token string, sink sinks.Sink) (sinks.Sink, error) { +func (es eventStore) ValidateSink(ctx context.Context, token string, sink sinks.Sink) (sinks.Sink, error) { return es.svc.ValidateSink(ctx, token, sink) } -// NewSinkStreamProducerMiddleware returns wrapper around sinks service that sends +// NewEventStoreMiddleware returns wrapper around sinks service that sends // events to event store. -func NewSinkStreamProducerMiddleware(svc sinks.SinkService, client *redis.Client) sinks.SinkService { - return sinksStreamProducer{ +func NewEventStoreMiddleware(svc sinks.SinkService, client *redis.Client) sinks.SinkService { + return eventStore{ svc: svc, client: client, } diff --git a/sinks/sinks.go b/sinks/sinks.go index a5e8bf29e..14cb8455f 100644 --- a/sinks/sinks.go +++ b/sinks/sinks.go @@ -7,14 +7,13 @@ package sinks import ( "context" "database/sql/driver" - "time" - "github.com/orb-community/orb/pkg/errors" "github.com/orb-community/orb/pkg/types" "github.com/orb-community/orb/sinks/authentication_type" "github.com/orb-community/orb/sinks/authentication_type/basicauth" "github.com/orb-community/orb/sinks/backend" "go.uber.org/zap" + "time" ) var ( @@ -53,8 +52,6 @@ const ( Error Idle Warning - Provisioning - ProvisioningError ) type State int @@ -65,8 +62,6 @@ var stateMap = [...]string{ "error", "idle", "warning", - "provisioning", - "provisioning_error", } const MetadataLabelOtel = "opentelemetry" @@ -77,13 +72,11 @@ type Filter struct { } var stateRevMap = map[string]State{ - "unknown": Unknown, - "active": Active, - "error": Error, - "idle": Idle, - "warning": Warning, - "provisioning": Provisioning, - "provisioning_error": ProvisioningError, + "unknown": Unknown, + "active": Active, + "error": Error, + "idle": Idle, + "warning": Warning, } func (s State) String() string { @@ -104,10 +97,6 @@ func (s *State) Scan(value interface{}) error { } func (s State) Value() (driver.Value, error) { return s.String(), nil } -func NewStateFromString(state string) State { - return stateRevMap[state] -} - func NewConfigBackends(e backend.Backend, a authentication_type.AuthenticationType) Configuration { return Configuration{ Exporter: e, diff --git a/sinks/sinks_service.go b/sinks/sinks_service.go index dee2c669d..faf942bc1 100644 --- a/sinks/sinks_service.go +++ b/sinks/sinks_service.go @@ -310,6 +310,8 @@ func (svc sinkService) UpdateSink(ctx context.Context, token string, sink Sink) defaultMetadata := make(types.Metadata, 1) defaultMetadata["opentelemetry"] = "enabled" sink.Config.Merge(defaultMetadata) + sink.State = Unknown + sink.Error = "" if sink.Format == "yaml" { configDataByte, err := yaml.Marshal(sink.Config) if err != nil { @@ -473,7 +475,9 @@ func (svc sinkService) ChangeSinkStateInternal(ctx context.Context, sinkID strin } func (svc sinkService) validateBackend(sink *Sink) (be backend.Backend, err error) { - if 
!backend.HaveBackend(sink.Backend) {
+	if backend.HaveBackend(sink.Backend) {
+		sink.State = Unknown
+	} else {
 		return nil, ErrInvalidBackend
 	}
 	sinkBe := backend.GetBackend(sink.Backend)
diff --git a/ui/README.md b/ui/README.md
index 98c139769..13bb86cb8 100644
--- a/ui/README.md
+++ b/ui/README.md
@@ -9,10 +9,10 @@
 
 The following are needed to run the UI:
 
-* [node - lts/fermium](https://nodejs.org/en/blog/release/v14.21.3/)
-* [npm](https://github.com/npm/cli/tree/v6.14.18)
-> If using [nvm](https://github.com/nvm-sh/nvm), simply run
-> `nvm install lts/fermium`
+* [node](https://nodejs.org/en/blog/release/v12.21.0/)
+* [npm](https://github.com/npm/cli/tree/v7.22.0)
+
+*It is recommended to build the UI using [yarn](https://www.npmjs.com/package/yarn)*
 
 ### Install
 
@@ -24,7 +24,7 @@
 git clone git@github.com:orb-community/orb.git --no-checkout --depth 1 ${path}
 # however you clone the project
 
 cd ${path}/ui
-npm install
+yarn install
 ```
 
 ### Usage
 
 A developer build from the source can be achieved using the following command:
 
 ```bash
-npm run build
+yarn build
 ```
 
 *(Check [package.json](./package.json) file for available tasks.)*
 
@@ -42,7 +42,7 @@
 While developing, it is useful to serve UI locally and have your changes to the code having effect immediately.
 
-The command `npm run start` will generate a dev build and serve it at `http://localhost:4200/`.
+The commands `yarn start` and `yarn start:withmock` will generate a dev build and serve it at `http://localhost:4200/`.
 
 *(Note that `http://localhost:4200/` is for development use only, and is not intended to be used by the end-user.)*
 
@@ -69,7 +69,9 @@
 fs.inotify.max_user_watches=524288
 
 See [data examples](https://github.com/orb-community/orb/wiki/Orb-UI---Entities-Data-Examples) for examples of *Orb Entities* to aid in UI design, form validation and related info.
+ --- + ## QA & Testing Quality Assurance & Test frameworks and scripts are still a *WORK IN PROGRESS* diff --git a/ui/docker/Dockerfile b/ui/docker/Dockerfile index 0e20a297c..9bacbdae4 100644 --- a/ui/docker/Dockerfile +++ b/ui/docker/Dockerfile @@ -8,7 +8,7 @@ ARG ENV_GTAGID="" COPY ./ /app/ -RUN GTAGID=$ENV_GTAGID npm run build:prod +RUN GTAGID=$ENV_GTAGID yarn build:prod # Stage 1, based on Nginx, to have only the compiled app, ready for production with Nginx FROM nginx:1.13-alpine diff --git a/ui/docker/Dockerfile.buildyarn b/ui/docker/Dockerfile.buildyarn index 03d3fab30..3e3721a3c 100644 --- a/ui/docker/Dockerfile.buildyarn +++ b/ui/docker/Dockerfile.buildyarn @@ -1,7 +1,6 @@ # Stage 0, based on Node.js, install all dependencies -FROM node:14.21.3 +FROM node:14.17 WORKDIR /app COPY package.json /app/ - -RUN npm install \ No newline at end of file +RUN yarn install diff --git a/ui/package.json b/ui/package.json index daafae70b..d2661df79 100644 --- a/ui/package.json +++ b/ui/package.json @@ -21,9 +21,8 @@ "test": "ng test", "test:coverage": "rimraf coverage && npm run test -- --code-coverage", "lint": "ng lint", - "lint:fix": "tslint --fix -c ./tslint.json 'src/**/*{.ts,.tsx}'", + "lint:fix": "ng lint orb-ui --fix", "lint:styles": "stylelint ./src/**/*.scss", - "lint:styles:fix": "stylelint ./src/**/*.scss --fix", "lint:ci": "npm run lint && npm run lint:styles", "pree2e": "webdriver-manager update --standalone false --gecko false", "e2e": "ng e2e", diff --git a/ui/src/app/@core/core.module.ts b/ui/src/app/@core/core.module.ts index 8f8a1f888..7ea5a6ce7 100644 --- a/ui/src/app/@core/core.module.ts +++ b/ui/src/app/@core/core.module.ts @@ -49,8 +49,8 @@ export const NB_CORE_PROVIDERS = [ method: 'put', redirect: { success: '/auth/login', - failure: null, - }, + failure: null + } }, logout: { method: null, redirect: { success: '/', failure: '/' } }, @@ -94,7 +94,7 @@ export const NB_CORE_PROVIDERS = [ }, tos: { required: true, - }, + } }, }, }).providers, diff --git a/ui/src/app/@theme/styles/_overrides.scss b/ui/src/app/@theme/styles/_overrides.scss index 362b4af04..c30ad1293 100644 --- a/ui/src/app/@theme/styles/_overrides.scss +++ b/ui/src/app/@theme/styles/_overrides.scss @@ -94,6 +94,7 @@ } nb-card { + border: none; /* width */ ::-webkit-scrollbar { width: 4px; @@ -111,7 +112,7 @@ background: #969fb9; border-radius: 16px; } - scrollbar-color: #969fb9 #969fb980; + scrollbar-color: #969fb9 #969fb980; margin: 20px 0 !important; } @@ -182,8 +183,8 @@ align-content: flex-start; justify-content: flex-start; align-items: stretch; + overflow-x: auto; min-width: 800px; - overflow-x: hidden !important; } .orb-table { @@ -193,19 +194,19 @@ } .orb-table-small { - min-height: calc(40vh); + min-height: calc(25vh); min-width: 600px; - max-height: calc(40vh); + max-height: calc(25vh); } .orb-service- { - &new, &unknown { + &new { color: #9b51e0; } - &online, &healthy, &active { + &online, &healthy { color: #6fcf97; } - &stale, &idle { + &stale { color: #f2994a; } &error, &failure { @@ -214,38 +215,6 @@ &offline, &none { color: #969fb9; } - &provisioning { - color: #3089fc; - } - &provioning_error { - color: #df316f; - } - &warning { - color: #f2c94c; - } -} -.orb-service-background- { - &new, &unknown { - background-color: #9b51e0; - } - &online, &healthy, &active { - background-color: #6fcf97; - } - &stale, &idle { - background-color: #f2994a; - } - &error, &failure, &provioning_error { - background-color: #df316f; - } - &offline, &none { - background-color: #969fb9; - } - &warning { - 
background-color: #f2c94c; - } - &provisioning { - background-color: #3089fc; - } } .required { color: #df316f; @@ -289,7 +258,7 @@ } } .delete-selected { - color: #ffffff !important; + color: #ffffff !important; font-family: 'Montserrat', sans-serif; font-weight: 600; text-transform: none !important; @@ -312,7 +281,9 @@ p { font-family: 'Montserrat' !important; } - +button { + font-family: 'Montserrat' !important; +} label { font-family: 'Montserrat' !important; } @@ -326,15 +297,14 @@ input { } .next-button { border-radius: 16px !important; - background: #3089fc !important; + background: #3089FC !important; padding: 8px 24px !important; color: #fff !important; - border: none !important; + border: none !important; outline: none !important; font-size: 14px !important; font-weight: 600 !important; transition: background-color 0.3s ease !important; - font-family: 'Montserrat'; } .next-button:hover { background-color: #509afc!important; @@ -343,17 +313,16 @@ input { background: #2b3148 !important; } .cancel-back-button { - border-radius: 16px !important; + border-radius: 16px !important;; padding: 8px 24px !important; background-color: transparent !important; - color: #3089fc !important; + color: #3089FC !important; border: none !important; outline: none !important; font-size: 14px !important; - font-weight: 600 !important; + font-weight: 600 !important; transition: background-color 0.3s ease !important; margin-right: 0 !important; - font-family: 'Montserrat'; } .cancel-back-button:hover { background-color: rgba(255, 255, 255, 0.05) !important; diff --git a/ui/src/app/auth/components/auth.component.scss b/ui/src/app/auth/components/auth.component.scss index 310b18736..6bf4c46ee 100644 --- a/ui/src/app/auth/components/auth.component.scss +++ b/ui/src/app/auth/components/auth.component.scss @@ -1,7 +1,3 @@ -@import '~bootstrap/scss/mixins/breakpoints'; -@import '~@nebular/theme/styles/global/breakpoints'; -@import '../../@theme/styles/themes'; - :host { $auth-layout-padding: 2.5rem; @@ -33,7 +29,7 @@ margin: auto; } -@include media-breakpoint-down(sm) { +media-breakpoint-down(sm) { nb-card { border-radius: 0; height: 100vh; @@ -43,12 +39,10 @@ ::ng-deep { nb-layout .layout .layout-container .content .columns nb-layout-column { padding: $auth-layout-padding; - } - @include media-breakpoint-down(sm) { - nb-layout .layout .layout-container .content .columns nb-layout-column { + + media-breakpoint-down(sm) { padding: 0; - } } - + } } } diff --git a/ui/src/app/auth/pages/login/login.component.scss b/ui/src/app/auth/pages/login/login.component.scss index 01f47cce5..3f009f11c 100644 --- a/ui/src/app/auth/pages/login/login.component.scss +++ b/ui/src/app/auth/pages/login/login.component.scss @@ -53,7 +53,7 @@ top: 0; width: 600px; - input[type='checkbox'] { + input[type="checkbox"] { height: 2rem; padding: 0 1rem; margin-top: 0.5rem; diff --git a/ui/src/app/auth/pages/register/register.component.ts b/ui/src/app/auth/pages/register/register.component.ts index 46547661f..24b15a02c 100644 --- a/ui/src/app/auth/pages/register/register.component.ts +++ b/ui/src/app/auth/pages/register/register.component.ts @@ -59,7 +59,7 @@ export class RegisterComponent extends NbRegisterComponent implements OnInit { this.errors = this.messages = []; this.submitted = true; this.repeatedEmail = null; - + const { email, password, company } = this.user; this.authService .register(this.strategy, { @@ -87,7 +87,7 @@ export class RegisterComponent extends NbRegisterComponent implements OnInit { } } }); - + } 
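  // Editor's annotation (an assumption from the method name and its call in
  // register() above): signs the newly registered user in with the same
  // credentials and then navigates away from the registration page.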
authenticateAndRedirect(email, password) { diff --git a/ui/src/app/common/interfaces/orb/sink.interface.ts b/ui/src/app/common/interfaces/orb/sink.interface.ts index 104650ddf..658618577 100644 --- a/ui/src/app/common/interfaces/orb/sink.interface.ts +++ b/ui/src/app/common/interfaces/orb/sink.interface.ts @@ -16,9 +16,6 @@ export enum SinkStates { error = 'error', idle = 'idle', unknown = 'unknown', - provisioning = 'provisioning', - provisioning_error = 'provisioning_error', - warning = 'warning', } /** @@ -26,7 +23,7 @@ export enum SinkStates { */ export enum SinkBackends { prometheus = 'prometheus', - otlp = 'otlphttp', + otlp = 'otlphttp' } /** diff --git a/ui/src/app/common/interfaces/orb/sink/config/otlp.config.interface.ts b/ui/src/app/common/interfaces/orb/sink/config/otlp.config.interface.ts index f3643b832..4d191fce6 100644 --- a/ui/src/app/common/interfaces/orb/sink/config/otlp.config.interface.ts +++ b/ui/src/app/common/interfaces/orb/sink/config/otlp.config.interface.ts @@ -23,13 +23,13 @@ export interface OtlpConfig extends SinkConfig { * Username|Email(?) {string} */ username?: string; - }; + } exporter: |any| { /** * Endpoint (Otlp sinks) or Remote Host (Prometheus sink) Link {string} */ endpoint?: string; remote_host?: string; - }; - -} + } + +} \ No newline at end of file diff --git a/ui/src/app/common/services/code.editor.service.ts b/ui/src/app/common/services/code.editor.service.ts index ad45681d5..07a00df63 100644 --- a/ui/src/app/common/services/code.editor.service.ts +++ b/ui/src/app/common/services/code.editor.service.ts @@ -1,4 +1,4 @@ -import { Injectable } from '@angular/core'; +import { Injectable } from "@angular/core"; import * as YAML from 'yaml'; @Injectable({ @@ -29,8 +29,8 @@ export class CodeEditorService { } checkEmpty (object) { - for (const key in object) { - if (object[key] === '' || typeof object[key] === 'undefined' || object[key] === null) { + for (let key in object) { + if (object[key] === "" || typeof object[key] === "undefined" || object[key] === null) { return true; } } diff --git a/ui/src/app/common/services/dataset/dataset.policies.service.ts b/ui/src/app/common/services/dataset/dataset.policies.service.ts index 78f88c959..97697e5a4 100644 --- a/ui/src/app/common/services/dataset/dataset.policies.service.ts +++ b/ui/src/app/common/services/dataset/dataset.policies.service.ts @@ -40,7 +40,7 @@ export class DatasetPoliciesService { .get(`${environment.datasetPoliciesUrl}/${id}`) .pipe( catchError((err) => { - if (err.status !== 404 && err.error.error !== 'non-existent entity') { + if (err.status !== 404 && err.error.error !== "non-existent entity") { this.notificationsService.error( 'Failed to fetch Dataset of this Policy', `Error: ${err.status} - ${err.statusText}`, diff --git a/ui/src/app/common/services/filter.service.ts b/ui/src/app/common/services/filter.service.ts index f97cc3b45..8d1d46701 100644 --- a/ui/src/app/common/services/filter.service.ts +++ b/ui/src/app/common/services/filter.service.ts @@ -75,7 +75,7 @@ export class FilterService { } removeFilterByParam(param: string) { - this.removeFilter(this._filters.findIndex((f) => f.param === param && f.name === 'Name' && f)); + this.removeFilter(this._filters.findIndex((filter) => filter.param === param && filter.name === 'Name' && filter)); } // make a decorator out of this? 
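For reviewers of the SinkStates change above: a minimal TypeScript sketch (editor's illustration, not part of the patch) of what consuming code is left to handle once the provisioning, provisioning_error, and warning members are removed. The helper name sinkStateLabel is hypothetical; only enum members visible in this hunk are assumed.

```typescript
import { SinkStates } from 'app/common/interfaces/orb/sink.interface';

// Hypothetical display helper: after this revert it can only branch on the
// members that remain in the enum (error, idle, unknown, ...).
export function sinkStateLabel(state: SinkStates): string {
  switch (state) {
    case SinkStates.error:
      return 'Error';
    case SinkStates.idle:
      return 'Idle';
    default:
      // unknown and any remaining member fall through here
      return 'Unknown';
  }
}
```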
diff --git a/ui/src/app/common/services/orb.service.ts b/ui/src/app/common/services/orb.service.ts index 83307c547..acf5f0aa8 100644 --- a/ui/src/app/common/services/orb.service.ts +++ b/ui/src/app/common/services/orb.service.ts @@ -74,7 +74,7 @@ export class OrbService implements OnDestroy { this.pollController$.pipe( switchMap((control) => { if (control === PollControls.RESUME) - return defer(() => timer(1, this.pollInterval)); + return defer(() => timer(1, this.pollInterval)); return EMPTY; }), ), @@ -116,11 +116,12 @@ export class OrbService implements OnDestroy { if (localStorage.getItem(pollIntervalKey)) { pollInterval = Number(localStorage.getItem(pollIntervalKey)); - } else { + } + else { pollInterval = 60000; localStorage.setItem(pollIntervalKey, pollInterval.toString()); } - + return pollInterval; } @@ -208,7 +209,7 @@ export class OrbService implements OnDestroy { : of([]); return groups$.pipe(map((groups) => ({ agent, groups, datasets }))); }), - ), + ) ); } @@ -266,8 +267,8 @@ export class OrbService implements OnDestroy { policy: { ...policy, groups, datasets }, groups, })), - ), - ); + ) + ); } getSinkView(id: string) { diff --git a/ui/src/app/pages/dashboard/dashboard.component.scss b/ui/src/app/pages/dashboard/dashboard.component.scss index 601e0580b..af7247aa2 100644 --- a/ui/src/app/pages/dashboard/dashboard.component.scss +++ b/ui/src/app/pages/dashboard/dashboard.component.scss @@ -1,5 +1,5 @@ nb-card { - margin: 10px !important; + margin: 10px 10px !important; nb-card-header { text-align: center; background: #232940; diff --git a/ui/src/app/pages/datasets/dataset-from/dataset-from.component.html b/ui/src/app/pages/datasets/dataset-from/dataset-from.component.html index d879673ae..952635bfd 100644 --- a/ui/src/app/pages/datasets/dataset-from/dataset-from.component.html +++ b/ui/src/app/pages/datasets/dataset-from/dataset-from.component.html @@ -1,4 +1,6 @@ - + Dataset Details + -
@@ -40,7 +31,7 @@ There are no agent groups available
-

-
+
@@ -116,18 +101,18 @@
-
+
*
-
+
At least one Sink is required. @@ -140,29 +125,31 @@
-
+ class="dataset-delete-button" + ghost + nbButton + shape="round" + type="button">Delete Dataset diff --git a/ui/src/app/pages/datasets/dataset-from/dataset-from.component.scss b/ui/src/app/pages/datasets/dataset-from/dataset-from.component.scss index d35129a97..26d282f03 100644 --- a/ui/src/app/pages/datasets/dataset-from/dataset-from.component.scss +++ b/ui/src/app/pages/datasets/dataset-from/dataset-from.component.scss @@ -1,27 +1,3 @@ -nb-card { - padding: 0 !important; - width: 600px; - height: fit-content; - min-height: 400px; - nb-card-header { - background: #232940 !important; - color: #969fb9 !important; - } - - nb-card-body { - overflow: hidden !important; - margin: 2rem 3rem !important; - padding: 0 !important; - } - -} -.info-icon { - font-size: 14px; - color: #ffffff; - margin-left: 3px; -} - - nb-icon { vertical-align: middle; } @@ -39,6 +15,10 @@ nb-icon { color: #df316f; } +nb-select { + width: 100%; +} + button { float: right; } @@ -54,37 +34,14 @@ nb-tabset { } .dataset-save-button { - margin-top: 3px; - background-color: #3089fc; - &.btn-disabled { - background: #232940 !important; - } + margin-top: 6px; + background-color: blue; } .dataset-delete-button { color: #df316f !important; float: left; - font-size: 13px !important; - font-weight: 600 !important; - padding: 6px 16px !important; - border-radius: 16px !important; - background-color: transparent; - outline: none; - border: none; - font-family: 'Montserrat'; - transition: background-color 0.3s ease !important; } -.dataset-delete-button:hover { - background-color: rgba(255, 255, 255, 0.05) !important; - } - .label-name { - color: #969fb9; - font-size: 13px; - margin-bottom: 0 !important; - } - .group-name { - margin-bottom: 0 !important; - } .orb-close-dialog { background-color: #23294000; @@ -252,19 +209,14 @@ nb-accordion { overflow-y: inherit !important; } -.input-agent-group { - width: 100%; -} -.match-agents-button { - background-color: transparent; - border: none; - outline: none; - color: #3089fc; - font-size: 12px; - float: left; - font-weight: 600; - margin-top: 2px; -} -.match-agents-button:hover { - color: #81b8ff; +.dataset-agent-group-input { + &:read-only { + background-color: #232940 !important; + cursor: default; + opacity: 0.5; + } } + +.input-agent-group { + width: 560px; +} \ No newline at end of file diff --git a/ui/src/app/pages/datasets/dataset-from/dataset-from.component.ts b/ui/src/app/pages/datasets/dataset-from/dataset-from.component.ts index f0976e437..829ac94f3 100644 --- a/ui/src/app/pages/datasets/dataset-from/dataset-from.component.ts +++ b/ui/src/app/pages/datasets/dataset-from/dataset-from.component.ts @@ -1,4 +1,4 @@ -import { ChangeDetectorRef, Component, Input, OnChanges, OnInit, SimpleChange, SimpleChanges } from '@angular/core'; +import { ChangeDetectorRef, Component, Input, OnInit } from '@angular/core'; import { AbstractControl, FormBuilder, @@ -17,7 +17,6 @@ import { DatasetPoliciesService } from 'app/common/services/dataset/dataset.poli import { NotificationsService } from 'app/common/services/notifications/notifications.service'; import { SinksService } from 'app/common/services/sinks/sinks.service'; import { DatasetDeleteComponent } from 'app/pages/datasets/delete/dataset.delete.component'; -import { AgentMatchComponent } from 'app/pages/fleet/agents/match/agent.match.component'; import { Observable, of } from 'rxjs'; export const DATASET_RESPONSE = { @@ -39,7 +38,7 @@ const CONFIG = { templateUrl: './dataset-from.component.html', styleUrls: 
['./dataset-from.component.scss'], }) -export class DatasetFromComponent implements OnInit, OnChanges { +export class DatasetFromComponent implements OnInit { @Input() dataset: Dataset; @@ -51,8 +50,6 @@ export class DatasetFromComponent implements OnInit, OnChanges { isEdit: boolean; - isGroupSelected: boolean = false; - selectedGroup: string; groupName: string; selectedPolicy: string; @@ -97,10 +94,6 @@ export class DatasetFromComponent implements OnInit, OnChanges { this.getDatasetAvailableConfigList(); this.readyForms(); - - this.form.get('agent_group_id').valueChanges.subscribe(value => { - this.ngOnChanges({ agent_group_id: new SimpleChange(null, value, true) }); - }); } private _selectedSinks: Sink[]; @@ -180,24 +173,6 @@ export class DatasetFromComponent implements OnInit, OnChanges { this.filteredAgentGroups$ = of(this.filter(value)); } - onMatchingAgentsModal() { - this.dialogService.open(AgentMatchComponent, { - context: { - agentGroupId: this.form.controls.agent_group_id.value, - policy: this.policy, - }, - autoFocus: true, - closeOnEsc: true, - }); - } - ngOnChanges(changes: SimpleChanges): void { - if (changes.agent_group_id.currentValue) { - this.isGroupSelected = true; - } else { - this.isGroupSelected = false; - } - } - ngOnInit(): void { if (!!this.group) { this.selectedGroup = this.group.id; diff --git a/ui/src/app/pages/datasets/delete/dataset.delete.component.scss b/ui/src/app/pages/datasets/delete/dataset.delete.component.scss index 812be145b..ecb4178c6 100644 --- a/ui/src/app/pages/datasets/delete/dataset.delete.component.scss +++ b/ui/src/app/pages/datasets/delete/dataset.delete.component.scss @@ -1,5 +1,5 @@ nb-card { - padding: 0 !important; + max-width: 38rem !important; nb-card-header { background: #232940 !important; @@ -12,10 +12,6 @@ nb-card { p { color: #969fb9 !important; - margin-bottom: 1rem !important; - font-weight: 500 !important; - font-size: 14px !important; - line-height: 24px !important; } .ns1-red { diff --git a/ui/src/app/pages/datasets/policies.agent/add/agent.policy.add.component.html b/ui/src/app/pages/datasets/policies.agent/add/agent.policy.add.component.html index 2a52df198..e3a851290 100644 --- a/ui/src/app/pages/datasets/policies.agent/add/agent.policy.add.component.html +++ b/ui/src/app/pages/datasets/policies.agent/add/agent.policy.add.component.html @@ -154,8 +154,8 @@

{{ isEdit ? 'Edit Agent Policy' : 'Create Agent Policy'}}

YAML
-
- Paste or Upload your {{isJsonMode ? 'Json' : 'Yaml'}} configuration +
+

Paste or Upload your {{isJsonMode ? 'Json' : 'Yaml'}} configuration

-
-
- - -
-
- -
-
- -
-
-
- -
-
- -
-
- -
-
-
-
+
+
+ + +
+
+ + +
diff --git a/ui/src/app/pages/datasets/policies.agent/view/agent.policy.view.component.scss b/ui/src/app/pages/datasets/policies.agent/view/agent.policy.view.component.scss
index 6daef030f..19ed35e11 100644
--- a/ui/src/app/pages/datasets/policies.agent/view/agent.policy.view.component.scss
+++ b/ui/src/app/pages/datasets/policies.agent/view/agent.policy.view.component.scss
@@ -6,14 +6,6 @@ h4 {
   line-height: 2rem;
   margin-bottom: 1.5rem;
 }
-.row {
-  display: flex;
-
-}
-nb-tab {
-  padding: 0 !important;
-  overflow: hidden !important;
-}
 
 nb-card {
   border: transparent;
diff --git a/ui/src/app/pages/datasets/policies.agent/view/agent.policy.view.component.ts b/ui/src/app/pages/datasets/policies.agent/view/agent.policy.view.component.ts
index 21c2e83eb..04fffa9ef 100644
--- a/ui/src/app/pages/datasets/policies.agent/view/agent.policy.view.component.ts
+++ b/ui/src/app/pages/datasets/policies.agent/view/agent.policy.view.component.ts
@@ -59,8 +59,6 @@ export class AgentPolicyViewComponent implements OnInit, OnDestroy {
 
   lastUpdate: Date | null = null;
 
-  errorConfigMessage: string;
-
   @ViewChild(PolicyDetailsComponent) detailsComponent: PolicyDetailsComponent;
 
   @ViewChild(PolicyInterfaceComponent)
@@ -77,7 +75,6 @@
     private editor: CodeEditorService,
   ) {
     this.isRequesting = false;
-    this.errorConfigMessage = '';
   }
 
   ngOnInit() {
@@ -89,7 +86,8 @@
       this.isLoading = true;
       if (newPolicyId) {
         this.policyId = newPolicyId;
-      } else {
+      }
+      else {
         this.policyId = this.route.snapshot.paramMap.get('id');
       }
       this.retrievePolicy();
@@ -98,14 +96,10 @@
   }
 
   isEditMode() {
-    const resp = Object.values(this.editMode).reduce(
+    return Object.values(this.editMode).reduce(
       (prev, cur) => prev || cur,
       false,
     );
-    if (!resp) {
-      this.errorConfigMessage = '';
-    }
-    return resp;
   }
 
   canSave() {
@@ -113,25 +107,13 @@
       ? this.detailsComponent?.formGroup?.status === 'VALID'
       : true;
 
-    const config = this.interfaceComponent?.code;
+    let config = this.interfaceComponent?.code
 
     let interfaceValid = false;
 
-    if (this.policy.format === 'json') {
-      if (this.editor.isJson(config)) {
-        interfaceValid = true;
-        this.errorConfigMessage = '';
-      } else {
-        interfaceValid = false;
-        this.errorConfigMessage = 'Invalid JSON configuration, check syntax errors';
-      }
-    } else if (this.policy.format === 'yaml') {
-      if (this.editor.isYaml(config) && !this.editor.isJson(config)) {
-        interfaceValid = true;
-        this.errorConfigMessage = '';
-      } else {
-        interfaceValid = false;
-        this.errorConfigMessage = 'Invalid YAML configuration, check syntax errors';
-      }
+    if (this.editor.isJson(config)) {
+      interfaceValid = true;
+    } else if (this.editor.isYaml(config)) {
+      interfaceValid = true;
     }
 
     return detailsValid && interfaceValid;
   }
@@ -160,9 +142,6 @@
     try {
       if (format === 'yaml') {
-        if (this.editor.isJson(policyInterface)) {
-          throw new Error('Invalid YAML format');
-        }
         yaml.load(policyInterface);
 
         interfacePartial = {
@@ -186,23 +165,22 @@
     this.policiesService.editAgentPolicy(payload).subscribe(
       (resp) => {
-        this.notifications.success('Agent Policy updated successfully', '');
-        this.discard();
-        this.policy = resp;
-        this.orb.refreshNow();
-        this.isRequesting = false;
+          this.notifications.success('Agent Policy updated successfully', '');
+          this.discard();
+          this.policy = resp;
+          this.orb.refreshNow();
+          this.isRequesting = false;
       },
-      (err) => {
+      (error) => {
         this.isRequesting = false;
-      },
-    );
+      }
+      );
   } catch (err) {
     this.notifications.error(
       'Failed to edit Agent Policy',
       `Error: Invalid ${format.toUpperCase()}`,
     );
-    this.isRequesting = false;
   }
 }
@@ -229,21 +207,21 @@
         if (confirm) {
           this.duplicatePolicy(this.policy);
         }
-      });
+      })
   }
 
   duplicatePolicy(agentPolicy: any) {
     this.policiesService
-      .duplicateAgentPolicy(agentPolicy.id)
-      .subscribe((newAgentPolicy) => {
-        if (newAgentPolicy?.id) {
-          this.notifications.success(
-            'Agent Policy Duplicated',
-            `New Agent Policy Name: ${newAgentPolicy?.name}`,
-          );
-          this.router.navigateByUrl(`/pages/datasets/policies/view/${newAgentPolicy?.id}`);
-          this.fetchData(newAgentPolicy.id);
-        }
-      });
+    .duplicateAgentPolicy(agentPolicy.id)
+    .subscribe((newAgentPolicy) => {
+      if (newAgentPolicy?.id) {
+        this.notifications.success(
+          'Agent Policy Duplicated',
+          `New Agent Policy Name: ${newAgentPolicy?.name}`,
+        );
+        this.router.navigateByUrl(`/pages/datasets/policies/view/${newAgentPolicy?.id}`);
+        this.fetchData(newAgentPolicy.id);
+      }
+    });
   }
 
   ngOnDestroy() {
@@ -276,14 +254,14 @@
   }
 
   hasChanges() {
-    const policyDetails = this.detailsComponent.formGroup?.value;
+    let policyDetails = this.detailsComponent.formGroup?.value;
     const tags = this.detailsComponent.selectedTags;
 
-    const description = this.policy.description ? this.policy.description : '';
-    const formsDescription = policyDetails.description === null ? '' : policyDetails.description;
+    const description = this.policy.description ? this.policy.description : "";
+    const formsDescription = policyDetails.description === null ? "" : policyDetails.description
 
-    const selectedTags = JSON.stringify(tags);
-    const orb_tags = JSON.stringify(this.policy.tags);
+    let selectedTags = JSON.stringify(tags);
+    let orb_tags = JSON.stringify(this.policy.tags);
 
     if (policyDetails.name !== this.policy.name || formsDescription !== description || selectedTags !== orb_tags) {
       return true;
diff --git a/ui/src/app/pages/fleet/agents/add/agent.add.component.scss b/ui/src/app/pages/fleet/agents/add/agent.add.component.scss
index 27c89c7b0..e6bdcc01c 100644
--- a/ui/src/app/pages/fleet/agents/add/agent.add.component.scss
+++ b/ui/src/app/pages/fleet/agents/add/agent.add.component.scss
@@ -110,7 +110,7 @@ nb-card-footer {
 .review-label {
   font-family: 'Montserrat';
   font-size: 13px;
-  font-weight: 400 !important;
+  font-weight: 400 !important; 
   margin: 0;
   color: #969fb9 !important;
 }
diff --git a/ui/src/app/pages/fleet/agents/delete/agent.delete.component.scss b/ui/src/app/pages/fleet/agents/delete/agent.delete.component.scss
index 003444e84..8ac634efb 100644
--- a/ui/src/app/pages/fleet/agents/delete/agent.delete.component.scss
+++ b/ui/src/app/pages/fleet/agents/delete/agent.delete.component.scss
@@ -1,6 +1,5 @@
 nb-card {
   max-width: 38rem !important;
-  padding: 0 !important;
 
   nb-card-header {
     background: #232940 !important;
@@ -13,10 +12,6 @@ nb-card {
 
   p {
     color: #969fb9 !important;
-    margin-bottom: 1rem !important;
-    font-weight: 500 !important;
-    font-size: 14px !important;
-    line-height: 24px !important;
   }
 
   .ns1-red {
diff --git a/ui/src/app/pages/fleet/agents/key/agent.key.component.scss b/ui/src/app/pages/fleet/agents/key/agent.key.component.scss
index 5acf7fd09..c87226b2e 100644
--- a/ui/src/app/pages/fleet/agents/key/agent.key.component.scss
+++ b/ui/src/app/pages/fleet/agents/key/agent.key.component.scss
@@ -13,14 +13,14 @@ nb-card {
       float: right;
     }
     nb-icon {
-      float: right;
+      float: right ;
     }
   }
 
   nb-card-body {
     border-bottom-left-radius: 0.5rem;
     border-bottom-right-radius: 0.5rem;
-    margin: 0 2rem 2rem;
+    margin: 0 2rem 2rem 2rem;
     padding: 0;
 
     p {
diff --git a/ui/src/app/pages/fleet/agents/key/agent.key.component.ts b/ui/src/app/pages/fleet/agents/key/agent.key.component.ts
index a8202f70d..5310006dd 100644
--- a/ui/src/app/pages/fleet/agents/key/agent.key.component.ts
+++ b/ui/src/app/pages/fleet/agents/key/agent.key.component.ts
@@ -89,7 +89,7 @@ orbcommunity/orb-agent run -c /usr/local/orb/agent.yaml`;
     } else if (target === 'command') {
       this.copyCommandIcon = 'checkmark-outline';
       setTimeout(() => {
-        this.copyCommandIcon = 'copy-outline';
+        this.copyCommandIcon = "copy-outline";
       }, 2000);
     }
   }
@@ -106,7 +106,8 @@ orbcommunity/orb-agent run -c /usr/local/orb/agent.yaml`;
       a.download = `${this.agent.id}.txt`;
       a.click();
       window.URL.revokeObjectURL(url);
-    } else if (commandType === 'fileConfig') {
+    }
+    else if (commandType === 'fileConfig') {
       const blob = new Blob([this.fileConfigCommandCopy], { type: 'text/plain' });
       const url = window.URL.createObjectURL(blob);
       const a = document.createElement('a');
diff --git a/ui/src/app/pages/fleet/agents/list/agent.list.component.ts b/ui/src/app/pages/fleet/agents/list/agent.list.component.ts
index 7a7e2b886..702b66eaf 100644
--- a/ui/src/app/pages/fleet/agents/list/agent.list.component.ts
+++ b/ui/src/app/pages/fleet/agents/list/agent.list.component.ts
@@ -53,7 +53,7 @@ export class AgentListComponent implements AfterViewInit, AfterViewChecked, OnDe
 
   canResetAgents: boolean;
   isResetting: boolean;
-
+  
   private agentsSubscription: Subscription;
 
@@ -111,7 +111,7 @@ export class AgentListComponent implements AfterViewInit, AfterViewChecked, OnDe
       map(agents => {
         return agents.map(agent => {
           let version: string;
-          if (agent.state !== AgentStates.new && agent?.agent_metadata?.orb_agent?.version) {
+          if (agent.state !== 'new') {
             version = agent.agent_metadata.orb_agent.version;
           } else {
             version = '-';
           }
@@ -121,7 +121,7 @@
             version,
           };
         });
-      }),
+      })
     );
 
     this.columns = [];
@@ -210,7 +210,7 @@
         minWidth: 150,
         name: 'Name',
         cellTemplate: this.agentNameTemplateCell,
-        resizeable: true,
+        resizeable: true, 
       },
       {
         prop: 'state',
@@ -219,7 +219,7 @@
         canAutoResize: true,
         name: 'Status',
         cellTemplate: this.agentStateTemplateRef,
-        resizeable: true,
+        resizeable: true, 
       },
       {
         prop: 'policy_agg_info',
@@ -228,7 +228,7 @@
         minWidth: 150,
         name: 'Policies',
         cellTemplate: this.agentPolicyStateTemplateRef,
-        resizeable: true,
+        resizeable: true, 
       },
       {
         prop: 'combined_tags',
@@ -245,7 +245,7 @@
             .map(([key, value]) => `${key}:${value}`)
             .join(','),
         ),
-        resizeable: true,
+        resizeable: true, 
       },
       {
         prop: 'version',
@@ -255,7 +255,7 @@
         name: 'Version',
         sortable: true,
         cellTemplate: this.agentVersionTemplateCell,
-        resizeable: true,
+        resizeable: true, 
       },
       {
         prop: 'ts_last_hb',
@@ -265,7 +265,7 @@
         name: 'Last Activity',
         sortable: true,
         cellTemplate: this.agentLastActivityTemplateCell,
-        resizeable: true,
+        resizeable: true, 
       },
       {
         name: '',
@@ -275,19 +275,19 @@
         canAutoResize: true,
         sortable: false,
         cellTemplate: this.actionsTemplateCell,
-        resizeable: true,
+        resizeable: true, 
       },
     ];
   }
 
-  public onCheckboxChange(event: any, row: any): void {
-    const selectedAgent = {
+  public onCheckboxChange(event: any, row: any): void {    
+    let selectedAgent = {
       id: row.id,
       resetable: true,
       name: row.name,
       state: row.state,
-    };
+    }
     if (this.getChecked(row) === false) {
       let resetable = true;
       if (row.state === 'new' || row.state === 'offline') {
@@ -349,7 +349,7 @@
   }
   onOpenDeleteSelected() {
     const selected = this.selected;
-    const elementName = 'Agents';
+    const elementName = "Agents"
     this.dialogService
       .open(DeleteSelectedComponent, {
         context: { selected, elementName },
@@ -368,15 +368,15 @@
   deleteSelectedAgents() {
     this.selected.forEach((agent) => {
       this.agentService.deleteAgent(agent.id).subscribe();
-    });
+    })
     this.notificationsService.success('All selected Agents delete requests succeeded', '');
   }
 
   onOpenResetAgents() {
-    const selected = this.selected;
+    const size = this.selected.length;
     this.dialogService
       .open(AgentResetComponent, {
-        context: { selected },
+        context: { size },
         autoFocus: true,
         closeOnEsc: true,
       })
@@ -385,14 +385,14 @@
         this.resetAgents();
         this.orb.refreshNow();
       }
-    });
+    })
   }
 
   resetAgents() {
     if (!this.isResetting) {
       this.isResetting = true;
       this.selected.forEach((agent) => {
         this.agentService.resetAgent(agent.id).subscribe();
-      });
+      })
       this.notifyResetSuccess();
       this.selected = [];
       this.isResetting = false;
@@ -409,7 +409,7 @@
           name: row.name,
           state: row.state,
           resetable: row.state === 'new' || row.state === 'offline' ? false : true,
-        };
+        }
         this.selected.push(policySelected);
       });
     });
diff --git a/ui/src/app/pages/fleet/agents/match/agent.match.component.ts b/ui/src/app/pages/fleet/agents/match/agent.match.component.ts
index 11d10a645..21ffc5217 100644
--- a/ui/src/app/pages/fleet/agents/match/agent.match.component.ts
+++ b/ui/src/app/pages/fleet/agents/match/agent.match.component.ts
@@ -8,7 +8,6 @@ import { AgentGroup } from 'app/common/interfaces/orb/agent.group.interface';
 import { AgentsService } from 'app/common/services/agents/agents.service';
 import { Router } from '@angular/router';
 import { AgentPolicy, AgentPolicyStates } from 'app/common/interfaces/orb/agent.policy.interface';
-import { AgentGroupsService } from 'app/common/services/agents/agent.groups.service';
 
 @Component({
   selector: 'ngx-agent-match-component',
@@ -22,9 +21,6 @@
   @Input()
   agentGroup: AgentGroup;
 
-  @Input()
-  agentGroupId: string;
-
   @Input()
   policy!: AgentPolicy;
 
@@ -68,7 +64,6 @@
     protected dialogRef: NbDialogRef,
     protected agentsService: AgentsService,
     protected router: Router,
-    protected groupsService: AgentGroupsService,
   ) {
     this.specificPolicy = false;
   }
@@ -132,40 +127,29 @@
   }
 
   onOpenView(agent: any) {
-    this.router.navigateByUrl(`pages/fleet/agents/view/${agent.id}`);
+    this.router.navigateByUrl(`pages/fleet/agents/view/${ agent.id }`);
     this.dialogRef.close();
   }
 
   updateMatchingAgents() {
-    if (!!this.agentGroupId) {
-      this.groupsService.getAgentGroupById(this.agentGroupId).subscribe(
-        (resp) => {
-          this.agentGroup = resp;
-          this.getMatchingAgentsInfo();
-        },
-      );
-    } else {
-      this.getMatchingAgentsInfo();
-    }
-  }
-  getMatchingAgentsInfo() {
     const { tags } = this.agentGroup;
     const tagsList = Object.keys(tags).map(key => ({ [key]: tags[key] }));
 
     this.agentsService.getAllAgents(tagsList).subscribe(
       resp => {
-        if (!!this.policy) {
+        if(!!this.policy) {
           this.specificPolicy = true;
           this.agents = resp.map((agent) => {
-            const { policy_state } = agent;
+            const {policy_state} = agent;
             const policy_agg_info = !!policy_state && policy_state[this.policy.id]?.state || AgentPolicyStates.failedToApply;
-            return { ...agent, policy_agg_info };
-          });
+            return {...agent, policy_agg_info };
+          })
         } else {
           this.agents = resp;
         }
       },
     );
   }
+
   onClose() {
     this.dialogRef.close(false);
   }
diff --git a/ui/src/app/pages/fleet/agents/reset/agent.reset.component.html b/ui/src/app/pages/fleet/agents/reset/agent.reset.component.html
index d2096fcdf..6a7488242 100644
--- a/ui/src/app/pages/fleet/agents/reset/agent.reset.component.html
+++ b/ui/src/app/pages/fleet/agents/reset/agent.reset.component.html
@@ -11,27 +11,17 @@
-

Are you sure you want to reset a total of {{ selected.length }} Agent(s)?

-
-
-
- {{ item.name }} -
-
- {{ item.state | titlecase }} -
-
-
-

*To confirm, type the amount of agents to be reset.

+

Are you sure you want to reset a total of {{ size }} Agents?

+

*To confirm, type the amount of agents to be reset.

+ placeholder="{{size}}" [(ngModel)]="validationInput" + data-orb-qa-id="input#size"> - {{selected.length}} + {{size}}
diff --git a/ui/src/app/pages/fleet/agents/reset/agent.reset.component.scss b/ui/src/app/pages/fleet/agents/reset/agent.reset.component.scss
index 8c53dbf85..4f28bb2d8 100644
--- a/ui/src/app/pages/fleet/agents/reset/agent.reset.component.scss
+++ b/ui/src/app/pages/fleet/agents/reset/agent.reset.component.scss
@@ -1,6 +1,5 @@
 nb-card {
   max-width: 38rem !important;
-  padding: 0 !important;
 
   nb-card-header {
     background: #232940 !important;
@@ -13,10 +12,6 @@ nb-card {
 
   p {
     color: #969fb9 !important;
-    margin-bottom: 1rem !important;
-    font-weight: 500 !important;
-    font-size: 14px !important;
-    line-height: 24px !important;
   }
 
   .ns1-red {
@@ -53,40 +48,4 @@ nb-card {
   }
   .ns1red {
     color: #df316f !important;
-  }
-  .element-list {
-    max-height: 225px;
-    overflow-y: auto;
-    margin-left: 20px;
-  }
-  .span-accent {
-    font-size: 13px;
-    font-weight: 600;
-    float: right;
-  }
-  .item-row {
-    display: flex;
-    align-items: center;
-    border-radius: 6px;
-    width: 300px;
-    padding-left: 3px;
-    font-size: 13px;
-    font-weight: 600;
-  }
-  .item-row:hover {
-    background-color: #1e263d;
-  }
-  .col-8 {
-    flex: 1;
-    padding-left: 0;
-  }
-  .col-3 {
-    flex: 1;
-    padding-right: 0;
-  }
-  .overflow-ellipsis {
-    white-space: nowrap !important;
-    overflow: hidden !important;
-    text-overflow: ellipsis !important;
-    max-width: 350px !important;
-  }
+  }
\ No newline at end of file
diff --git a/ui/src/app/pages/fleet/agents/reset/agent.reset.component.ts b/ui/src/app/pages/fleet/agents/reset/agent.reset.component.ts
index 9fea705bf..59ec7a923 100644
--- a/ui/src/app/pages/fleet/agents/reset/agent.reset.component.ts
+++ b/ui/src/app/pages/fleet/agents/reset/agent.reset.component.ts
@@ -10,7 +10,7 @@ import { STRINGS } from 'assets/text/strings';
 export class AgentResetComponent {
   strings = STRINGS.agents;
 
-  @Input() selected: any[] = [];
+  @Input() size: Number;
 
   validationInput: Number;
 
@@ -28,6 +28,6 @@ export class AgentResetComponent {
   }
 
   isEnabled(): boolean {
-    return this.validationInput === this.selected.length;
+    return this.validationInput === this.size;
   }
-}
+}
\ No newline at end of file
diff --git a/ui/src/app/pages/fleet/agents/view/agent.view.component.html b/ui/src/app/pages/fleet/agents/view/agent.view.component.html
index 59da9207e..041f6fb04 100644
--- a/ui/src/app/pages/fleet/agents/view/agent.view.component.html
+++ b/ui/src/app/pages/fleet/agents/view/agent.view.component.html
@@ -4,41 +4,36 @@

Agent View

-
- -
- +
+ +
+
+ +
+
+
+ + + + {{ agent?.state | ngxCapitalize }} +
-
-
- - - - {{ agent?.state | ngxCapitalize }} - -
-
- - Last activity - - today, at {{ agent?.ts_last_hb | date: 'HH:mm z' }} - - - on {{ agent?.ts_last_hb | date: 'M/d/yy, HH:mm z' }} - +
+ + Last activity + + today, at {{ agent?.ts_last_hb | date: 'HH:mm z' }} - - This Agent has been provisioned but never connected. + + on {{ agent?.ts_last_hb | date: 'M/d/yy, HH:mm z' }} -
+
+ + This Agent has been provisioned but never connected. +
@@ -47,42 +42,21 @@
-
- - -
-
- -
-
- - -
-
-
- -
-
- - -
-
- -
-
-
- -
-
- -
-
- -
-
-
-
+
+
+ + +
+ +
+ + + + + +
diff --git a/ui/src/app/pages/fleet/agents/view/agent.view.component.scss b/ui/src/app/pages/fleet/agents/view/agent.view.component.scss index 682b95a3d..15c572b3b 100644 --- a/ui/src/app/pages/fleet/agents/view/agent.view.component.scss +++ b/ui/src/app/pages/fleet/agents/view/agent.view.component.scss @@ -21,14 +21,10 @@ h4 { line-height: 2rem; margin-bottom: 1.5rem; } -nb-tab { - padding: 0 !important; - overflow: hidden !important; -} + nb-card { border: transparent; border-radius: 0.5rem; - padding: 0 !important; nb-card-header { background-color: #232940; @@ -202,31 +198,18 @@ nb-card { color: #969fb9; font-size: 14px; } - -.state-circle { - width: 9px; - height: 9px; - border-radius: 50%; +.state { + font-size: 15px; + font-weight: 700; +} +.fa.fa-circle { + font-size: 11px; } .offline-circle { - width: 9px; - height: 9px; + width: 10px; + height: 10px; border: 2px solid #969fb9; border-radius: 50%; background-color: transparent; } -.state { - font-size: 15px; - font-weight: 700; - font-family: 'Montserrat'; -} -.state-div { - margin-bottom: 23px; -} -.date { - font-size: 14px; - font-weight: 400; - margin-top: 23px; - line-height: 1.25rem; -} diff --git a/ui/src/app/pages/fleet/agents/view/agent.view.component.ts b/ui/src/app/pages/fleet/agents/view/agent.view.component.ts index e0b62d7c6..ca6091fa6 100644 --- a/ui/src/app/pages/fleet/agents/view/agent.view.component.ts +++ b/ui/src/app/pages/fleet/agents/view/agent.view.component.ts @@ -38,9 +38,6 @@ export class AgentViewComponent implements OnInit, OnDestroy { agentSubscription: Subscription; - configFile = 'configFile'; - default = 'default'; - constructor( protected agentsService: AgentsService, protected route: ActivatedRoute, diff --git a/ui/src/app/pages/fleet/groups/add/agent.group.add.component.scss b/ui/src/app/pages/fleet/groups/add/agent.group.add.component.scss index 270143948..89db8a024 100644 --- a/ui/src/app/pages/fleet/groups/add/agent.group.add.component.scss +++ b/ui/src/app/pages/fleet/groups/add/agent.group.add.component.scss @@ -136,7 +136,7 @@ mat-chip nb-icon { color: #969fb9 !important; } label { - color: #969fb9; + color: #969FB9; } ::ng-deep .orb-breadcrumb { align-items: center; @@ -279,6 +279,6 @@ mat-chip-list { .review-label { font-family: 'Montserrat'; font-size: 13px; - font-weight: 400 !important; + font-weight: 400 !important; margin: 0; } diff --git a/ui/src/app/pages/fleet/groups/delete/agent.group.delete.component.scss b/ui/src/app/pages/fleet/groups/delete/agent.group.delete.component.scss index ab675c0a8..8ac634efb 100644 --- a/ui/src/app/pages/fleet/groups/delete/agent.group.delete.component.scss +++ b/ui/src/app/pages/fleet/groups/delete/agent.group.delete.component.scss @@ -1,6 +1,6 @@ nb-card { max-width: 38rem !important; - padding: 0 !important; + nb-card-header { background: #232940 !important; color: #969fb9 !important; @@ -12,10 +12,6 @@ nb-card { p { color: #969fb9 !important; - margin-bottom: 1rem !important; - font-weight: 500 !important; - font-size: 14px !important; - line-height: 24px !important; } .ns1-red { diff --git a/ui/src/app/pages/fleet/groups/list/agent.group.list.component.ts b/ui/src/app/pages/fleet/groups/list/agent.group.list.component.ts index 8c1c67e64..9009c7a39 100644 --- a/ui/src/app/pages/fleet/groups/list/agent.group.list.component.ts +++ b/ui/src/app/pages/fleet/groups/list/agent.group.list.component.ts @@ -265,7 +265,7 @@ export class AgentGroupListComponent } onOpenDeleteSelected() { const selected = this.selected; - const elementName = 'Agent Groups'; + 
const elementName = "Agent Groups" this.dialogService .open(DeleteSelectedComponent, { context: { selected, elementName }, @@ -284,7 +284,7 @@ export class AgentGroupListComponent deleteSelectedAgentGroups() { this.selected.forEach((group) => { this.agentGroupsService.deleteAgentGroup(group.id).subscribe(); - }); + }) this.notificationsService.success('All selected Groups delete requests succeeded', ''); } openDetailsModal(row: any) { @@ -308,10 +308,15 @@ export class AgentGroupListComponent closeOnEsc: true, }); } - public onCheckboxChange(event: any, row: any): void { + public onCheckboxChange(event: any, row: any): void { + let selectedGroup = { + id: row.id, + name: row.name, + } if (this.getChecked(row) === false) { - this.selected.push(row); - } else { + this.selected.push(selectedGroup); + } + else { for (let i = 0; i < this.selected.length; i++) { if (this.selected[i].id === row.id) { this.selected.splice(i, 1); @@ -331,7 +336,11 @@ export class AgentGroupListComponent this.groupsSubscription = this.filteredGroups$.subscribe(rows => { this.selected = []; rows.forEach(row => { - this.selected.push(row); + const policySelected = { + id: row.id, + name: row.name, + } + this.selected.push(policySelected); }); }); } else { diff --git a/ui/src/app/pages/pages-menu.ts b/ui/src/app/pages/pages-menu.ts index ca4f376f5..be3ab77b0 100644 --- a/ui/src/app/pages/pages-menu.ts +++ b/ui/src/app/pages/pages-menu.ts @@ -52,5 +52,5 @@ export const MENU_ITEMS = [ export function updateMenuItems(pageName: string) { MENU_ITEMS.forEach(item => { item.selected = item.title === pageName; - }); + }) } diff --git a/ui/src/app/pages/profile/profile.component.scss b/ui/src/app/pages/profile/profile.component.scss index 7112e9ff1..3e435b593 100644 --- a/ui/src/app/pages/profile/profile.component.scss +++ b/ui/src/app/pages/profile/profile.component.scss @@ -1,6 +1,5 @@ button { float: right; - font-family: 'Montserrat'; } .card-row { @@ -59,58 +58,58 @@ h4 { } } .header-subtitle { - color: #969fb9; + color: #969FB9; font-family: Montserrat; font-size: 14px; font-style: normal; - font-weight: 400; + font-weight: 400; margin: 0; } .account-information-card { width: 500px !important; height: fit-content; } -.circle { +.circle { width: 42px; - height: 42px; - border-radius: 50%; + height: 42px; + border-radius: 50%; } -.info-container { +.info-container { display: flex; - align-items: center; + align-items: center; position: relative; } .user-name-title { - color: var(--Lilac-gray, #969fb9); + color: var(--Lilac-gray, #969FB9); font-size: 14px; font-style: normal; font-weight: 500; line-height: 18px; - letter-spacing: -0.5px; + letter-spacing: -0.5px; margin-bottom: 5px; } .user-name { - color: var(--White, #fff); + color: var(--White, #FFF); font-size: 14px; font-weight: 500; line-height: 18px; - letter-spacing: -0.5px; + letter-spacing: -0.5px; } .edit-button { - color: #3089fc; + color: #3089FC; background-color: transparent; border: none; outline: none; font-size: 14px; font-style: normal; - font-weight: 600; + font-weight: 600; transition: background-color 0.3s ease !important; transition: color 0.3s ease !important; border-radius: 16px; padding: 5px 10px; } .edit-button:disabled { - color: #969fb9; + color: #969FB9 } .edit-button-work { @extend .edit-button; @@ -136,18 +135,19 @@ nb-card { border-radius: 8px !important; color: #969fb9 !important; padding: 0.5rem 1rem !important; - font-weight: 600 !important; - font-size: 15px !important; + font-weight: 600 !important; + font-size: 15px !important; + } 
nb-card-body { margin: 0 !important; - background-color: #2b3148 !important; + background-color: #2B3148 !important; border-bottom-left-radius: 8px !important; border-bottom-right-radius: 8px !important; } } label { - color: #969fb9; + color: #969FB9; } .float-right { float: right; @@ -157,7 +157,7 @@ input { } .input-password { margin-bottom: 20px; - background-color: #313e5d !important; + background-color: #313E5D !important; border: none; border-radius: 2px; } diff --git a/ui/src/app/pages/profile/profile.component.ts b/ui/src/app/pages/profile/profile.component.ts index 0516ea4a5..b69e0e366 100644 --- a/ui/src/app/pages/profile/profile.component.ts +++ b/ui/src/app/pages/profile/profile.component.ts @@ -31,14 +31,14 @@ export class ProfileComponent implements OnInit { showPassword2 = false; showPassword3 = false; - availableTimers = [15, 30, 60]; + availableTimers = [15, 30, 60] selectedTimer: Number; editMode = { work: false, profileName: false, password: false, - }; + } isPasswordValidSize: boolean; isPasswordValidMatch: boolean; @@ -49,13 +49,13 @@ export class ProfileComponent implements OnInit { private usersService: UsersService, private notificationsService: NotificationsService, private orb: OrbService, - ) { + ) { this.oldPasswordInput = ''; this.newPasswordInput = ''; this.confirmPasswordInput = ''; this.selectedTimer = this.getPollInterval(); } - + ngOnInit(): void { this.retrieveUserInfo(); } @@ -86,7 +86,7 @@ export class ProfileComponent implements OnInit { company: company, }, }; - + this.usersService.editUser(userReq).subscribe( resp => { this.notificationsService.success('User successfully edited', ''); @@ -96,10 +96,10 @@ export class ProfileComponent implements OnInit { }, error => { this.isRequesting = false; - }, + } ); } - + canChangePassword(): boolean { this.isPasswordValidSize = this.newPasswordInput.length >= this.ngxAdminMinPasswordSize; this.isPasswordValidMatch = this.newPasswordInput === this.confirmPasswordInput; @@ -125,7 +125,7 @@ export class ProfileComponent implements OnInit { }, error => { this.isRequesting = false; - }, + } ); } toggleEdit(name: string) { diff --git a/ui/src/app/pages/sinks/add/sink-add.component.html b/ui/src/app/pages/sinks/add/sink-add.component.html index 3150d3241..8ce2016f7 100644 --- a/ui/src/app/pages/sinks/add/sink-add.component.html +++ b/ui/src/app/pages/sinks/add/sink-add.component.html @@ -41,8 +41,8 @@

{{ strings.sink.add.header }}

>
-
- +
+
diff --git a/ui/src/app/pages/sinks/add/sink-add.component.scss b/ui/src/app/pages/sinks/add/sink-add.component.scss index 74ba37b65..d988b3e98 100644 --- a/ui/src/app/pages/sinks/add/sink-add.component.scss +++ b/ui/src/app/pages/sinks/add/sink-add.component.scss @@ -3,7 +3,7 @@ button { margin: 0 3px; float: left; color: #fff !important; - font-family: 'Montserrat', sans-serif; + font-family: "Montserrat", sans-serif; font-weight: 500; text-transform: none !important; } @@ -18,7 +18,7 @@ button { } .sink-cancel { - background-color: #3089fc !important; + background-color: #3089fc !important; } @@ -64,4 +64,4 @@ button { } } } - + \ No newline at end of file diff --git a/ui/src/app/pages/sinks/add/sink-add.component.ts b/ui/src/app/pages/sinks/add/sink-add.component.ts index 66acb9f44..82c088f50 100644 --- a/ui/src/app/pages/sinks/add/sink-add.component.ts +++ b/ui/src/app/pages/sinks/add/sink-add.component.ts @@ -27,10 +27,8 @@ export class SinkAddComponent { sinkBackend: any; - isRequesting: boolean; - - errorConfigMessage: string; - + isRequesting: boolean; + constructor( private sinksService: SinksService, private notificationsService: NotificationsService, @@ -39,31 +37,25 @@ export class SinkAddComponent { ) { this.createMode = true; this.isRequesting = false; - this.errorConfigMessage = ''; } canCreate() { const detailsValid = this.createMode ? this.detailsComponent?.formGroup?.status === 'VALID' : true; - + const configSink = this.configComponent?.code; let config; - + if (this.editor.isJson(configSink)) { config = JSON.parse(configSink); } else if (this.editor.isYaml(configSink)) { config = YAML.parse(configSink); - this.errorConfigMessage = ''; } else { - this.errorConfigMessage = 'Invalid YAML configuration, check syntax errors'; return false; } - - return !this.editor.checkEmpty(config.authentication) - && !this.editor.checkEmpty(config.exporter) - && detailsValid - && !this.checkString(config); + + return !this.editor.checkEmpty(config.authentication) && !this.editor.checkEmpty(config.exporter) && detailsValid && !this.checkString(config); } checkString(config: any): boolean { if (typeof config.authentication.password !== 'string' || typeof config.authentication.username !== 'string') { @@ -79,7 +71,7 @@ export class SinkAddComponent { const configSink = this.configComponent.code; const details = { ...sinkDetails }; - + let payload = {}; const config = YAML.parse(configSink); diff --git a/ui/src/app/pages/sinks/delete/sink.delete.component.scss b/ui/src/app/pages/sinks/delete/sink.delete.component.scss index 003444e84..8ac634efb 100644 --- a/ui/src/app/pages/sinks/delete/sink.delete.component.scss +++ b/ui/src/app/pages/sinks/delete/sink.delete.component.scss @@ -1,6 +1,5 @@ nb-card { max-width: 38rem !important; - padding: 0 !important; nb-card-header { background: #232940 !important; @@ -13,10 +12,6 @@ nb-card { p { color: #969fb9 !important; - margin-bottom: 1rem !important; - font-weight: 500 !important; - font-size: 14px !important; - line-height: 24px !important; } .ns1-red { diff --git a/ui/src/app/pages/sinks/details/sink.details.component.html b/ui/src/app/pages/sinks/details/sink.details.component.html index d8463242d..b545e8b33 100644 --- a/ui/src/app/pages/sinks/details/sink.details.component.html +++ b/ui/src/app/pages/sinks/details/sink.details.component.html @@ -18,7 +18,7 @@

{{strings.propNames.description}}

{{ sink.description }}

-

No Description Added

+

No Description Added

diff --git a/ui/src/app/pages/sinks/details/sink.details.component.ts b/ui/src/app/pages/sinks/details/sink.details.component.ts index 272a4d159..28c66ed29 100644 --- a/ui/src/app/pages/sinks/details/sink.details.component.ts +++ b/ui/src/app/pages/sinks/details/sink.details.component.ts @@ -27,7 +27,7 @@ export class SinkDetailsComponent implements OnInit { protected router: Router, ) { !this.sink.tags ? this.sink.tags = {} : null; - this.exporterField = ''; + this.exporterField = ""; } onOpenEdit(sink: any) { @@ -45,6 +45,6 @@ export class SinkDetailsComponent implements OnInit { } ngOnInit() { const exporter = this.sink.config.exporter; - this.exporterField = exporter.remote_host !== undefined ? 'Remote Host URL' : 'Endpoint URL'; + this.exporterField = exporter.remote_host !== undefined ? "Remote Host URL" : "Endpoint URL"; } } diff --git a/ui/src/app/pages/sinks/list/sink.list.component.scss b/ui/src/app/pages/sinks/list/sink.list.component.scss index 171421572..25a0c8de2 100644 --- a/ui/src/app/pages/sinks/list/sink.list.component.scss +++ b/ui/src/app/pages/sinks/list/sink.list.component.scss @@ -149,16 +149,7 @@ tr div p { color: #df316f; } &idle { - color: #f2994a; - } - &provisioning { - color: #3089fc; - } - &provioning_error { - color: #df316f; - } - &warning { - color: #f2c94c; + color: #f2994a; } } diff --git a/ui/src/app/pages/sinks/list/sink.list.component.ts b/ui/src/app/pages/sinks/list/sink.list.component.ts index cb18d5225..2d122f679 100644 --- a/ui/src/app/pages/sinks/list/sink.list.component.ts +++ b/ui/src/app/pages/sinks/list/sink.list.component.ts @@ -271,7 +271,7 @@ export class SinkListComponent implements AfterViewInit, AfterViewChecked, OnDes } onOpenDeleteSelected() { const selected = this.selected; - const elementName = 'Sinks'; + const elementName = "Sinks" this.dialogService .open(DeleteSelectedComponent, { context: { selected, elementName }, @@ -290,7 +290,7 @@ export class SinkListComponent implements AfterViewInit, AfterViewChecked, OnDes deleteSelectedSinks() { this.selected.forEach((sink) => { this.sinkService.deleteSink(sink.id).subscribe(); - }); + }) this.notificationsService.success('All selected Sinks delete requests succeeded', ''); } openDetailsModal(row: any) { @@ -309,15 +309,16 @@ export class SinkListComponent implements AfterViewInit, AfterViewChecked, OnDes filterByInactive = (sink) => sink.state === 'inactive'; - public onCheckboxChange(event: any, row: any): void { + public onCheckboxChange(event: any, row: any): void { const sinkSelected = { id: row.id, name: row.name, state: row.state, - }; + } if (this.getChecked(row) === false) { this.selected.push(sinkSelected); - } else { + } + else { for (let i = 0; i < this.selected.length; i++) { if (this.selected[i].id === row.id) { this.selected.splice(i, 1); @@ -340,7 +341,7 @@ export class SinkListComponent implements AfterViewInit, AfterViewChecked, OnDes id: row.id, name: row.name, state: row.state, - }; + } this.selected.push(sinkSelected); }); }); diff --git a/ui/src/app/pages/sinks/view/sink.view.component.html b/ui/src/app/pages/sinks/view/sink.view.component.html index 5a43fa250..51b3822f5 100644 --- a/ui/src/app/pages/sinks/view/sink.view.component.html +++ b/ui/src/app/pages/sinks/view/sink.view.component.html @@ -12,51 +12,53 @@

{{ strings.sink.view.header }}

- -
- - - + +
+
+ + + +
+
+
+ + + {{ sink?.state | ngxCapitalize }} +
-
-
- - - {{ sink?.state | ngxCapitalize }} - -
-
- - Created on {{ sink?.ts_created | date: 'M/d/yy, HH:mm z' }} - -
+
+ + Created on {{ sink?.ts_created | date: 'M/d/yy, HH:mm z' }} +
+
@@ -68,8 +70,8 @@

{{ strings.sink.view.header }}

-
- +
+
diff --git a/ui/src/app/pages/sinks/view/sink.view.component.scss b/ui/src/app/pages/sinks/view/sink.view.component.scss index 6074d883e..c4a64214a 100644 --- a/ui/src/app/pages/sinks/view/sink.view.component.scss +++ b/ui/src/app/pages/sinks/view/sink.view.component.scss @@ -3,7 +3,7 @@ button { &.policy-duplicate { color: #fff !important; - font-family: 'Montserrat', sans-serif; + font-family: "Montserrat", sans-serif; font-weight: 700; text-transform: none !important; @@ -18,7 +18,7 @@ button { &.policy-save { color: #fff !important; - font-family: 'Montserrat', sans-serif; + font-family: "Montserrat", sans-serif; font-weight: 700; text-transform: none !important; @@ -33,7 +33,7 @@ button { &.policy-discard { color: #fff !important; - font-family: 'Montserrat', sans-serif; + font-family: "Montserrat", sans-serif; font-weight: 700; text-transform: none !important; @@ -96,15 +96,12 @@ h4 { } } } -.state-circle { - width: 9px; - height: 9px; - border-radius: 50%; +.fa.fa-circle { + font-size: 11px; } .state { - font-size: 15px; + font-size: 16px; font-weight: 700; - font-family: 'Montserrat'; } .orb-service- { &active { @@ -117,16 +114,7 @@ h4 { color: #df316f; } &idle { - color: #f2994a; - } - &provisioning { - color: #3089fc; - } - &provioning_error { - color: #df316f; - } - &warning { - color: #f2c94c; + color: #f2994a; } } @@ -134,13 +122,4 @@ h4 { color: #969fb9; font-size: 14px; } -.state-div { - margin-bottom: 23px; -} -.date { - font-size: 14px; - font-weight: 400; - margin-top: 23px; - line-height: 1.25rem; -} diff --git a/ui/src/app/pages/sinks/view/sink.view.component.spec.ts b/ui/src/app/pages/sinks/view/sink.view.component.spec.ts index e4c6bad9e..b1b7437e0 100644 --- a/ui/src/app/pages/sinks/view/sink.view.component.spec.ts +++ b/ui/src/app/pages/sinks/view/sink.view.component.spec.ts @@ -8,7 +8,7 @@ describe('SinkViewComponent', () => { beforeEach(async(() => { TestBed.configureTestingModule({ - declarations: [ SinkViewComponent ], + declarations: [ SinkViewComponent ] }) .compileComponents(); })); diff --git a/ui/src/app/pages/sinks/view/sink.view.component.ts b/ui/src/app/pages/sinks/view/sink.view.component.ts index ab8678fe3..490876744 100644 --- a/ui/src/app/pages/sinks/view/sink.view.component.ts +++ b/ui/src/app/pages/sinks/view/sink.view.component.ts @@ -17,11 +17,11 @@ import { OrbService } from 'app/common/services/orb.service'; @Component({ selector: 'ngx-sink-view', templateUrl: './sink.view.component.html', - styleUrls: ['./sink.view.component.scss'], + styleUrls: ['./sink.view.component.scss'] }) export class SinkViewComponent implements OnInit, OnChanges, OnDestroy { strings = STRINGS; - + isLoading = false; sink: Sink; @@ -33,16 +33,14 @@ export class SinkViewComponent implements OnInit, OnChanges, OnDestroy { lastUpdate: Date | null = null; sinkStates = SinkStates; - + editMode = { details: false, config: false, - }; + } isRequesting: boolean; - errorConfigMessage: string; - @ViewChild(SinkDetailsComponent) detailsComponent: SinkDetailsComponent; @ViewChild(SinkConfigComponent) @@ -56,9 +54,8 @@ export class SinkViewComponent implements OnInit, OnChanges, OnDestroy { private dialogService: NbDialogService, private router: Router, private orb: OrbService, - ) { + ) { this.isRequesting = false; - this.errorConfigMessage = ''; } ngOnInit(): void { @@ -77,14 +74,10 @@ export class SinkViewComponent implements OnInit, OnChanges, OnDestroy { } isEditMode() { - const resp = Object.values(this.editMode).reduce( + return Object.values(this.editMode).reduce( (prev, 
cur) => prev || cur, false, ); - if (!resp) { - this.errorConfigMessage = ''; - } - return resp; } canSave() { @@ -100,9 +93,7 @@ export class SinkViewComponent implements OnInit, OnChanges, OnDestroy { config = JSON.parse(configSink); } else if (this.editor.isYaml(configSink)) { config = YAML.parse(configSink); - this.errorConfigMessage = ''; } else { - this.errorConfigMessage = 'Invalid YAML configuration, check syntax errors'; return false; } @@ -130,24 +121,19 @@ export class SinkViewComponent implements OnInit, OnChanges, OnDestroy { const sinkDetails = this.detailsComponent.formGroup?.value; const tags = this.detailsComponent.selectedTags; const configSink = this.configComponent.code; - + const details = { ...sinkDetails, tags }; - + + let payload = { id, backend, config: {}}; + try { - let payload: any; - if (this.editMode.config && !this.editMode.details) { - payload = { id, backend, config: {}}; - const config = YAML.parse(configSink); - payload.config = config; - } - if (this.editMode.details && !this.editMode.config) { - payload = { id, backend, ...details }; - } - if (this.editMode.details && this.editMode.config) { - payload = { id, backend, ...details, config: {}}; - const config = YAML.parse(configSink); - payload.config = config; + const config = YAML.parse(configSink); + payload.config = config; + + if (this.editMode.details) { + payload = { ...payload, ...details }; } + this.sinks.editSink(payload as Sink).subscribe( (resp) => { this.discard(); @@ -170,7 +156,7 @@ export class SinkViewComponent implements OnInit, OnChanges, OnDestroy { this.isLoading = false; this.cdr.markForCheck(); this.lastUpdate = new Date(); - }); + }) } ngOnDestroy(): void { @@ -201,8 +187,8 @@ export class SinkViewComponent implements OnInit, OnChanges, OnDestroy { hasChanges() { const sinkDetails = this.detailsComponent.formGroup?.value; const tags = this.detailsComponent.selectedTags; - const selectedTags = JSON.stringify(tags); - const orb_tags = this.sink.tags ? JSON.stringify(this.sink.tags) : '{}'; + let selectedTags = JSON.stringify(tags); + let orb_tags = this.sink.tags ? JSON.stringify(this.sink.tags) : "{}"; if (sinkDetails.name !== this.sink.name || sinkDetails?.description !== this.sink?.description || selectedTags !== orb_tags) { return true; diff --git a/ui/src/app/shared/components/delete/delete.selected.component.html b/ui/src/app/shared/components/delete/delete.selected.component.html index 01db8514b..a068e1781 100644 --- a/ui/src/app/shared/components/delete/delete.selected.component.html +++ b/ui/src/app/shared/components/delete/delete.selected.component.html @@ -11,34 +11,19 @@

Are you sure you want to delete a total of {{ selected?.length }} {{ elementName }}? This action cannot be undone.

-
-
-
- {{ item.name }} -
-
- {{ item.state | titlecase }} - {{ item.usage | titlecase }} - -
-
-
-
-

*To confirm, type the amount of {{ elementName }} to be delete.

- - - {{selected?.length}} - +
+ {{ item.name }} {{ item.state | titlecase }} {{ item.usage | titlecase }}
+

*To confirm, type the amount of {{ elementName }} to be delete.

+ + + {{selected?.length}} + -   Status:  +  Status:  {{ policy?.state }} -    Version:  +   Version:  {{ policy?.version }} -    Backend:  +   Backend:  {{ policy?.backend }} diff --git a/ui/src/app/shared/components/orb/agent/agent-policies-datasets/agent-policies-datasets.component.scss b/ui/src/app/shared/components/orb/agent/agent-policies-datasets/agent-policies-datasets.component.scss index 2cd1a73f1..e1c6e53eb 100644 --- a/ui/src/app/shared/components/orb/agent/agent-policies-datasets/agent-policies-datasets.component.scss +++ b/ui/src/app/shared/components/orb/agent/agent-policies-datasets/agent-policies-datasets.component.scss @@ -10,7 +10,6 @@ h4 { nb-card { border: transparent; border-radius: 0.5rem; - padding: 0 !important; nb-card-header { background-color: #232940; @@ -192,6 +191,7 @@ nb-list-item { line-height: 1; max-width: 360px !important; white-space: nowrap; + overflow: hidden; text-overflow: ellipsis; } // nb-accordion-item-header { @@ -200,8 +200,9 @@ nb-list-item { .scroll { max-height: 20em; } -.field { +.field{ white-space: nowrap; overflow: hidden; text-overflow: ellipsis; + min-width: 5ch; } diff --git a/ui/src/app/shared/components/orb/agent/agent-policies-datasets/agent-policies-datasets.component.ts b/ui/src/app/shared/components/orb/agent/agent-policies-datasets/agent-policies-datasets.component.ts index 6c7f5bb98..45f7f651c 100644 --- a/ui/src/app/shared/components/orb/agent/agent-policies-datasets/agent-policies-datasets.component.ts +++ b/ui/src/app/shared/components/orb/agent/agent-policies-datasets/agent-policies-datasets.component.ts @@ -51,22 +51,18 @@ export class AgentPoliciesDatasetsComponent implements OnInit, OnChanges { this.amountRunningPolicies = 0; } - identify(index, item) { - return item.id; - } - - ngOnInit(): void { + ngOnInit(): void { this.getAmountRunningPolicies(); } - + getAmountRunningPolicies() { this.policies.forEach(element => { - if (element.state === 'running') { + if (element.state == 'running') { this.amountRunningPolicies++; } - }); + }); } - + ngOnChanges(changes: SimpleChanges): void { if (changes.agent) { const policiesStates = this.agent?.last_hb_data?.policy_state; diff --git a/ui/src/app/shared/components/orb/agent/agent-provisioning/agent-provisioning.component.html b/ui/src/app/shared/components/orb/agent/agent-provisioning/agent-provisioning.component.html index a064d02a3..54f375229 100644 --- a/ui/src/app/shared/components/orb/agent/agent-provisioning/agent-provisioning.component.html +++ b/ui/src/app/shared/components/orb/agent/agent-provisioning/agent-provisioning.component.html @@ -1,42 +1,89 @@ - + - Default Provisioning Command + Provisioning Commands + + + {{ option | titlecase }} + + + - -
-      
-      
-        {{ defaultCommandShow }}
-      
-    
-
-
- - - - Provisioning Command with Configuration File - - - - - - - -
-      
+          
+            {{ defaultCommandShow }}
+          
+        
+
+
+ + + Provisioning Command with Configuration File + - - {{ fileConfigCommandShow }} - - - - - + + + + +
+          
+          
+            {{ fileConfigCommandShow }}
+          
+        
+
+
+

+ Click here + +   to learn more about how create and apply configuration files. +

+ + diff --git a/ui/src/app/shared/components/orb/agent/agent-provisioning/agent-provisioning.component.scss b/ui/src/app/shared/components/orb/agent/agent-provisioning/agent-provisioning.component.scss index 831954453..cf3f935a0 100644 --- a/ui/src/app/shared/components/orb/agent/agent-provisioning/agent-provisioning.component.scss +++ b/ui/src/app/shared/components/orb/agent/agent-provisioning/agent-provisioning.component.scss @@ -10,8 +10,6 @@ h4 { nb-card { border: transparent; border-radius: 0.5rem; - padding: 0 !important; - height: calc(100% - 40px) !important; nb-card-header { background-color: #232940; @@ -24,16 +22,15 @@ nb-card { margin: 0; } nb-icon { - float: right; + float: right ; } } nb-card-body { border-bottom-left-radius: 0.5rem; border-bottom-right-radius: 0.5rem; - margin: 0 !important; - padding: 0 1rem; - background-color: #1c2339 !important; + margin: 0 2rem 0 2rem; + padding: 0; label { color: #969fb9; @@ -51,9 +48,12 @@ nb-card { pre { display: flex; - + flex-direction: row-reverse; + flex-wrap: wrap; + align-content: space-between; + align-items: flex-start; + justify-content: space-between; margin: 0; - height: fit-content !important; background: transparent; padding: 0.75rem; @@ -64,17 +64,15 @@ nb-card { background: transparent; border: 0 transparent; color: #969fb9; + top: -0.25rem; + float: right; + right: -0.5rem; outline: none; - position: absolute; - top: 0; - right: 1.3rem; } code { color: #ffffff; line-height: 2.5 !important; - font-size: 14px !important; - float: left; } } } @@ -122,5 +120,15 @@ nb-card { button { box-shadow: none !important; outline: none !important; + margin-right: 15px; +} +a { + white-space: nowrap !important; + padding: 0 0.3rem !important; + background-color: #ff9f05 !important; + font-weight: 700 !important; + text-decoration: none !important; + color: #ffffff !important; + border-radius: 10px; + fill: #3089fc !important; } - diff --git a/ui/src/app/shared/components/orb/agent/agent-provisioning/agent-provisioning.component.ts b/ui/src/app/shared/components/orb/agent/agent-provisioning/agent-provisioning.component.ts index ffc9ce535..d3eead01b 100644 --- a/ui/src/app/shared/components/orb/agent/agent-provisioning/agent-provisioning.component.ts +++ b/ui/src/app/shared/components/orb/agent/agent-provisioning/agent-provisioning.component.ts @@ -1,16 +1,14 @@ -import { Component, Input, OnInit } from '@angular/core'; -import { AvailableOS } from 'app/common/services/agents/agents.service'; -import { Agent, AgentStates } from 'app/common/interfaces/orb/agent.interface'; +import { Component, Input, OnInit } from "@angular/core"; +import { AvailableOS } from "app/common/services/agents/agents.service"; +import { Agent, AgentStates } from "app/common/interfaces/orb/agent.interface"; @Component({ - selector: 'ngx-agent-provisioning', - templateUrl: './agent-provisioning.component.html', - styleUrls: ['./agent-provisioning.component.scss'], + selector: "ngx-agent-provisioning", + templateUrl: "./agent-provisioning.component.html", + styleUrls: ["./agent-provisioning.component.scss"], }) export class AgentProvisioningComponent implements OnInit { - @Input() agent: Agent; - @Input() provisioningType: string; agentStates = AgentStates; @@ -25,30 +23,26 @@ export class AgentProvisioningComponent implements OnInit { fileConfigCommandCopy: string; fileConfigCommandShow: string; - provisioningTypeMode = { - default: false, - configFile: false, - }; + hideCommand: boolean; + hideCommand2: boolean; + hideCommand3: boolean; constructor() { - 
this.copyCommandIcon = 'copy-outline'; + this.copyCommandIcon = "copy-outline"; } ngOnInit(): void { - if (this.provisioningType === 'default') { - this.provisioningTypeMode.default = true; - } else if (this.provisioningType === 'configFile') { - this.provisioningTypeMode.configFile = true; - - } + this.hideCommand2 = false; + this.hideCommand3 = true; + this.hideCommand = this.agent?.state !== this.agentStates.new; this.makeCommand2Copy(); } toggleIcon(target) { - if (target === 'command') { - this.copyCommandIcon = 'checkmark-outline'; + if (target === "command") { + this.copyCommandIcon = "checkmark-outline"; setTimeout(() => { - this.copyCommandIcon = 'copy-outline'; + this.copyCommandIcon = "copy-outline"; }, 2000); } } @@ -88,4 +82,18 @@ orbcommunity/orb-agent run -c /usr/local/orb/agent.yaml`; -v \${PWD}/:/usr/local/orb/ \\ orbcommunity/orb-agent run -c /usr/local/orb/agent.yaml`; } + +toggleProvisioningCommand(command: string) { + switch (command) { + case 'hideCommand': + this.hideCommand = !this.hideCommand; + break; + case 'hideCommand2': + this.hideCommand2 = !this.hideCommand2; + break; + case 'hideCommand3': + this.hideCommand3 = !this.hideCommand3; + break; + } +} } diff --git a/ui/src/app/shared/components/orb/policy/policy-datasets/policy-datasets.component.html b/ui/src/app/shared/components/orb/policy/policy-datasets/policy-datasets.component.html index 20bffceeb..d475094ee 100644 --- a/ui/src/app/shared/components/orb/policy/policy-datasets/policy-datasets.component.html +++ b/ui/src/app/shared/components/orb/policy/policy-datasets/policy-datasets.component.html @@ -10,10 +10,10 @@
-
+
+ class="orb orb-table-small">
diff --git a/ui/src/app/shared/components/orb/policy/policy-datasets/policy-datasets.component.scss b/ui/src/app/shared/components/orb/policy/policy-datasets/policy-datasets.component.scss index bd413f131..8da3a1ec3 100644 --- a/ui/src/app/shared/components/orb/policy/policy-datasets/policy-datasets.component.scss +++ b/ui/src/app/shared/components/orb/policy/policy-datasets/policy-datasets.component.scss @@ -1,7 +1,6 @@ nb-card { border: transparent; border-radius: 0.5rem; - padding: 0 !important; nb-card-header { background-color: #232940; @@ -27,14 +26,7 @@ nb-card { } } } -.dataset-table { - min-width: 600px; - height: 200px; - max-height: 300px; - min-height: 100px; - -} .summary-accent { color: #969fb9 !important; } @@ -91,7 +83,7 @@ nb-card { } } -mat-nav-list { +mat-nav-list{ display: flex !important; flex-direction: row; flex-wrap: nowrap !important; @@ -129,4 +121,4 @@ mat-nav-list { to { transform: translateX(-80%); } -} +} \ No newline at end of file diff --git a/ui/src/app/shared/components/orb/policy/policy-datasets/policy-datasets.component.ts b/ui/src/app/shared/components/orb/policy/policy-datasets/policy-datasets.component.ts index 5d7fccaae..581214a66 100644 --- a/ui/src/app/shared/components/orb/policy/policy-datasets/policy-datasets.component.ts +++ b/ui/src/app/shared/components/orb/policy/policy-datasets/policy-datasets.component.ts @@ -101,7 +101,7 @@ export class PolicyDatasetsComponent name: 'Agent Group', resizeable: true, canAutoResize: true, - flexGrow: 3, + width: 250, cellTemplate: this.groupTemplateCell, }, { @@ -109,7 +109,8 @@ export class PolicyDatasetsComponent name: 'Valid', resizeable: true, canAutoResize: true, - flexGrow: 1, + minWidth: 80, + width: 80, cellTemplate: this.validTemplateCell, }, { @@ -117,7 +118,7 @@ export class PolicyDatasetsComponent name: 'Sinks', resizeable: true, canAutoResize: true, - flexGrow: 3, + width: 450, cellTemplate: this.sinksTemplateCell, }, { @@ -125,8 +126,7 @@ export class PolicyDatasetsComponent prop: 'actions', resizeable: true, sortable: false, - canAutoResize: true, - flexGrow: 3, + width: 200, cellTemplate: this.actionsTemplateCell, }, ]; @@ -146,11 +146,7 @@ export class PolicyDatasetsComponent window.dispatchEvent(new Event('resize')); } } - getTableHeight() { - const rowHeight = 50; - const headerHeight = 50; - return (this.datasets.length * rowHeight) + headerHeight + 'px'; - } + onCreateDataset() { this.dialogService .open(DatasetFromComponent, { @@ -177,7 +173,6 @@ export class PolicyDatasetsComponent closeOnEsc: false, context: { dataset, - policy: this.policy, }, hasScroll: false, closeOnBackdropClick: true, @@ -251,5 +246,5 @@ export class PolicyDatasetsComponent closeOnEsc: true, }); } - + } diff --git a/ui/src/app/shared/components/orb/policy/policy-details/policy-details.component.html b/ui/src/app/shared/components/orb/policy/policy-details/policy-details.component.html index 228c1b505..8dc65bdb5 100644 --- a/ui/src/app/shared/components/orb/policy/policy-details/policy-details.component.html +++ b/ui/src/app/shared/components/orb/policy/policy-details/policy-details.component.html @@ -31,10 +31,9 @@

{{ policy?.name }}

-
+
-

{{ policy?.description }}

-

No description provided

+

{{ policy?.description }}

@@ -101,5 +100,4 @@ -
diff --git a/ui/src/app/shared/components/orb/policy/policy-details/policy-details.component.scss b/ui/src/app/shared/components/orb/policy/policy-details/policy-details.component.scss index 0c14986b2..1c7fd3ae2 100644 --- a/ui/src/app/shared/components/orb/policy/policy-details/policy-details.component.scss +++ b/ui/src/app/shared/components/orb/policy/policy-details/policy-details.component.scss @@ -1,7 +1,6 @@ nb-card { border: transparent; border-radius: 0.5rem; - padding: 0 !important; nb-card-header { background-color: #232940; @@ -12,9 +11,6 @@ nb-card { } nb-card-body { - padding-bottom: 0 !important; - margin: 0 !important; - label { color: #969fb9; } @@ -31,11 +27,7 @@ nb-card { } } } -.italic { - font-style: italic; - font-size: 0.9rem; - color: #d9deee; -} + .summary-accent { color: #969fb9 !important; } diff --git a/ui/src/app/shared/components/orb/policy/policy-details/policy-details.component.ts b/ui/src/app/shared/components/orb/policy/policy-details/policy-details.component.ts index 013d917aa..bd96f3558 100644 --- a/ui/src/app/shared/components/orb/policy/policy-details/policy-details.component.ts +++ b/ui/src/app/shared/components/orb/policy/policy-details/policy-details.component.ts @@ -86,7 +86,8 @@ export class PolicyDetailsComponent implements OnInit, OnChanges { this.editMode = value; if (this.editMode || this.interfaceEditMode) { this.orb.pausePolling(); - } else { + } + else { this.orb.startPolling(); } this.updateForm(); diff --git a/ui/src/app/shared/components/orb/policy/policy-groups/policy-groups.component.html b/ui/src/app/shared/components/orb/policy/policy-groups/policy-groups.component.html index a843d8a97..cfdbd4b9a 100644 --- a/ui/src/app/shared/components/orb/policy/policy-groups/policy-groups.component.html +++ b/ui/src/app/shared/components/orb/policy/policy-groups/policy-groups.component.html @@ -2,7 +2,7 @@ Assigned Groups
- + Group: {{ group?.name }} diff --git a/ui/src/app/shared/components/orb/policy/policy-groups/policy-groups.component.scss b/ui/src/app/shared/components/orb/policy/policy-groups/policy-groups.component.scss index 18fc107bb..c6572a462 100644 --- a/ui/src/app/shared/components/orb/policy/policy-groups/policy-groups.component.scss +++ b/ui/src/app/shared/components/orb/policy/policy-groups/policy-groups.component.scss @@ -14,7 +14,7 @@ button { nb-card { border: transparent; border-radius: 0.5rem; - padding: 0 !important; + nb-card-header { background-color: #232940; border-bottom: transparent; @@ -177,7 +177,7 @@ nb-card { border: none !important; border-radius: 8px !important; display: grid; - padding: 0 10px; + padding: 0 10px 0 10px; background-color: #1c2339; .item-body { diff --git a/ui/src/app/shared/components/orb/policy/policy-groups/policy-groups.component.ts b/ui/src/app/shared/components/orb/policy/policy-groups/policy-groups.component.ts index 7b72c7395..5dae8b988 100644 --- a/ui/src/app/shared/components/orb/policy/policy-groups/policy-groups.component.ts +++ b/ui/src/app/shared/components/orb/policy/policy-groups/policy-groups.component.ts @@ -56,7 +56,4 @@ export class PolicyGroupsComponent implements OnInit, OnChanges { unique(value, index, self) { return self.indexOf(value) === index; } - identify(index, item) { - return item.id; - } } diff --git a/ui/src/app/shared/components/orb/policy/policy-interface/policy-interface.component.html b/ui/src/app/shared/components/orb/policy/policy-interface/policy-interface.component.html index bc651fb82..f66dd5180 100644 --- a/ui/src/app/shared/components/orb/policy/policy-interface/policy-interface.component.html +++ b/ui/src/app/shared/components/orb/policy/policy-interface/policy-interface.component.html @@ -17,28 +17,18 @@ style="color: #df316f !important;"> Discard - - - - - - {{ errorConfigMessage }} +
+
+
+
diff --git a/ui/src/app/shared/components/orb/policy/policy-interface/policy-interface.component.scss b/ui/src/app/shared/components/orb/policy/policy-interface/policy-interface.component.scss
index aa80deaae..96cac03f9 100644
--- a/ui/src/app/shared/components/orb/policy/policy-interface/policy-interface.component.scss
+++ b/ui/src/app/shared/components/orb/policy/policy-interface/policy-interface.component.scss
@@ -1,7 +1,7 @@
 nb-card {
   border: transparent;
   border-radius: 0.5rem;
-  padding: 0 !important;
+
   nb-card-header {
     background-color: #232940;
     border-bottom: transparent;
@@ -12,7 +12,6 @@ nb-card {
 
   nb-card-body {
     padding: 0.25rem !important;
-    margin: 0 !important;
 
     label {
       color: #969fb9;
@@ -63,34 +62,15 @@ nb-card {
 }
 
 .code-editor {
+  height: calc(100%);
   width: calc(100%);
   padding: calc(1rem);
-  min-height: 367px;
-  max-height: 55vh;
-}
-.upload-button {
-  color: #3089fc;
-  background-color: transparent;
-  border: none;
-  font-weight: 600;
-  outline: none;
-  float: right;
-  border-radius: 15px;
-  padding: 6px 12px;
-  margin-right: 5px;
-  font-size: 0.875rem;
-  font-family: 'Montserrat';
-  transition: background-color 0.3s ease;
-}
-.upload-button:hover {
-  background-color: #171c30 !important;
 }
-.errorMessage {
-  position: absolute;
-  color: #df316f;
-  font-weight: 600;
-  font-size: 13px;
-  left: 20px;
-  bottom: 3px;
+.code-editor-wrapper {
+  min-height: 350px;
+  min-width: 200px;
+  height: calc(45vh);
+  width: calc(100%);
+  display: block;
 }
diff --git a/ui/src/app/shared/components/orb/policy/policy-interface/policy-interface.component.ts b/ui/src/app/shared/components/orb/policy/policy-interface/policy-interface.component.ts
index 01a5c0f79..2599fbded 100644
--- a/ui/src/app/shared/components/orb/policy/policy-interface/policy-interface.component.ts
+++ b/ui/src/app/shared/components/orb/policy/policy-interface/policy-interface.component.ts
@@ -13,8 +13,6 @@ import { AgentPolicy } from 'app/common/interfaces/orb/agent.policy.interface';
 import { FormBuilder, FormControl, Validators } from '@angular/forms';
 import IStandaloneEditorConstructionOptions = monaco.editor.IStandaloneEditorConstructionOptions;
 import { OrbService } from 'app/common/services/orb.service';
-import { EditorComponent } from 'ngx-monaco-editor';
-
 
 @Component({
   selector: 'ngx-policy-interface',
@@ -34,11 +32,8 @@ export class PolicyInterfaceComponent implements OnInit, AfterViewInit, OnChange
   @Input()
   detailsEditMode: boolean;
 
-  @Input()
-  errorConfigMessage: string;
-
-  @ViewChild(EditorComponent, { static: true })
-  editorComponent: EditorComponent;
+  @ViewChild('editorComponent')
+  editor;
 
   editorOptions: IStandaloneEditorConstructionOptions = {
     theme: 'vs-dark',
@@ -47,11 +42,10 @@ export class PolicyInterfaceComponent implements OnInit, AfterViewInit, OnChange
     detectIndentation: true,
     tabSize: 2,
     autoIndent: 'full',
-    formatOnPaste: true,
     trimAutoWhitespace: true,
     formatOnType: true,
     matchBrackets: 'always',
-    language: 'json',
+    language: 'yaml',
     automaticLayout: true,
     glyphMargin: false,
     folding: true,
@@ -66,8 +60,6 @@ export class PolicyInterfaceComponent implements OnInit, AfterViewInit, OnChange
 
   formControl: FormControl;
 
-
-
   constructor(
     private fb: FormBuilder,
     private orb: OrbService,
@@ -78,25 +70,10 @@ export class PolicyInterfaceComponent implements OnInit, AfterViewInit, OnChange
     this.editModeChange = new EventEmitter();
     this.updateForm();
     this.detailsEditMode = false;
-    this.errorConfigMessage = '';
-  }
-
-  getCodeLineCount() {
-    const editorInstance = this.editorComponent['_editor'];
-    if (editorInstance) {
-      const model = editorInstance.getModel();
-      editorInstance.layout();
-      return model ? model.getLineCount() : 0;
-
-    }
-    return 0;
   }
 
   ngOnInit(): void {
     this.code = this.policy.policy_data || JSON.stringify(this.policy.policy, null, 2);
-    if (this.policy.format === 'yaml') {
-      this.editorOptions = { ...this.editorOptions, language: 'yaml' };
-    }
   }
 
   ngAfterViewInit() {
@@ -123,23 +100,12 @@ export class PolicyInterfaceComponent implements OnInit, AfterViewInit, OnChange
     this.editMode = edit;
     if (this.editMode || this.detailsEditMode) {
       this.orb.pausePolling();
-    } else {
+    }
+    else {
       this.orb.startPolling();
     }
     this.editorOptions = { ...this.editorOptions, readOnly: !edit };
     this.updateForm();
     !!notify && this.editModeChange.emit(this.editMode);
   }
-
-  onFileSelected(event: any) {
-    const file: File = event.target.files[0];
-    const reader: FileReader = new FileReader();
-
-    reader.onload = (e: any) => {
-      const fileContent = e.target.result;
-      this.code = fileContent;
-    };
-
-    reader.readAsText(file);
-  }
 }
diff --git a/ui/src/app/shared/components/orb/sink-control/sink-control.component.scss b/ui/src/app/shared/components/orb/sink-control/sink-control.component.scss
index d86b6561e..37944713e 100644
--- a/ui/src/app/shared/components/orb/sink-control/sink-control.component.scss
+++ b/ui/src/app/shared/components/orb/sink-control/sink-control.component.scss
@@ -1,3 +1,3 @@
 .sink-selector {
-  width: 100%;
+  width: 560px;
 }
diff --git a/ui/src/app/shared/components/orb/sink/sink-config/sink-config.component.html b/ui/src/app/shared/components/orb/sink/sink-config/sink-config.component.html
index c3584bdef..05d4d1e02 100644
--- a/ui/src/app/shared/components/orb/sink/sink-config/sink-config.component.html
+++ b/ui/src/app/shared/components/orb/sink/sink-config/sink-config.component.html
@@ -33,17 +33,16 @@
       #editorComponent
       [(ngModel)]="code"
      [options]="editorOptions"
-      class="code-editor editor-height-{{createMode}}"
+      class="code-editor"
       ngDefaultControl
       *ngIf="!isYaml">
-
     {{ errorConfigMessage }}
diff --git a/ui/src/app/shared/components/orb/sink/sink-config/sink-config.component.scss b/ui/src/app/shared/components/orb/sink/sink-config/sink-config.component.scss
index 3a9f5216e..cdd48cfd7 100644
--- a/ui/src/app/shared/components/orb/sink/sink-config/sink-config.component.scss
+++ b/ui/src/app/shared/components/orb/sink/sink-config/sink-config.component.scss
@@ -1,11 +1,5 @@
-
-.editor-height- {
-  &true {
-    height: 25.5rem;
-  }
-  &false {
-    height: 22.5rem;
-  }
+ngx-monaco-editor {
+  height: 25rem;
 }
 .summary-accent {
   color: #969fb9 !important;
@@ -47,7 +41,6 @@ nb-card {
   }
 
   nb-card-body {
-    margin: 0 !important;
     label {
       color: #969fb9;
     }
@@ -57,11 +50,3 @@ nb-card {
     }
   }
 }
- .errorMessage {
-  position: absolute;
-  color: #df316f;
-  font-weight: 600;
-  font-size: 13px;
-  left: 24px;
-  bottom: 2px;
- }
diff --git a/ui/src/app/shared/components/orb/sink/sink-config/sink-config.component.spec.ts b/ui/src/app/shared/components/orb/sink/sink-config/sink-config.component.spec.ts
index 9da64fdd6..9ffa7cd11 100644
--- a/ui/src/app/shared/components/orb/sink/sink-config/sink-config.component.spec.ts
+++ b/ui/src/app/shared/components/orb/sink/sink-config/sink-config.component.spec.ts
@@ -8,7 +8,7 @@ describe('SinkConfigComponent', () => {
 
   beforeEach(async(() => {
     TestBed.configureTestingModule({
-      declarations: [ SinkConfigComponent ],
+      declarations: [ SinkConfigComponent ]
     })
     .compileComponents();
   }));
diff --git a/ui/src/app/shared/components/orb/sink/sink-config/sink-config.component.ts b/ui/src/app/shared/components/orb/sink/sink-config/sink-config.component.ts
index 528f06023..ce30aa4b3 100644
--- a/ui/src/app/shared/components/orb/sink/sink-config/sink-config.component.ts
+++ b/ui/src/app/shared/components/orb/sink/sink-config/sink-config.component.ts
@@ -8,7 +8,7 @@ import { OrbService } from 'app/common/services/orb.service';
 @Component({
   selector: 'ngx-sink-config',
   templateUrl: './sink-config.component.html',
-  styleUrls: ['./sink-config.component.scss'],
+  styleUrls: ['./sink-config.component.scss']
 })
 
 export class SinkConfigComponent implements OnInit, OnChanges {
@@ -16,23 +16,20 @@ export class SinkConfigComponent implements OnInit, OnChanges {
   sink: Sink;
 
   @Input()
-  editMode: boolean = false;
+  editMode: boolean;
 
   @Input()
-  createMode: boolean = false;
+  createMode: boolean;
 
   @Input()
   sinkBackend: string;
-
+  
   @Output()
   editModeChange: EventEmitter;
 
   @Input()
   detailsEditMode: boolean;
 
-  @Input()
-  errorConfigMessage: string;
-
   @ViewChild('editorComponent')
   editor;
 
@@ -80,43 +77,43 @@ export class SinkConfigComponent implements OnInit, OnChanges {
   constructor(
     private fb: FormBuilder,
     private orb: OrbService,
-    ) {
-    this.isYaml = true;
+    ) { 
+    this.isYaml = true; 
     this.sink = {};
     this.editMode = false;
     this.editModeChange = new EventEmitter();
     this.detailsEditMode = false;
     this.updateForm();
-    this.errorConfigMessage = '';
     this.sinkConfigSchemaPrometheus = {
-      'authentication' : {
-        'type': 'basicauth',
-        'password': '',
-        'username': '',
+      "authentication" : {
+        "type": "basicauth",
+        "password": "",
+        "username": "",
       },
-      'exporter' : {
-        'remote_host': '',
+      "exporter" : {
+        "remote_host": "",
       },
-      'opentelemetry': 'enabled',
-    };
+      "opentelemetry": "enabled",
+    }
     this.sinkConfigSchemaOtlp = {
-      'authentication' : {
-        'type': 'basicauth',
-        'password': '',
-        'username': '',
+      "authentication" : {
+        "type": "basicauth",
+        "password": "",
+        "username": "",
       },
-      'exporter' : {
-        'endpoint': '',
+      "exporter" : {
+        "endpoint": "",
       },
-      'opentelemetry': 'enabled',
-    };
+      "opentelemetry": "enabled",
+    }
   }
 
   ngOnInit(): void {
     if (this.createMode) {
       this.toggleEdit(true);
       this.code = YAML.stringify(this.sinkConfigSchemaOtlp);
-    } else {
+    }
+    else {
       // if (this.sink.config_data && this.sink.format === 'yaml') {
       //   this.isYaml = true;
       const parsedCode = YAML.parse(JSON.stringify(this.sink.config));
@@ -146,7 +143,7 @@ ngOnChanges(changes: SimpleChanges) {
     const sinkConfigSchema = this.sinkBackend === SinkBackends.prometheus
       ? this.sinkConfigSchemaPrometheus
       : this.sinkConfigSchemaOtlp;
-
+  
     this.code = this.isYaml
       ? YAML.stringify(sinkConfigSchema, null)
       : JSON.stringify(sinkConfigSchema, null, 2);
@@ -176,7 +173,8 @@ updateForm() {
     this.editMode = edit;
     if ((this.editMode || this.detailsEditMode) && !this.createMode) {
       this.orb.pausePolling();
-    } else {
+    }
+    else {
       this.orb.startPolling();
     }
     this.editorOptions = { ...this.editorOptions, readOnly: !edit };
@@ -189,10 +187,11 @@ updateForm() {
     if (this.isYaml) {
       const parsedCode = YAML.parse(this.code);
       this.code = YAML.stringify(parsedCode);
-    } else {
+    }
+    else {
       const parsedConfig = YAML.parse(this.code);
       this.code = JSON.stringify(parsedConfig, null, 2);
     }
   }
-
+  
 }
diff --git a/ui/src/app/shared/components/orb/sink/sink-details/sink-details.component.html b/ui/src/app/shared/components/orb/sink/sink-details/sink-details.component.html
index fed3c1da6..4057269f2 100644
--- a/ui/src/app/shared/components/orb/sink/sink-details/sink-details.component.html
+++ b/ui/src/app/shared/components/orb/sink/sink-details/sink-details.component.html
@@ -30,10 +30,9 @@
       {{ sink?.name }}
-
+
-
       {{ sink?.description }}
-
       No description provided
+
       {{ sink?.description }}
@@ -41,9 +40,8 @@
-
       {{ sink?.state | titlecase }}
-
       {{ sink?.state | titlecase }} {{ sink?.error }}
-
       {{ sink?.state | titlecase }} {{ sink?.error }}
+
       {{ sink?.state | titlecase }}
+
       {{ sink?.state | titlecase }} {{ sink?.error | titlecase }}
diff --git a/ui/src/app/shared/components/orb/sink/sink-details/sink-details.component.scss b/ui/src/app/shared/components/orb/sink/sink-details/sink-details.component.scss
index 68cbb9112..4cc2a27c4 100644
--- a/ui/src/app/shared/components/orb/sink/sink-details/sink-details.component.scss
+++ b/ui/src/app/shared/components/orb/sink/sink-details/sink-details.component.scss
@@ -24,7 +24,7 @@ input {
     color: #df316f;
   }
   &idle {
-    color: #f2994a;
+    color: #f2994a; 
  }
 }
 .ns1red {
@@ -44,7 +44,6 @@ nb-card {
     padding: 0.5rem 1rem;
   }
   nb-card-body {
-    margin: 0 !important;
     label {
       color: #969fb9;
     }
@@ -53,9 +52,4 @@ nb-card {
     text-align: end;
   }
 }
-}
-.italic {
-  font-style: italic;
-  font-size: 0.9rem;
-  color: #d9deee;
-}
+}
\ No newline at end of file
diff --git a/ui/src/app/shared/components/orb/sink/sink-details/sink-details.component.spec.ts b/ui/src/app/shared/components/orb/sink/sink-details/sink-details.component.spec.ts
index 1de3f89a7..a9ede211d 100644
--- a/ui/src/app/shared/components/orb/sink/sink-details/sink-details.component.spec.ts
+++ b/ui/src/app/shared/components/orb/sink/sink-details/sink-details.component.spec.ts
@@ -8,7 +8,7 @@ describe('SinkDetailsComponent', () => {
 
   beforeEach(async(() => {
     TestBed.configureTestingModule({
-      declarations: [ SinkDetailsComponent ],
+      declarations: [ SinkDetailsComponent ]
     })
     .compileComponents();
   }));
diff --git a/ui/src/app/shared/components/orb/sink/sink-details/sink-details.component.ts b/ui/src/app/shared/components/orb/sink/sink-details/sink-details.component.ts
index d872b90a8..f778e651c 100644
--- a/ui/src/app/shared/components/orb/sink/sink-details/sink-details.component.ts
+++ b/ui/src/app/shared/components/orb/sink/sink-details/sink-details.component.ts
@@ -10,7 +10,7 @@ import { SinksService } from 'app/common/services/sinks/sinks.service';
 @Component({
   selector: 'ngx-sink-details',
   templateUrl: './sink-details.component.html',
-  styleUrls: ['./sink-details.component.scss'],
+  styleUrls: ['./sink-details.component.scss']
 })
 
 export class SinkDetailsComponent implements OnInit, OnChanges {
@@ -47,7 +47,7 @@ export class SinkDetailsComponent implements OnInit, OnChanges {
     private fb: FormBuilder,
     private sinksService: SinksService,
     private orb: OrbService,
-  ) {
+    ) { 
     this.sink = {};
     this.createMode = false;
     this.editMode = false;
@@ -59,7 +59,7 @@ export class SinkDetailsComponent implements OnInit, OnChanges {
     Promise.all([this.getSinkBackends()]).then((responses) => {
       const backends = responses[0];
       this.sinkTypesList = backends.map(entry => entry.backend);
-    });
+    })
   }
 
   ngOnInit(): void {
@@ -92,10 +92,11 @@ export class SinkDetailsComponent implements OnInit, OnChanges {
       description: [description],
     });
     this.selectedTags = {...tags} || {};
-  } else if (this.createMode) {
+  }
+  else if (this.createMode) {
     const { name, description, backend, tags } = this.sink;
-
+      
     this.formGroup = this.fb.group({
       name: [name, [Validators.required, Validators.pattern('^[a-zA-Z_][a-zA-Z0-9_-]*$'), Validators.maxLength(64)]],
       description: [description, [Validators.maxLength(64)]],
@@ -103,7 +104,8 @@ export class SinkDetailsComponent implements OnInit, OnChanges {
     });
 
     this.selectedTags = { ...tags };
-    } else {
+    }
+    else {
       this.formGroup = this.fb.group({
         name: null,
         description: null,
@@ -116,7 +118,8 @@ export class SinkDetailsComponent implements OnInit, OnChanges {
     this.editMode = value;
     if (this.editMode || this.configEditMode) {
       this.orb.pausePolling();
-    } else {
+    }
+    else {
      this.orb.startPolling();
     }
     this.updateForm();
@@ -124,15 +127,17 @@ export class SinkDetailsComponent implements OnInit, OnChanges {
   }
 
   getMode() {
-    if (this.editMode === true) {
+    if(this.editMode == true) {
       this.mode = 'edit';
-    } else if (this.createMode === true) {
+    }
+    else if (this.createMode == true) {
       this.mode = 'create';
-    } else {
+    }
+    else {
       this.mode = 'read';
     }
   }
-
+  
   getSinkBackends() {
     return new Promise(resolve => {
       this.sinksService.getSinkBackends().subscribe(backends => {
diff --git a/ui/src/app/shared/components/orb/tag-control/tag-control.component.scss b/ui/src/app/shared/components/orb/tag-control/tag-control.component.scss
index 19175b341..c55079c9b 100644
--- a/ui/src/app/shared/components/orb/tag-control/tag-control.component.scss
+++ b/ui/src/app/shared/components/orb/tag-control/tag-control.component.scss
@@ -19,7 +19,7 @@ mat-chip nb-icon {
   border: none !important;
   background: #1e2941 !important;
 }
-.add-button:disabled {
+.add-button:disabled{
   background-color: transparent !important;
 }
 .two-dot {
diff --git a/ui/tslint.json b/ui/tslint.json
index e31c452a7..f64dba908 100644
--- a/ui/tslint.json
+++ b/ui/tslint.json
@@ -62,6 +62,7 @@
     "no-switch-case-fall-through": true,
     "no-trailing-whitespace": true,
     "no-unnecessary-initializer": true,
+    "no-use-before-declare": true,
     "no-var-keyword": true,
     "object-literal-sort-keys": false,
     "one-line": [
@@ -95,6 +96,7 @@
       "variable-declaration": "nospace"
     }
   ],
+  "typeof-compare": true,
   "unified-signatures": true,
   "variable-name": false,
   "whitespace": [
@@ -133,6 +135,7 @@
     "use-lifecycle-interface": true,
     "use-pipe-transform-interface": true,
     "component-class-suffix": true,
-    "directive-class-suffix": true
+    "directive-class-suffix": true,
+    "no-unused-variable": true
   }
 }