diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..d163863 --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +build/ \ No newline at end of file diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..a5f1d6e --- /dev/null +++ b/Makefile @@ -0,0 +1,119 @@ +# Copyright (c) Mainflux +# SPDX-License-Identifier: Apache-2.0 + +# Adapted for Orb project, modifications licensed under MPL v. 2.0: +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. */ +include agent/docker/.env + +# expects to be set as env var +PRODUCTION_AGENT_REF_TAG ?= latest +PRODUCTION_AGENT_DEBUG_REF_TAG ?= latest-debug +REF_TAG ?= develop +DEBUG_REF_TAG ?= develop-debug +PKTVISOR_TAG ?= latest-develop +PKTVISOR_DEBUG_TAG ?= latest-develop-debug +DOCKER_IMAGE_NAME_PREFIX ?= orb +DOCKERHUB_REPO = netboxlabs +ORB_DOCKERHUB_REPO = netboxlabs +BUILD_DIR = build +CGO_ENABLED ?= 0 +GOARCH ?= $(shell dpkg-architecture -q DEB_BUILD_ARCH) +GOOS ?= $(shell dpkg-architecture -q DEB_TARGET_ARCH_OS) +ORB_VERSION = $(shell cat VERSION) +COMMIT_HASH = $(shell git rev-parse --short HEAD) +OTEL_COLLECTOR_CONTRIB_VERSION ?= 0.91.0 +OTEL_CONTRIB_URL ?= "https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v$(OTEL_COLLECTOR_CONTRIB_VERSION)/otelcol-contrib_$(OTEL_COLLECTOR_CONTRIB_VERSION)_$(GOOS)_$(GOARCH).tar.gz" + + +define run_test + go test -mod=mod -short -race -count 1 -tags test $(shell go list ./... | grep -v 'cmd' | grep '$(SERVICE)') +endef + +define run_test_coverage + go test -mod=mod -short -race -count 1 -tags test -cover -coverprofile=coverage.out -covermode=atomic $(shell go list ./... 
| grep -v 'cmd' | grep '$(SERVICE)') +endef + +all: platform + +.PHONY: all agent agent_bin + +clean: + rm -rf ${BUILD_DIR} + +cleandocker: + # Stops containers and removes containers, networks, volumes, and images created by up +# docker-compose -f docker/docker-compose.yml down --rmi all -v --remove-orphans + docker-compose -f docker/docker-compose.yml down -v --remove-orphans + +ifdef pv + # Remove unused volumes + docker volume ls -f name=$(DOCKER_IMAGE_NAME_PREFIX) -f dangling=true -q | xargs -r docker volume rm +endif + +test: + go test -mod=mod -short -race -count 1 -tags test $(shell go list ./... | grep -v 'cmd') + +run_test_service: test_service $(2) + +run_test_service_cov: test_service_cov $(2) + +test_service: + $(call run_test,$(@)) + +test_service_cov: + $(call run_test_coverage,$(@)) + +agent_bin: + echo "ORB_VERSION: $(ORB_VERSION)-$(COMMIT_HASH)" + CGO_ENABLED=$(CGO_ENABLED) GOOS=linux GOARCH=$(GOARCH) GOARM=$(GOARM) go build -mod=mod -ldflags "-extldflags '-static' -X 'github.com/netboxlabs/orb-agent/buildinfo.version=$(ORB_VERSION)-$(COMMIT_HASH)'" -o ${BUILD_DIR}/$(DOCKER_IMAGE_NAME_PREFIX)-agent cmd/main.go + +agent: + docker build --no-cache \ + --build-arg GOARCH=$(GOARCH) \ + --build-arg PKTVISOR_TAG=$(PKTVISOR_TAG) \ + --tag=$(ORB_DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-agent:$(REF_TAG) \ + --tag=$(ORB_DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-agent:$(ORB_VERSION) \ + --tag=$(ORB_DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-agent:$(ORB_VERSION)-$(COMMIT_HASH) \ + -f agent/docker/Dockerfile . 
+ +agent_full: + docker build --no-cache \ + --build-arg GOARCH=$(GOARCH) \ + --build-arg PKTVISOR_TAG=$(PKTVISOR_TAG) \ + --build-arg ORB_TAG=${REF_TAG} \ + --build-arg OTEL_TAG=${OTEL_COLLECTOR_CONTRIB_VERSION} \ + --tag=$(ORB_DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-agent-full:$(REF_TAG) \ + --tag=$(ORB_DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-agent-full:$(ORB_VERSION) \ + --tag=$(ORB_DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-agent-full:$(ORB_VERSION)-$(COMMIT_HASH) \ + -f agent/docker/Dockerfile.full . + +agent_debug: + docker build \ + --build-arg PKTVISOR_TAG=$(PKTVISOR_DEBUG_TAG) \ + --tag=$(DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-agent:$(DEBUG_REF_TAG) \ + --tag=$(ORB_DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-agent:$(DEBUG_REF_TAG) \ + -f agent/docker/Dockerfile . + +agent_production: + docker build \ + --build-arg PKTVISOR_TAG=$(PKTVISOR_TAG) \ + --tag=$(ORB_DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-agent:$(PRODUCTION_AGENT_REF_TAG) \ + --tag=$(ORB_DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-agent:$(ORB_VERSION) \ + --tag=$(ORB_DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-agent:$(ORB_VERSION)-$(COMMIT_HASH) \ + -f agent/docker/Dockerfile . + +agent_debug_production: + docker build \ + --build-arg PKTVISOR_TAG=$(PKTVISOR_DEBUG_TAG) \ + --tag=$(ORB_DOCKERHUB_REPO)/$(DOCKER_IMAGE_NAME_PREFIX)-agent:$(PRODUCTION_AGENT_DEBUG_REF_TAG) \ + -f agent/docker/Dockerfile . + +pull-latest-otel-collector-contrib: + wget -O ./agent/backend/otel/otelcol_contrib.tar.gz $(OTEL_CONTRIB_URL) + tar -xvf ./agent/backend/otel/otelcol_contrib.tar.gz -C ./agent/backend/otel/ + cp ./agent/backend/otel/otelcol-contrib . 
+ rm ./agent/backend/otel/otelcol_contrib.tar.gz + rm ./agent/backend/otel/LICENSE + rm ./agent/backend/otel/README.md diff --git a/VERSION b/VERSION new file mode 100644 index 0000000..c25c8e5 --- /dev/null +++ b/VERSION @@ -0,0 +1 @@ +0.30.0 diff --git a/agent/README.md b/agent/README.md new file mode 100644 index 0000000..e752e42 --- /dev/null +++ b/agent/README.md @@ -0,0 +1,59 @@ +# Orb Agent + +Agent - Fleet Synchronization Steps + +Success Communication Sequence Diagram +```mermaid + sequenceDiagram + Agent->>Fleet: subscribe + Agent->>Fleet: sendCapabilities + Agent->>+Fleet: sendGroupMembershipReq + Fleet-->>-Agent: sendGroupMembership with GroupIds + Agent-->>Fleet: Heartbeat + Agent-->>+Fleet: sendAgentPoliciesReq + Fleet-->>-Agent: agentPolicies with Policies + Agent-->>Fleet: Heartbeat +``` + + +Fail Communication Sequence Diagram +```mermaid + sequenceDiagram + Agent->>Fleet: subscribe + Agent->>Fleet: sendCapabilities + Agent->>+Fleet: sendGroupMembershipReq + Agent-->>Fleet: Heartbeat + Agent-->>+Fleet: sendAgentPoliciesReq + Fleet-->>-Agent: agentPolicies with Policies + Agent-->>Fleet: Heartbeat +``` + +Agent is still without Groups and Policies + + +With Re-Request Mechanism, general idea + +```mermaid +sequenceDiagram + Agent-)Fleet: subscribe + Agent-)Fleet: sendCapabilities + Agent-)Fleet: sendGroupMembershipReq + activate Fleet + Agent->>Timer: starts wait timer for response + activate Timer + Timer-xAgent: timer runs out + Agent-)Fleet: sendGroupMembershipReq + Fleet--)Agent: sendGroupMembership with GroupIds + deactivate Fleet + Agent-->>Timer: marks as success + deactivate Timer + Agent-->>Fleet: Heartbeat + Agent-->>Fleet: sendAgentPoliciesReq + activate Fleet + Agent->>Timer: starts wait timer for response + activate Timer + Fleet-->>Agent: agentPolicies with Policies + Agent->>Timer: marks as success + deactivate Timer + Agent-->>Fleet: Heartbeat +``` diff --git a/agent/agent.go b/agent/agent.go new file mode 100644 index 
0000000..40847dc --- /dev/null +++ b/agent/agent.go @@ -0,0 +1,318 @@ +package agent + +import ( + "context" + "errors" + "fmt" + "runtime" + "time" + + mqtt "github.com/eclipse/paho.mqtt.golang" + "github.com/fatih/structs" + "github.com/google/uuid" + "github.com/jmoiron/sqlx" + _ "github.com/mattn/go-sqlite3" + "github.com/netboxlabs/orb-agent/agent/backend" + "github.com/netboxlabs/orb-agent/agent/cloud_config" + "github.com/netboxlabs/orb-agent/agent/config" + manager "github.com/netboxlabs/orb-agent/agent/policyMgr" + "github.com/netboxlabs/orb-agent/buildinfo" + "go.uber.org/zap" +) + +var ( + ErrMqttConnection = errors.New("failed to connect to a broker") +) + +type Agent interface { + Start(ctx context.Context, cancelFunc context.CancelFunc) error + Stop(ctx context.Context) + RestartAll(ctx context.Context, reason string) error + RestartBackend(ctx context.Context, backend string, reason string) error +} + +type orbAgent struct { + logger *zap.Logger + config config.Config + client mqtt.Client + agent_id string + db *sqlx.DB + backends map[string]backend.Backend + backendState map[string]*backend.State + cancelFunction context.CancelFunc + rpcFromCancelFunc context.CancelFunc + + asyncContext context.Context + + hbTicker *time.Ticker + heartbeatCtx context.Context + heartbeatCancel context.CancelFunc + + // Agent RPC channel, configured from command line + baseTopic string + rpcToCoreTopic string + rpcFromCoreTopic string + capabilitiesTopic string + heartbeatsTopic string + logTopic string + + // Retry Mechanism to ensure the Request is received + groupRequestTicker *time.Ticker + groupRequestSucceeded context.CancelFunc + policyRequestTicker *time.Ticker + policyRequestSucceeded context.CancelFunc + + // AgentGroup channels sent from core + groupsInfos map[string]GroupInfo + + policyManager manager.PolicyManager +} + +const retryRequestDuration = time.Second +const retryRequestFixedTime = 15 +const retryDurationIncrPerAttempts = 10 +const 
retryMaxAttempts = 4 + +type GroupInfo struct { + Name string + ChannelID string +} + +var _ Agent = (*orbAgent)(nil) + +func New(logger *zap.Logger, c config.Config) (Agent, error) { + logger.Info("using local config db", zap.String("filename", c.OrbAgent.DB.File)) + db, err := sqlx.Connect("sqlite3", c.OrbAgent.DB.File) + if err != nil { + return nil, err + } + + pm, err := manager.New(logger, c, db) + if err != nil { + logger.Error("error during create policy manager, exiting", zap.Error(err)) + return nil, err + } + if pm.GetRepo() == nil { + logger.Error("policy manager failed to get repository", zap.Error(err)) + return nil, err + } + return &orbAgent{logger: logger, config: c, policyManager: pm, db: db, groupsInfos: make(map[string]GroupInfo)}, nil +} + +func (a *orbAgent) startBackends(agentCtx context.Context) error { + a.logger.Info("registered backends", zap.Strings("values", backend.GetList())) + a.logger.Info("requested backends", zap.Any("values", a.config.OrbAgent.Backends)) + if len(a.config.OrbAgent.Backends) == 0 { + return errors.New("no backends specified") + } + a.backends = make(map[string]backend.Backend, len(a.config.OrbAgent.Backends)) + a.backendState = make(map[string]*backend.State) + for name, configurationEntry := range a.config.OrbAgent.Backends { + if !backend.HaveBackend(name) { + return errors.New("specified backend does not exist: " + name) + } + be := backend.GetBackend(name) + configuration := structs.Map(a.config.OrbAgent.Otel) + configuration["agent_tags"] = a.config.OrbAgent.Tags + if err := be.Configure(a.logger, a.policyManager.GetRepo(), configurationEntry, configuration); err != nil { + a.logger.Info("failed to configure backend", zap.String("backend", name), zap.Error(err)) + return err + } + backendCtx := context.WithValue(agentCtx, "routine", name) + if a.config.OrbAgent.Cloud.MQTT.Id != "" { + backendCtx = context.WithValue(backendCtx, "agent_id", a.config.OrbAgent.Cloud.MQTT.Id) + } else { + backendCtx = 
context.WithValue(backendCtx, "agent_id", "auto-provisioning-without-id") + } + a.backends[name] = be + initialState := be.GetInitialState() + a.backendState[name] = &backend.State{ + Status: initialState, + LastRestartTS: time.Now(), + } + if err := be.Start(context.WithCancel(backendCtx)); err != nil { + a.logger.Info("failed to start backend", zap.String("backend", name), zap.Error(err)) + var errMessage string + if initialState == backend.BackendError { + errMessage = err.Error() + } + a.backendState[name] = &backend.State{ + Status: initialState, + LastError: errMessage, + LastRestartTS: time.Now(), + } + return err + } + } + return nil +} + +func (a *orbAgent) Start(ctx context.Context, cancelFunc context.CancelFunc) error { + startTime := time.Now() + defer func(t time.Time) { + a.logger.Debug("Startup of agent execution duration", zap.String("Start() execution duration", time.Since(t).String())) + }(startTime) + agentCtx := context.WithValue(ctx, "routine", "agentRoutine") + asyncCtx, cancelAllAsync := context.WithCancel(context.WithValue(ctx, "routine", "asyncParent")) + a.asyncContext = asyncCtx + a.rpcFromCancelFunc = cancelAllAsync + a.cancelFunction = cancelFunc + a.logger.Info("agent started", zap.String("version", buildinfo.GetVersion()), zap.Any("routine", agentCtx.Value("routine"))) + mqtt.CRITICAL = &agentLoggerCritical{a: a} + mqtt.ERROR = &agentLoggerError{a: a} + + if a.config.OrbAgent.Debug.Enable { + a.logger.Info("debug logging enabled") + mqtt.DEBUG = &agentLoggerDebug{a: a} + } + + ccm, err := cloud_config.New(a.logger, a.config, a.db) + if err != nil { + return err + } + cloudConfig, err := ccm.GetCloudConfig() + if err != nil { + return err + } + + commsCtx := context.WithValue(agentCtx, "routine", "comms") + if err := a.startComms(commsCtx, cloudConfig); err != nil { + a.logger.Error("could not start mqtt client") + return err + } + + if err := a.startBackends(ctx); err != nil { + return err + } + + a.logonWithHeartbeat() + + return nil 
+} + +func (a *orbAgent) logonWithHeartbeat() { + a.hbTicker = time.NewTicker(HeartbeatFreq) + a.heartbeatCtx, a.heartbeatCancel = a.extendContext("heartbeat") + go a.sendHeartbeats(a.heartbeatCtx, a.heartbeatCancel) + a.logger.Info("heartbeat routine started") +} + +func (a *orbAgent) logoffWithHeartbeat(ctx context.Context) { + a.logger.Debug("stopping heartbeat, going offline status", zap.Any("routine", ctx.Value("routine"))) + if a.heartbeatCtx != nil { + a.heartbeatCancel() + } + if a.client != nil && a.client.IsConnected() { + a.unsubscribeGroupChannels() + if token := a.client.Unsubscribe(a.rpcFromCoreTopic); token.Wait() && token.Error() != nil { + a.logger.Warn("failed to unsubscribe to RPC channel", zap.Error(token.Error())) + } + } +} +func (a *orbAgent) Stop(ctx context.Context) { + a.logger.Info("routine call for stop agent", zap.Any("routine", ctx.Value("routine"))) + if a.rpcFromCancelFunc != nil { + a.rpcFromCancelFunc() + } + for name, b := range a.backends { + if state, _, _ := b.GetRunningStatus(); state == backend.Running { + a.logger.Debug("stopping backend", zap.String("backend", name)) + if err := b.Stop(ctx); err != nil { + a.logger.Error("error while stopping the backend", zap.String("backend", name)) + } + } + } + a.logoffWithHeartbeat(ctx) + if a.client != nil && a.client.IsConnected() { + a.client.Disconnect(0) + } + a.logger.Debug("stopping agent with number of go routines and go calls", zap.Int("goroutines", runtime.NumGoroutine()), zap.Int64("gocalls", runtime.NumCgoCall())) + if a.policyRequestSucceeded != nil { + a.policyRequestSucceeded() + } + if a.groupRequestSucceeded != nil { + a.groupRequestSucceeded() + } + defer a.cancelFunction() +} + +func (a *orbAgent) RestartBackend(ctx context.Context, name string, reason string) error { + if !backend.HaveBackend(name) { + return errors.New("specified backend does not exist: " + name) + } + + be := a.backends[name] + a.logger.Info("restarting backend", zap.String("backend", name), 
zap.String("reason", reason)) + a.backendState[name].RestartCount += 1 + a.backendState[name].LastRestartTS = time.Now() + a.backendState[name].LastRestartReason = reason + a.logger.Info("removing policies", zap.String("backend", name)) + if err := a.policyManager.RemoveBackendPolicies(be, true); err != nil { + a.logger.Error("failed to remove policies", zap.String("backend", name), zap.Error(err)) + } + configuration := structs.Map(a.config.OrbAgent.Otel) + configuration["agent_tags"] = a.config.OrbAgent.Tags + if err := be.Configure(a.logger, a.policyManager.GetRepo(), a.config.OrbAgent.Backends[name], configuration); err != nil { + return err + } + a.logger.Info("resetting backend", zap.String("backend", name)) + + if err := be.FullReset(ctx); err != nil { + a.backendState[name].LastError = fmt.Sprintf("failed to reset backend: %v", err) + a.logger.Error("failed to reset backend", zap.String("backend", name), zap.Error(err)) + } + be.SetCommsClient(a.agent_id, &a.client, fmt.Sprintf("%s/?/%s", a.baseTopic, name)) + + if err := a.sendAgentPoliciesReq(); err != nil { + a.logger.Error("failed to send agent policies request", zap.Error(err)) + } + return nil +} + +func (a *orbAgent) restartComms(ctx context.Context) error { + if a.client != nil && a.client.IsConnected() { + a.unsubscribeGroupChannels() + } + ccm, err := cloud_config.New(a.logger, a.config, a.db) + if err != nil { + return err + } + cloudConfig, err := ccm.GetCloudConfig() + if err != nil { + return err + } + if err := a.startComms(ctx, cloudConfig); err != nil { + a.logger.Error("could not restart mqtt client") + return err + } + return nil +} + +func (a *orbAgent) RestartAll(ctx context.Context, reason string) error { + if a.config.OrbAgent.Cloud.MQTT.Id != "" { + ctx = context.WithValue(ctx, "agent_id", a.config.OrbAgent.Cloud.MQTT.Id) + } else { + ctx = context.WithValue(ctx, "agent_id", "auto-provisioning-without-id") + } + a.logoffWithHeartbeat(ctx) + a.logger.Info("restarting comms", 
zap.String("reason", reason)) + if err := a.restartComms(ctx); err != nil { + a.logger.Error("failed to restart comms", zap.Error(err)) + } + for name := range a.backends { + a.logger.Info("restarting backend", zap.String("backend", name), zap.String("reason", reason)) + err := a.RestartBackend(ctx, name, reason) + if err != nil { + a.logger.Error("failed to restart backend", zap.Error(err)) + } + } + a.logger.Info("all backends and comms were restarted") + + return nil +} + +func (a *orbAgent) extendContext(routine string) (context.Context, context.CancelFunc) { + uuidTraceId := uuid.NewString() + a.logger.Debug("creating context for receiving message", zap.String("routine", routine), zap.String("trace-id", uuidTraceId)) + return context.WithCancel(context.WithValue(context.WithValue(a.asyncContext, "routine", routine), "trace-id", uuidTraceId)) +} diff --git a/agent/agent_prof_test.go b/agent/agent_prof_test.go new file mode 100644 index 0000000..64e1a3a --- /dev/null +++ b/agent/agent_prof_test.go @@ -0,0 +1,29 @@ +package agent + +import ( + "context" + "testing" +) + +func Test_orbAgent_startBackends(t *testing.T) { + + type args struct { + agentCtx context.Context + } + tests := []struct { + name string + + args args + wantErr bool + }{ + // TODO: Add test cases. 
+ } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + a := orbAgent{} + if err := a.startBackends(tt.args.agentCtx); (err != nil) != tt.wantErr { + t.Errorf("startBackends() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} diff --git a/agent/backend/backend.go b/agent/backend/backend.go new file mode 100644 index 0000000..cdfafdd --- /dev/null +++ b/agent/backend/backend.go @@ -0,0 +1,91 @@ +package backend + +import ( + "context" + "time" + + mqtt "github.com/eclipse/paho.mqtt.golang" + "github.com/netboxlabs/orb-agent/agent/policies" + "go.uber.org/zap" +) + +const ( + Unknown RunningStatus = iota + Running + BackendError + AgentError + Offline + Waiting +) + +type RunningStatus int + +var runningStatusMap = [...]string{ + "unknown", + "running", + "backend_error", + "agent_error", + "offline", + "waiting", +} + +var runningStatusRevMap = map[string]RunningStatus{ + "unknown": Unknown, + "running": Running, + "backend_error": BackendError, + "agent_error": AgentError, + "offline": Offline, + "waiting": Waiting, +} + +type State struct { + Status RunningStatus + RestartCount int64 + LastError string + LastRestartTS time.Time + LastRestartReason string +} + +func (s RunningStatus) String() string { + return runningStatusMap[s] +} + +type Backend interface { + Configure(*zap.Logger, policies.PolicyRepo, map[string]string, map[string]interface{}) error + SetCommsClient(string, *mqtt.Client, string) + Version() (string, error) + Start(ctx context.Context, cancelFunc context.CancelFunc) error + Stop(ctx context.Context) error + FullReset(ctx context.Context) error + + GetStartTime() time.Time + GetCapabilities() (map[string]interface{}, error) + GetRunningStatus() (RunningStatus, string, error) + GetInitialState() RunningStatus + + ApplyPolicy(data policies.PolicyData, updatePolicy bool) error + RemovePolicy(data policies.PolicyData) error +} + +var registry = make(map[string]Backend) + +func Register(name string, b Backend) { + 
registry[name] = b +} + +func GetList() []string { + keys := make([]string, 0, len(registry)) + for k := range registry { + keys = append(keys, k) + } + return keys +} + +func HaveBackend(name string) bool { + _, prs := registry[name] + return prs +} + +func GetBackend(name string) Backend { + return registry[name] +} diff --git a/agent/backend/otel/comms.go b/agent/backend/otel/comms.go new file mode 100644 index 0000000..33a5f39 --- /dev/null +++ b/agent/backend/otel/comms.go @@ -0,0 +1,15 @@ +package otel + +import ( + "fmt" + mqtt "github.com/eclipse/paho.mqtt.golang" + "strings" +) + +func (o *openTelemetryBackend) SetCommsClient(agentID string, client *mqtt.Client, baseTopic string) { + o.mqttClient = client + otelBaseTopic := strings.Replace(baseTopic, "?", "otlp", 1) + o.otlpMetricsTopic = fmt.Sprintf("%s/m/%c", otelBaseTopic, agentID[0]) + o.otlpTracesTopic = fmt.Sprintf("%s/t/%c", otelBaseTopic, agentID[0]) + o.otlpLogsTopic = fmt.Sprintf("%s/l/%c", otelBaseTopic, agentID[0]) +} diff --git a/agent/backend/otel/exporter_builder.go b/agent/backend/otel/exporter_builder.go new file mode 100644 index 0000000..7914cb8 --- /dev/null +++ b/agent/backend/otel/exporter_builder.go @@ -0,0 +1,154 @@ +package otel + +import ( + "go.uber.org/zap" + "gopkg.in/yaml.v3" + "strconv" +) + +type ExporterBuilder interface { + GetStructFromYaml(yamlString string) (openTelemetryConfig, error) + MergeDefaultValueWithPolicy(config openTelemetryConfig, policyName string) (openTelemetryConfig, error) +} + +type openTelemetryConfig struct { + Receivers map[string]interface{} `yaml:"receivers"` + Processors map[string]interface{} `yaml:"processors,omitempty"` + Extensions map[string]interface{} `yaml:"extensions,omitempty"` + Exporters map[string]interface{} `yaml:"exporters"` + Service *service `yaml:"service"` +} + +type defaultOtlpExporter struct { + Endpoint string `yaml:"endpoint"` + Tls *tls `yaml:"tls"` +} + +type tls struct { + Insecure bool `yaml:"insecure"` +} + +type 
service struct { + Pipelines *pipelines `yaml:"pipelines"` + Telemetry *telemetry `yaml:"telemetry,omitempty"` +} + +type telemetry struct { + Metrics *metrics `yaml:"metrics,omitempty"` + Logs *logs `yaml:"logs,omitempty"` + Traces *traces `yaml:"traces,omitempty"` +} + +type metrics struct { + Level string `yaml:"level,omitempty"` + Address string `yaml:"address,omitempty"` +} + +type traces struct { + Enabled bool `yaml:"enabled"` +} + +type logs struct { + Enabled bool `yaml:"enabled"` +} + +type pipelines struct { + Metrics *pipeline `yaml:"metrics,omitempty"` + Traces *pipeline `yaml:"traces,omitempty"` + Logs *pipeline `yaml:"logs,omitempty"` +} + +type pipeline struct { + Exporters []string `yaml:"exporters,omitempty"` + Receivers []string `yaml:"receivers,omitempty"` + Processors []string `yaml:"processors,omitempty"` +} + +func getExporterBuilder(logger *zap.Logger, host string, port int) *exporterBuilder { + return &exporterBuilder{logger: logger, host: host, port: port} +} + +type exporterBuilder struct { + logger *zap.Logger + host string + port int +} + +func (e *exporterBuilder) GetStructFromYaml(yamlString string) (openTelemetryConfig, error) { + var config openTelemetryConfig + if err := yaml.Unmarshal([]byte(yamlString), &config); err != nil { + e.logger.Error("failed to unmarshal yaml string", zap.Error(err)) + return config, err + } + return config, nil +} + +func (e *exporterBuilder) MergeDefaultValueWithPolicy(config openTelemetryConfig, policyId string, policyName string) (openTelemetryConfig, error) { + endpoint := e.host + ":" + strconv.Itoa(e.port) + defaultOtlpExporter := defaultOtlpExporter{ + Endpoint: endpoint, + Tls: &tls{ + Insecure: true, + }, + } + + // Override any openTelemetry exporter that may come, to connect to agent's otlp receiver + config.Exporters = map[string]interface{}{ + "otlp": &defaultOtlpExporter, + } + if config.Processors == nil { + config.Processors = make(map[string]interface{}) + } + 
config.Processors["transform/policy_data"] = map[string]interface{}{ + "metric_statements": map[string]interface{}{ + "context": "scope", + "statements": []string{ + `set(attributes["policy_id"], "` + policyId + `")`, + `set(attributes["policy_name"], "` + policyName + `")`, + }, + }, + } + if config.Extensions == nil { + config.Extensions = make(map[string]interface{}) + } + tel := &telemetry{ + Metrics: &metrics{Level: "none"}, + } + config.Service.Telemetry = tel + // Override metrics exporter and append attributes/policy_data processor + if config.Service.Pipelines.Metrics != nil { + config.Service.Pipelines.Metrics.Exporters = []string{"otlp"} + config.Service.Pipelines.Metrics.Processors = append(config.Service.Pipelines.Metrics.Processors, "transform/policy_data") + } + if config.Service.Pipelines.Traces != nil { + config.Service.Pipelines.Traces.Exporters = []string{"otlp"} + config.Service.Pipelines.Traces.Processors = append(config.Service.Pipelines.Traces.Processors, "transform/policy_data") + } + if config.Service.Pipelines.Logs != nil { + config.Service.Pipelines.Logs.Exporters = []string{"otlp"} + config.Service.Pipelines.Logs.Processors = append(config.Service.Pipelines.Logs.Processors, "transform/policy_data") + } + return config, nil +} + +func (o *openTelemetryBackend) buildDefaultExporterAndProcessor(policyYaml string, policyId string, policyName string, telemetryPort int) (openTelemetryConfig, error) { + defaultPolicyYaml, err := yaml.Marshal(policyYaml) + if err != nil { + o.logger.Warn("yaml policy marshal failure", zap.String("policy_id", policyId)) + return openTelemetryConfig{}, err + } + defaultPolicyString := string(defaultPolicyYaml) + builder := getExporterBuilder(o.logger, o.otelReceiverHost, o.otelReceiverPort) + defaultPolicyStruct, err := builder.GetStructFromYaml(defaultPolicyString) + if err != nil { + return openTelemetryConfig{}, err + } + defaultPolicyStruct, err = builder.MergeDefaultValueWithPolicy( + defaultPolicyStruct, + 
policyId, + policyName) + if err != nil { + return openTelemetryConfig{}, err + } + return defaultPolicyStruct, nil +} diff --git a/agent/backend/otel/exporter_builder_test.go b/agent/backend/otel/exporter_builder_test.go new file mode 100644 index 0000000..c12ad02 --- /dev/null +++ b/agent/backend/otel/exporter_builder_test.go @@ -0,0 +1,68 @@ +package otel + +import ( + "go.uber.org/zap" + "testing" +) + +func TestBuildDefaultPolicy(t *testing.T) { + testCases := []struct { + caseName string + inputString string + policyId string + policyName string + expectedStruct openTelemetryConfig + processedString string + wantErr error + }{ + { + caseName: "success default policy test", + inputString: ` +--- +receivers: + httpcheck: + targets: + - endpoint: http://orb.live + method: GET + - endpoint: http://orb.community + method: GET + collection_interval: 60s +exporters: + otlp: + endpoint: localhost:4317 + tls: + insecure: true + logging: + verbosity: detailed + sampling_initial: 5 +service: + pipelines: + metrics: + exporters: + - otlp + receivers: + - httpcheck +`, + policyId: "test-policy-id", + policyName: "test-policy", + }, + } + for _, testCase := range testCases { + t.Run(testCase.caseName, func(t *testing.T) { + logger := zap.NewNop() + exporterBuilder := getExporterBuilder(logger, "localhost", 4317) + gotOtelConfig, err := exporterBuilder.GetStructFromYaml(testCase.inputString) + if err != nil { + t.Errorf("failed to merge default value with policy: %v", err) + } + expectedStruct, err := exporterBuilder.MergeDefaultValueWithPolicy(gotOtelConfig, testCase.policyId, testCase.policyName) + if err != nil { + t.Errorf("failed to merge default value with policy: %v", err) + } + if _, ok := expectedStruct.Processors["transform/policy_data"]; !ok { + t.Error("missing required attributes/policy_data processor", err) + } + + }) + } +} diff --git a/agent/backend/otel/otel.go b/agent/backend/otel/otel.go new file mode 100644 index 0000000..34dcc85 --- /dev/null +++ 
b/agent/backend/otel/otel.go @@ -0,0 +1,281 @@ +package otel + +import ( + "context" + _ "embed" + "fmt" + "os" + "strconv" + "time" + + mqtt "github.com/eclipse/paho.mqtt.golang" + "github.com/go-cmd/cmd" + "github.com/netboxlabs/orb-agent/agent/backend" + "github.com/netboxlabs/orb-agent/agent/config" + "github.com/netboxlabs/orb-agent/agent/otel" + "github.com/netboxlabs/orb-agent/agent/otel/otlpmqttexporter" + "github.com/netboxlabs/orb-agent/agent/policies" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/receiver" + "go.uber.org/zap" +) + +var _ backend.Backend = (*openTelemetryBackend)(nil) + +const DefaultPath = "/usr/local/bin/otelcol-contrib" +const DefaultHost = "localhost" +const DefaultPort = 4317 + +type openTelemetryBackend struct { + logger *zap.Logger + startTime time.Time + + //policies + policyRepo policies.PolicyRepo + policyConfigDirectory string + agentTags map[string]string + + // Context for controlling the context cancellation + mainContext context.Context + runningCollectors map[string]runningPolicy + mainCancelFunction context.CancelFunc + + // MQTT Config for OTEL MQTT Exporter + mqttConfig config.MQTTConfig + mqttClient *mqtt.Client + + otlpMetricsTopic string + otlpTracesTopic string + otlpLogsTopic string + otelReceiverTaps []string + otelCurrVersion string + + otelReceiverHost string + otelReceiverPort int + otelExecutablePath string + + metricsReceiver receiver.Metrics + metricsExporter exporter.Metrics + tracesReceiver receiver.Traces + tracesExporter exporter.Traces + logsReceiver receiver.Logs + logsExporter exporter.Logs +} + +// Configure initializes the backend with the given configuration +func (o *openTelemetryBackend) Configure(logger *zap.Logger, repo policies.PolicyRepo, + config map[string]string, otelConfig map[string]interface{}) error { + o.logger = logger + o.logger.Info("configuring OpenTelemetry backend") + o.policyRepo = repo + var err error + 
o.otelReceiverTaps = []string{"otelcol-contrib", "receivers", "processors", "extensions"} + o.policyConfigDirectory, err = os.MkdirTemp("", "otel-policies") + if path, ok := config["binary"]; ok { + o.otelExecutablePath = path + } else { + o.otelExecutablePath = DefaultPath + } + if err != nil { + o.logger.Error("failed to create temporary directory for policy configs", zap.Error(err)) + return err + } + if agentTags, ok := otelConfig["agent_tags"]; ok { + o.agentTags = agentTags.(map[string]string) + } + if otelPort, ok := config["otlp_port"]; ok { + o.otelReceiverPort, err = strconv.Atoi(otelPort) + if err != nil { + o.logger.Error("failed to parse otlp port using default", zap.Error(err)) + o.otelReceiverPort = DefaultPort + } + } else { + o.otelReceiverPort = DefaultPort + } + if otelHost, ok := config["otlp_host"]; ok { + o.otelReceiverHost = otelHost + } else { + o.otelReceiverHost = DefaultHost + } + + return nil +} + +func (o *openTelemetryBackend) GetInitialState() backend.RunningStatus { + return backend.Waiting +} + +func (o *openTelemetryBackend) Version() (string, error) { + if o.otelCurrVersion != "" { + return o.otelCurrVersion, nil + } + ctx, cancel := context.WithTimeout(o.mainContext, 60*time.Second) + var versionOutput string + command := cmd.NewCmd(o.otelExecutablePath, "--version") + status := command.Start() + select { + case finalStatus := <-status: + if finalStatus.Error != nil { + o.logger.Error("error during call of otelcol-contrib version", zap.Error(finalStatus.Error)) + return "", finalStatus.Error + } else { + output := finalStatus.Stdout + o.otelCurrVersion = output[0] + versionOutput = output[0] + } + case <-ctx.Done(): + o.logger.Error("timeout during getting version", zap.Error(ctx.Err())) + } + + cancel() + o.logger.Info("running opentelemetry-contrib version", zap.String("version", versionOutput)) + + return versionOutput, nil + +} + +func (o *openTelemetryBackend) Start(ctx context.Context, cancelFunc context.CancelFunc) (err 
error) { + o.runningCollectors = make(map[string]runningPolicy) + o.mainCancelFunction = cancelFunc + o.mainContext = ctx + o.startTime = time.Now() + currentWd, err := os.Getwd() + if err != nil { + o.otelExecutablePath = currentWd + "/otelcol-contrib" + } + currentVersion, err := o.Version() + if err != nil { + cancelFunc() + o.logger.Error("error during getting current version", zap.Error(err)) + return err + } + o.receiveOtlp() + o.logger.Info("starting open-telemetry backend using version", zap.String("version", currentVersion)) + policiesData, err := o.policyRepo.GetAll() + if err != nil { + cancelFunc() + o.logger.Error("failed to start otel backend, policies are absent") + return err + } + for _, policyData := range policiesData { + if err := o.ApplyPolicy(policyData, true); err != nil { + o.logger.Error("failed to start otel backend, failed to apply policy", zap.Error(err)) + cancelFunc() + return err + } + o.logger.Info("policy applied successfully", zap.String("policy_id", policyData.ID)) + } + + return nil +} + +func (o *openTelemetryBackend) Stop(_ context.Context) error { + o.logger.Info("stopping all running policies") + o.mainCancelFunction() + for policyID, policyEntry := range o.runningCollectors { + o.logger.Debug("stopping policy context", zap.String("policy_id", policyID)) + policyEntry.cancel() + } + return nil +} + +func (o *openTelemetryBackend) FullReset(ctx context.Context) error { + o.logger.Info("restarting otel backend", zap.Int("running policies", len(o.runningCollectors))) + backendCtx, cancelFunc := context.WithCancel(context.WithValue(ctx, "routine", "otel")) + if err := o.Start(backendCtx, cancelFunc); err != nil { + return err + } + return nil +} + +func Register() bool { + backend.Register("otel", &openTelemetryBackend{}) + return true +} + +func (o *openTelemetryBackend) GetStartTime() time.Time { + return o.startTime +} + +// GetCapabilities this will only print a default backend config +func (o *openTelemetryBackend)
GetCapabilities() (capabilities map[string]interface{}, err error) { + capabilities = make(map[string]interface{}) + capabilities["taps"] = o.otelReceiverTaps + return +} + +// GetRunningStatus returns cross-reference the Processes using the os, with the policies and contexts +func (o *openTelemetryBackend) GetRunningStatus() (backend.RunningStatus, string, error) { + amountCollectors := len(o.runningCollectors) + if amountCollectors > 0 { + return backend.Running, fmt.Sprintf("opentelemetry backend running with %d policies", amountCollectors), nil + } + return backend.Waiting, "opentelemetry backend is waiting for policy to come to start running", nil +} + +func (o *openTelemetryBackend) createOtlpMetricMqttExporter(ctx context.Context, cancelFunc context.CancelCauseFunc) (exporter.Metrics, error) { + bridgeService := otel.NewBridgeService(ctx, cancelFunc, &o.policyRepo, o.agentTags) + var cfg component.Config + if o.mqttClient != nil { + cfg = otlpmqttexporter.CreateConfigClient(o.mqttClient, o.otlpMetricsTopic, "", bridgeService) + } else { + cfg = otlpmqttexporter.CreateConfig(o.mqttConfig.Address, o.mqttConfig.Id, o.mqttConfig.Key, + o.mqttConfig.ChannelID, "", o.otlpMetricsTopic, bridgeService) + } + + set := otlpmqttexporter.CreateDefaultSettings(o.logger) + // Create the OTLP metrics exporter that'll receive and verify the metrics produced. + return otlpmqttexporter.CreateMetricsExporter(ctx, set, cfg) + +} + +func (o *openTelemetryBackend) createOtlpTraceMqttExporter(ctx context.Context, cancelFunc context.CancelCauseFunc) (exporter.Traces, error) { + bridgeService := otel.NewBridgeService(ctx, cancelFunc, &o.policyRepo, o.agentTags) + if o.mqttClient != nil { + cfg := otlpmqttexporter.CreateConfigClient(o.mqttClient, o.otlpTracesTopic, "", bridgeService) + set := otlpmqttexporter.CreateDefaultSettings(o.logger) + // Create the OTLP metrics metricsExporter that'll receive and verify the metrics produced. 
+ tracerExporter, err := otlpmqttexporter.CreateTracesExporter(ctx, set, cfg) + if err != nil { + return nil, err + } + return tracerExporter, nil + } else { + cfg := otlpmqttexporter.CreateConfig(o.mqttConfig.Address, o.mqttConfig.Id, o.mqttConfig.Key, + o.mqttConfig.ChannelID, "", o.otlpTracesTopic, bridgeService) + set := otlpmqttexporter.CreateDefaultSettings(o.logger) + // Create the OTLP metrics exporter that'll receive and verify the metrics produced. + tracerExporter, err := otlpmqttexporter.CreateTracesExporter(ctx, set, cfg) + if err != nil { + return nil, err + } + return tracerExporter, nil + } + +} + +func (o *openTelemetryBackend) createOtlpLogsMqttExporter(ctx context.Context, cancelFunc context.CancelCauseFunc) (exporter.Logs, error) { + bridgeService := otel.NewBridgeService(ctx, cancelFunc, &o.policyRepo, o.agentTags) + if o.mqttClient != nil { + cfg := otlpmqttexporter.CreateConfigClient(o.mqttClient, o.otlpLogsTopic, "", bridgeService) + set := otlpmqttexporter.CreateDefaultSettings(o.logger) + // Create the OTLP metrics metricsExporter that'll receive and verify the metrics produced. + exporter, err := otlpmqttexporter.CreateLogsExporter(ctx, set, cfg) + if err != nil { + return nil, err + } + return exporter, nil + } else { + cfg := otlpmqttexporter.CreateConfig(o.mqttConfig.Address, o.mqttConfig.Id, o.mqttConfig.Key, + o.mqttConfig.ChannelID, "", o.otlpLogsTopic, bridgeService) + set := otlpmqttexporter.CreateDefaultSettings(o.logger) + // Create the OTLP metrics exporter that'll receive and verify the metrics produced. 
+ exporter, err := otlpmqttexporter.CreateLogsExporter(ctx, set, cfg) + if err != nil { + return nil, err + } + return exporter, nil + } + +} diff --git a/agent/backend/otel/policy.go b/agent/backend/otel/policy.go new file mode 100644 index 0000000..d537910 --- /dev/null +++ b/agent/backend/otel/policy.go @@ -0,0 +1,177 @@ +package otel + +import ( + "context" + "errors" + "fmt" + "os" + + "github.com/go-cmd/cmd" + "github.com/netboxlabs/orb-agent/agent/policies" + "go.uber.org/zap" + "golang.org/x/exp/slices" + "gopkg.in/yaml.v3" +) + +const tempFileNamePattern = "otel-%s-config.yml" + +type runningPolicy struct { + ctx context.Context + cancel context.CancelFunc + policyId string + telemetryPort int + policyData policies.PolicyData + statusChan *cmd.Status +} + +func (o *openTelemetryBackend) ApplyPolicy(newPolicyData policies.PolicyData, updatePolicy bool) error { + o.logger.Debug("applying policy", zap.String("policy_id", newPolicyData.ID)) + policyYaml, err := yaml.Marshal(newPolicyData.Data) + if err != nil { + o.logger.Warn("yaml policy marshal failure", zap.String("policy_id", newPolicyData.ID), zap.Any("policy", newPolicyData.Data)) + return err + } + builder := getExporterBuilder(o.logger, o.otelReceiverHost, o.otelReceiverPort) + otelConfig, err := builder.GetStructFromYaml(string(policyYaml)) + if err != nil { + return err + } + if err = o.ValidatePolicy(otelConfig); err != nil { + return err + } + otelConfig, err = builder.MergeDefaultValueWithPolicy(otelConfig, newPolicyData.ID, newPolicyData.Name) + if err != nil { + return err + } + newPolicyYaml, err := yaml.Marshal(otelConfig) + if err != nil { + return err + } + if !updatePolicy || !o.policyRepo.Exists(newPolicyData.ID) { + newPolicyPath := fmt.Sprintf("%s/%s", o.policyConfigDirectory, fmt.Sprintf(tempFileNamePattern, newPolicyData.ID)) + o.logger.Info("received new policy", + zap.String("policy_id", newPolicyData.ID), + zap.Int32("version", newPolicyData.Version), + zap.String("policy_path", 
newPolicyPath)) + if err := os.WriteFile(newPolicyPath, newPolicyYaml, 0644); err != nil { + return err + } + if err = o.addRunner(newPolicyData, newPolicyPath); err != nil { + return err + } + } else { + currentPolicyData, err := o.policyRepo.Get(newPolicyData.ID) + if err != nil { + return err + } + if currentPolicyData.Version <= newPolicyData.Version { + currentPolicyPath := fmt.Sprintf("%s/%s", o.policyConfigDirectory, fmt.Sprintf(tempFileNamePattern, currentPolicyData.ID)) + o.logger.Info("received new policy version", + zap.String("policy_id", newPolicyData.ID), + zap.Int32("version", newPolicyData.Version), + zap.String("policy_path", currentPolicyPath)) + + o.removePolicyControl(currentPolicyData.ID) + + if err := os.WriteFile(currentPolicyPath, newPolicyYaml, 0644); err != nil { + return err + } + if err := o.addRunner(newPolicyData, currentPolicyPath); err != nil { + return err + } + if err := o.policyRepo.Update(newPolicyData); err != nil { + return err + } + } else { + o.logger.Info("current policy version is newer than the one being applied, skipping", + zap.String("policy_id", newPolicyData.ID), + zap.Int32("current_version", currentPolicyData.Version), + zap.Int32("incoming_version", newPolicyData.Version)) + } + } + + return nil +} + +func (o *openTelemetryBackend) addRunner(policyData policies.PolicyData, policyFilePath string) error { + policyContext, policyCancel := context.WithCancel(context.WithValue(o.mainContext, "policy_id", policyData.ID)) + command := cmd.NewCmdOptions(cmd.Options{Buffered: false, Streaming: true}, o.otelExecutablePath, "--config", policyFilePath) + go func(ctx context.Context, logger *zap.Logger) { + status := command.Start() + o.logger.Info("starting otel policy", zap.String("policy_id", policyData.ID), + zap.Any("status", command.Status()), zap.Int("process id", command.Status().PID)) + for command.Status().Complete == false { + select { + case v := <-ctx.Done(): + err := command.Stop() + if err
!= nil && !slices.Contains([]string{"command not running", "no such process"}, err.Error()) { + logger.Error("failed to stop otel", zap.String("policy_id", policyData.ID), + zap.Any("value", v), zap.Error(err)) + } + return + case line := <-command.Stdout: + if line != "" { + logger.Info("otel stdout", zap.String("policy_id", policyData.ID), zap.String("line", line)) + } + case line := <-command.Stderr: + if line != "" { + logger.Warn("otel stderr", zap.String("policy_id", policyData.ID), zap.String("line", line)) + } + case finalStatus := <-status: + logger.Info("otel finished", zap.String("policy_id", policyData.ID), zap.Any("status", finalStatus)) + } + } + }(policyContext, o.logger) + status := command.Status() + policyEntry := runningPolicy{ + ctx: policyContext, + cancel: policyCancel, + policyId: policyData.ID, + policyData: policyData, + statusChan: &status, + } + o.addPolicyControl(policyEntry, policyData.ID) + + return nil +} + +func (o *openTelemetryBackend) addPolicyControl(policyEntry runningPolicy, policyID string) { + o.runningCollectors[policyID] = policyEntry +} + +func (o *openTelemetryBackend) removePolicyControl(policyID string) { + policy, ok := o.runningCollectors[policyID] + if !ok { + o.logger.Error("did not find a running collector for policy id", zap.String("policy_id", policyID)) + return + } + policy.cancel() +} + +func (o *openTelemetryBackend) RemovePolicy(data policies.PolicyData) error { + if o.policyRepo.Exists(data.ID) { + o.removePolicyControl(data.ID) + policyPath := fmt.Sprintf("%s/%s", o.policyConfigDirectory, fmt.Sprintf(tempFileNamePattern, data.ID)) + o.logger.Info("removing policy", zap.String("policy_id", data.ID), zap.String("policy_path", policyPath)) + // This is a temp file, if it fails to remove, it will be erased once the container is restarted + if err := os.Remove(policyPath); err != nil { + o.logger.Warn("failed to remove policy file, this won't fail policy removal", zap.String("policy_id", data.ID), 
zap.Error(err)) + } + return nil + } + o.logger.Warn("no policy was removed, policy not found", zap.String("policy_id", data.ID)) + return nil +} + +func (o *openTelemetryBackend) ValidatePolicy(otelConfig openTelemetryConfig) error { + if otelConfig.Service.Pipelines.Logs == nil && + otelConfig.Service.Pipelines.Metrics == nil && + otelConfig.Service.Pipelines.Traces == nil { + return errors.New("no pipelines defined") + } + if len(otelConfig.Receivers) == 0 { + return errors.New("no receivers defined") + } + + return nil +} diff --git a/agent/backend/otel/scrape.go b/agent/backend/otel/scrape.go new file mode 100644 index 0000000..2b4ab80 --- /dev/null +++ b/agent/backend/otel/scrape.go @@ -0,0 +1,212 @@ +package otel + +import ( + "context" + "errors" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/config/confignet" + "go.opentelemetry.io/collector/receiver" + "go.opentelemetry.io/collector/receiver/otlpreceiver" + "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/trace/noop" + "go.uber.org/zap" + "strconv" + "time" +) + +func (o *openTelemetryBackend) receiveOtlp() { + exeCtx, execCancelF := context.WithCancelCause(o.mainContext) + go func() { + count := 0 + maxRetries := 20 + for { + if o.mqttClient != nil { + if ok := o.startOtelMetric(exeCtx, execCancelF); !ok { + o.logger.Error("failed to start otel metric") + return + } + //if o.startOtelTraces(exeCtx, execCancelF) { + // return + //} + //if ok := o.startOtelLogs(exeCtx, execCancelF); !ok { + // return + //} + o.logger.Info("started otel receiver for opentelemetry") + break + } else { + count++ + o.logger.Info("waiting until mqtt client is connected try " + strconv.Itoa(count) + " from " + strconv.Itoa(maxRetries)) + time.Sleep(time.Second * time.Duration(count)) + if count >= maxRetries { + execCancelF(errors.New("mqtt client is not connected")) + o.mainCancelFunction() + break + } + } + } + for { + select { + 
case <-exeCtx.Done(): + o.logger.Info("stopped receiver context, pktvisor will not scrape metrics", zap.Error(context.Cause(exeCtx))) + o.mainContext.Done() + o.mainCancelFunction() + return + case <-o.mainContext.Done(): + o.logger.Info("stopped Orb OpenTelemetry agent collector") + o.mainCancelFunction() + return + } + } + }() +} + +func (o *openTelemetryBackend) startOtelMetric(exeCtx context.Context, execCancelF context.CancelCauseFunc) bool { + var err error + o.metricsExporter, err = o.createOtlpMetricMqttExporter(exeCtx, execCancelF) + if err != nil { + o.logger.Error("failed to create a exporter", zap.Error(err)) + return false + } + pFactory := otlpreceiver.NewFactory() + cfg := pFactory.CreateDefaultConfig() + cfg.(*otlpreceiver.Config).Protocols = otlpreceiver.Protocols{ + GRPC: &configgrpc.GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: o.otelReceiverHost + ":" + strconv.Itoa(o.otelReceiverPort), + Transport: "tcp", + }, + }, + } + set := receiver.CreateSettings{ + TelemetrySettings: component.TelemetrySettings{ + Logger: o.logger, + TracerProvider: noop.NewTracerProvider(), + MeterProvider: metric.NewMeterProvider(), + ReportComponentStatus: func(*component.StatusEvent) error { + return nil + }, + }, + BuildInfo: component.NewDefaultBuildInfo(), + } + o.metricsReceiver, err = pFactory.CreateMetricsReceiver(exeCtx, set, cfg, o.metricsExporter) + if err != nil { + o.logger.Error("failed to create a receiver", zap.Error(err)) + return false + } + err = o.metricsExporter.Start(exeCtx, nil) + if err != nil { + o.logger.Error("otel mqtt exporter startup error", zap.Error(err)) + return false + } + o.logger.Info("Started receiver for OTLP in orb-agent", + zap.String("host", o.otelReceiverHost), zap.Int("port", o.otelReceiverPort)) + err = o.metricsReceiver.Start(exeCtx, nil) + if err != nil { + o.logger.Error("otel receiver startup error", zap.Error(err)) + return false + } + return true +} + +// TODO fix when create otlpmqtt trace +//func (o
*openTelemetryBackend) startOtelTraces(exeCtx context.Context, execCancelF context.CancelFunc) bool { +// if o.tracesExporter != nil { +// return true +// } +// var err error +// o.tracesExporter, err = o.createOtlpTraceMqttExporter(exeCtx, execCancelF) +// if err != nil { +// o.logger.Error("failed to create a exporter", zap.Error(err)) +// return true +// } +// pFactory := otlpreceiver.NewFactory() +// cfg := pFactory.CreateDefaultConfig() +// cfg.(*otlpreceiver.Config).Protocols = otlpreceiver.Protocols{ +// GRPC: &configgrpc.GRPCServerSettings{ +// NetAddr: confignet.NetAddr{ +// Endpoint: o.otelReceiverHost + ":" + strconv.Itoa(o.otelReceiverPort), +// Transport: "tcp", +// }, +// }, +// } +// set := receiver.CreateSettings{ +// TelemetrySettings: component.TelemetrySettings{ +// Logger: o.logger, +// TracerProvider: trace.NewNoopTracerProvider(), +// MeterProvider: metric.NewMeterProvider(), +// ReportComponentStatus: func(*component.StatusEvent) error { +// return nil +// }, +// }, +// BuildInfo: component.NewDefaultBuildInfo(), +// } +// o.tracesReceiver, err = pFactory.CreateTracesReceiver(exeCtx, set, cfg, o.tracesExporter) +// if err != nil { +// o.logger.Error("failed to create a receiver", zap.Error(err)) +// return true +// } +// err = o.metricsExporter.Start(exeCtx, nil) +// if err != nil { +// o.logger.Error("otel mqtt exporter startup error", zap.Error(err)) +// return true +// } +// o.logger.Info("Started receiver for OTLP in orb-agent", +// zap.String("host", o.otelReceiverHost), zap.Int("port", o.otelReceiverPort)) +// err = o.metricsReceiver.Start(exeCtx, nil) +// if err != nil { +// o.logger.Error("otel receiver startup error", zap.Error(err)) +// return true +// } +// return false +//} +// +//func (o *openTelemetryBackend) startOtelLogs(exeCtx context.Context, execCancelF context.CancelFunc) bool { +// if o.logsExporter != nil { +// return true +// } +// var err error +// o.logsExporter, err = o.createOtlpLogsMqttExporter(exeCtx, execCancelF) 
+// if err != nil { +// o.logger.Error("failed to create a exporter", zap.Error(err)) +// return false +// } +// pFactory := otlpreceiver.NewFactory() +// cfg := pFactory.CreateDefaultConfig() +// cfg.(*otlpreceiver.Config).Protocols = otlpreceiver.Protocols{ +// GRPC: &configgrpc.GRPCServerSettings{ +// NetAddr: confignet.NetAddr{ +// Endpoint: o.otelReceiverHost + ":" + strconv.Itoa(o.otelReceiverPort), +// Transport: "tcp", +// }, +// }, +// } +// set := receiver.CreateSettings{ +// TelemetrySettings: component.TelemetrySettings{ +// Logger: o.logger, +// TracerProvider: trace.NewNoopTracerProvider(), +// MeterProvider: metric.NewMeterProvider(), +// ReportComponentStatus: func(*component.StatusEvent) error { +// return nil +// }, +// }, +// BuildInfo: component.NewDefaultBuildInfo(), +// } +// o.metricsReceiver, err = pFactory.CreateLogsReceiver(exeCtx, set, cfg, o.logsExporter) +// if err != nil { +// o.logger.Error("failed to create a receiver", zap.Error(err)) +// return false +// } +// err = o.metricsExporter.Start(exeCtx, nil) +// if err != nil { +// o.logger.Error("otel mqtt exporter startup error", zap.Error(err)) +// return false +// } +// o.logger.Info("Started receiver for OTLP in orb-agent", +// zap.String("host", o.otelReceiverHost), zap.Int("port", o.otelReceiverPort)) +// err = o.metricsReceiver.Start(exeCtx, nil) +// if err != nil { +// o.logger.Error("otel receiver startup error", zap.Error(err)) +// return false +// } +// return true +//} diff --git a/agent/backend/otel/vars.go b/agent/backend/otel/vars.go new file mode 100644 index 0000000..bf7401f --- /dev/null +++ b/agent/backend/otel/vars.go @@ -0,0 +1,7 @@ +package otel + +import "github.com/spf13/viper" + +func RegisterBackendSpecificVariables(v *viper.Viper) { + v.SetDefault("orb.backends.otel.otlp_port", "4316") +} diff --git a/agent/backend/pktvisor/pktvisor.go b/agent/backend/pktvisor/pktvisor.go new file mode 100644 index 0000000..6559a61 --- /dev/null +++ 
b/agent/backend/pktvisor/pktvisor.go @@ -0,0 +1,355 @@ +package pktvisor + +import ( + "context" + "errors" + "fmt" + "net" + "net/http" + "os/exec" + "strconv" + "strings" + "time" + + mqtt "github.com/eclipse/paho.mqtt.golang" + "github.com/go-cmd/cmd" + "github.com/netboxlabs/orb-agent/agent/backend" + "github.com/netboxlabs/orb-agent/agent/config" + "github.com/netboxlabs/orb-agent/agent/policies" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/receiver" + "go.uber.org/zap" +) + +var _ backend.Backend = (*pktvisorBackend)(nil) + +const ( + DefaultBinary = "/usr/local/sbin/pktvisord" + ReadinessBackoff = 10 + ReadinessTimeout = 10 + ApplyPolicyTimeout = 10 + RemovePolicyTimeout = 20 + VersionTimeout = 2 + ScrapeTimeout = 5 + TapsTimeout = 5 + DefaultConfigPath = "/opt/orb/agent.yaml" + DefaultAPIHost = "localhost" + DefaultAPIPort = "10853" +) + +// AppInfo represents server application information +type AppInfo struct { + App struct { + Version string `json:"version"` + UpTimeMin float64 `json:"up_time_min"` + } `json:"app"` +} + +type pktvisorBackend struct { + logger *zap.Logger + binary string + configFile string + pktvisorVersion string + proc *cmd.Cmd + statusChan <-chan cmd.Status + startTime time.Time + cancelFunc context.CancelFunc + ctx context.Context + + // MQTT Config for OTEL MQTT Exporter + mqttConfig config.MQTTConfig + + mqttClient *mqtt.Client + metricsTopic string + otlpMetricsTopic string + policyRepo policies.PolicyRepo + + adminAPIHost string + adminAPIPort string + adminAPIProtocol string + + // added for Strings + agentTags map[string]string + + // OpenTelemetry management + otelReceiverHost string + otelReceiverPort int + receiver receiver.Metrics + exporter exporter.Metrics +} + +func (p *pktvisorBackend) getFreePort() (int, error) { + addr, err := net.ResolveTCPAddr("tcp", "localhost:0") + if err != nil { + return 0, err + } + l, err := net.ListenTCP("tcp", addr) + if err != nil { + return 0, err + } + defer 
l.Close() + return l.Addr().(*net.TCPAddr).Port, nil +} + +func (p *pktvisorBackend) GetStartTime() time.Time { + return p.startTime +} + +func (p *pktvisorBackend) GetInitialState() backend.RunningStatus { + return backend.Unknown +} + +func (p *pktvisorBackend) SetCommsClient(agentID string, client *mqtt.Client, baseTopic string) { + p.mqttClient = client + metricsTopic := strings.Replace(baseTopic, "?", "be", 1) + otelMetricsTopic := strings.Replace(baseTopic, "?", "otlp", 1) + p.metricsTopic = fmt.Sprintf("%s/m/%c", metricsTopic, agentID[0]) + p.otlpMetricsTopic = fmt.Sprintf("%s/m/%c", otelMetricsTopic, agentID[0]) +} + +func (p *pktvisorBackend) GetRunningStatus() (backend.RunningStatus, string, error) { + // first check process status + runningStatus, errMsg, err := p.getProcRunningStatus() + // if it's not running, we're done + if runningStatus != backend.Running { + return runningStatus, errMsg, err + } + // if it's running, check REST API availability too + _, aiErr := p.getAppInfo() + if aiErr != nil { + // process is running, but REST API is not accessible + return backend.BackendError, "process running, REST API unavailable", aiErr + } + return runningStatus, "", nil +} + +func (p *pktvisorBackend) Version() (string, error) { + appInfo, err := p.getAppInfo() + if err != nil { + return "", err + } + p.pktvisorVersion = appInfo.App.Version + return appInfo.App.Version, nil +} + +func (p *pktvisorBackend) Start(ctx context.Context, cancelFunc context.CancelFunc) error { + + // this should record the start time whether it's successful or not + // because it is used by the automatic restart system for last attempt + p.startTime = time.Now() + p.cancelFunc = cancelFunc + p.ctx = ctx + + _, err := exec.LookPath(p.binary) + if err != nil { + p.logger.Error("pktvisor startup error: binary not found", zap.Error(err)) + return err + } + + pvOptions := []string{ + "--admin-api", + "-l", + p.adminAPIHost, + "-p", + p.adminAPIPort, + } + if len(p.configFile) > 0 { + 
pvOptions = append(pvOptions, "--config", p.configFile) + } + + pvOptions = append(pvOptions, "--otel") + pvOptions = append(pvOptions, "--otel-host", p.otelReceiverHost) + if p.otelReceiverPort == 0 { + p.otelReceiverPort, err = p.getFreePort() + if err != nil { + p.logger.Error("pktvisor otlp startup error", zap.Error(err)) + return err + } + } + pvOptions = append(pvOptions, "--otel-port", strconv.Itoa(p.otelReceiverPort)) + + // the macros should be properly configured to enable crashpad + // pvOptions = append(pvOptions, "--cp-token", PKTVISOR_CP_TOKEN) + // pvOptions = append(pvOptions, "--cp-url", PKTVISOR_CP_URL) + // pvOptions = append(pvOptions, "--cp-path", PKTVISOR_CP_PATH) + // pvOptions = append(pvOptions, "--default-geo-city", "/geo-db/city.mmdb") + // pvOptions = append(pvOptions, "--default-geo-asn", "/geo-db/asn.mmdb") + // pvOptions = append(pvOptions, "--default-service-registry", "/iana/custom-iana.csv") + pvOptions = append(pvOptions, "--cp-custom", ctx.Value("agent_id").(string)) + + p.logger.Info("pktvisor startup", zap.Strings("arguments", pvOptions)) + + p.proc = cmd.NewCmdOptions(cmd.Options{ + Buffered: false, + Streaming: true, + }, p.binary, pvOptions...) 
+ p.statusChan = p.proc.Start() + + // log STDOUT and STDERR lines streaming from Cmd + doneChan := make(chan struct{}) + go func() { + defer func() { + if doneChan != nil { + close(doneChan) + } + }() + for p.proc.Stdout != nil || p.proc.Stderr != nil { + select { + case line, open := <-p.proc.Stdout: + if !open { + p.proc.Stdout = nil + continue + } + p.logger.Info("pktvisor stdout", zap.String("log", line)) + case line, open := <-p.proc.Stderr: + if !open { + p.proc.Stderr = nil + continue + } + p.logger.Info("pktvisor stderr", zap.String("log", line)) + } + } + }() + + // wait for simple startup errors + time.Sleep(time.Second) + + status := p.proc.Status() + + if status.Error != nil { + p.logger.Error("pktvisor startup error", zap.Error(status.Error)) + return status.Error + } + + if status.Complete { + err = p.proc.Stop() + if err != nil { + p.logger.Error("proc.Stop error", zap.Error(err)) + } + return errors.New("pktvisor startup error, check log") + } + + p.logger.Info("pktvisor process started", zap.Int("pid", status.PID)) + p.receiveOtlp() + + var readinessError error + for backoff := 0; backoff < ReadinessBackoff; backoff++ { + var appMetrics AppInfo + readinessError = p.request("metrics/app", &appMetrics, http.MethodGet, http.NoBody, "application/json", ReadinessTimeout) + if readinessError == nil { + p.logger.Info("pktvisor readiness ok, got version ", zap.String("pktvisor_version", appMetrics.App.Version)) + break + } + backoffDuration := time.Duration(backoff) * time.Second + p.logger.Info("pktvisor is not ready, trying again with backoff", zap.String("backoff backoffDuration", backoffDuration.String())) + time.Sleep(backoffDuration) + } + + if readinessError != nil { + p.logger.Error("pktvisor error on readiness", zap.Error(readinessError)) + err = p.proc.Stop() + if err != nil { + p.logger.Error("proc.Stop error", zap.Error(err)) + } + return readinessError + } + + return nil +} + +func (p *pktvisorBackend) Stop(ctx context.Context) error { + 
p.logger.Info("routine call to stop pktvisor", zap.Any("routine", ctx.Value("routine"))) + defer p.cancelFunc() + err := p.proc.Stop() + finalStatus := <-p.statusChan + if err != nil { + p.logger.Error("pktvisor shutdown error", zap.Error(err)) + } + + p.logger.Info("pktvisor process stopped", zap.Int("pid", finalStatus.PID), zap.Int("exit_code", finalStatus.Exit)) + return nil +} + +// Configure this will set configurations, but if not set, will use the following defaults +func (p *pktvisorBackend) Configure(logger *zap.Logger, repo policies.PolicyRepo, config map[string]string, otelConfig map[string]interface{}) error { + p.logger = logger + p.policyRepo = repo + + var prs bool + if p.binary, prs = config["binary"]; !prs { + p.binary = DefaultBinary + } + if p.configFile, prs = config["config_file"]; !prs { + p.configFile = DefaultConfigPath + } + if p.adminAPIHost, prs = config["api_host"]; !prs { + p.adminAPIHost = DefaultAPIHost + } + if p.adminAPIPort, prs = config["api_port"]; !prs { + p.adminAPIPort = DefaultAPIPort + } + if agentTags, ok := otelConfig["agent_tags"]; ok { + p.agentTags = agentTags.(map[string]string) + } + + for k, v := range otelConfig { + switch k { + case "Host": + p.otelReceiverHost = v.(string) + case "Port": + if v.(int) == 0 { + var err error + p.otelReceiverPort, err = p.getFreePort() + if err != nil { + p.logger.Error("pktvisor otlp startup error", zap.Error(err)) + return err + } + } else { + p.otelReceiverPort = v.(int) + } + } + } + p.logger.Info("configured otel receiver host", zap.String("host", p.otelReceiverHost), zap.Int("port", p.otelReceiverPort)) + + return nil +} + +func (p *pktvisorBackend) GetCapabilities() (map[string]interface{}, error) { + var taps interface{} + err := p.request("taps", &taps, http.MethodGet, http.NoBody, "application/json", TapsTimeout) + if err != nil { + return nil, err + } + jsonBody := make(map[string]interface{}) + jsonBody["taps"] = taps + return jsonBody, nil +} + +func (p *pktvisorBackend) 
FullReset(ctx context.Context) error { + + // force a stop, which stops scrape as well. if proc is dead, it no ops. + if state, _, _ := p.getProcRunningStatus(); state == backend.Running { + if err := p.Stop(ctx); err != nil { + p.logger.Error("failed to stop backend on restart procedure", zap.Error(err)) + return err + } + } + + // for each policy, restart the scraper + backendCtx, cancelFunc := context.WithCancel(context.WithValue(ctx, "routine", "pktvisor")) + + // start it + if err := p.Start(backendCtx, cancelFunc); err != nil { + p.logger.Error("failed to start backend on restart procedure", zap.Error(err)) + return err + } + + return nil +} + +func Register() bool { + backend.Register("pktvisor", &pktvisorBackend{ + adminAPIProtocol: "http", + }) + return true +} diff --git a/agent/backend/pktvisor/policy.go b/agent/backend/pktvisor/policy.go new file mode 100644 index 0000000..292ec14 --- /dev/null +++ b/agent/backend/pktvisor/policy.go @@ -0,0 +1,64 @@ +package pktvisor + +import ( + "bytes" + "fmt" + "net/http" + + "github.com/netboxlabs/orb-agent/agent/policies" + "go.uber.org/zap" + "gopkg.in/yaml.v3" +) + +func (p *pktvisorBackend) ApplyPolicy(data policies.PolicyData, updatePolicy bool) error { + + if updatePolicy { + // To update a policy it's necessary first remove it and then apply a new version + if err := p.RemovePolicy(data); err != nil { + p.logger.Warn("policy failed to remove", zap.String("policy_id", data.ID), zap.String("policy_name", data.Name), zap.Error(err)) + } + } + + p.logger.Debug("pktvisor policy apply", zap.String("policy_id", data.ID), zap.Any("data", data.Data)) + + fullPolicy := map[string]interface{}{ + "version": "1.0", + "visor": map[string]interface{}{ + "policies": map[string]interface{}{ + data.Name: data.Data, + }, + }, + } + + policyYaml, err := yaml.Marshal(fullPolicy) + if err != nil { + p.logger.Warn("yaml policy marshal failure", zap.String("policy_id", data.ID), zap.Any("policy", fullPolicy)) + return err + } + + 
var resp map[string]interface{} + err = p.request("policies", &resp, http.MethodPost, bytes.NewBuffer(policyYaml), "application/x-yaml", ApplyPolicyTimeout) + if err != nil { + p.logger.Warn("yaml policy application failure", zap.String("policy_id", data.ID), zap.ByteString("policy", policyYaml)) + return err + } + + return nil +} + +func (p *pktvisorBackend) RemovePolicy(data policies.PolicyData) error { + p.logger.Debug("pktvisor policy remove", zap.String("policy_id", data.ID)) + var resp interface{} + var name string + // Since we use Name for removing policies not IDs, if there is a change, we need to remove the previous name of the policy + if data.PreviousPolicyData != nil && data.PreviousPolicyData.Name != data.Name { + name = data.PreviousPolicyData.Name + } else { + name = data.Name + } + err := p.request(fmt.Sprintf("policies/%s", name), &resp, http.MethodDelete, http.NoBody, "application/json", RemovePolicyTimeout) + if err != nil { + return err + } + return nil +} diff --git a/agent/backend/pktvisor/scrape.go b/agent/backend/pktvisor/scrape.go new file mode 100644 index 0000000..1655d2a --- /dev/null +++ b/agent/backend/pktvisor/scrape.go @@ -0,0 +1,136 @@ +package pktvisor + +import ( + "context" + "errors" + "fmt" + "net/http" + "strconv" + "time" + + "go.opentelemetry.io/otel/trace/noop" + + "github.com/netboxlabs/orb-agent/agent/otel" + "github.com/netboxlabs/orb-agent/agent/otel/otlpmqttexporter" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/receiver" + "go.opentelemetry.io/collector/receiver/otlpreceiver" + "go.opentelemetry.io/otel/sdk/metric" + "go.uber.org/zap" +) + +func (p *pktvisorBackend) scrapeMetrics(period uint) (map[string]interface{}, error) { + var metrics map[string]interface{} + err := p.request(fmt.Sprintf("policies/__all/metrics/bucket/%d", period), &metrics, http.MethodGet, http.NoBody, 
"application/json", ScrapeTimeout) + if err != nil { + return nil, err + } + return metrics, nil +} + +func (p *pktvisorBackend) createOtlpMqttExporter(ctx context.Context, cancelFunc context.CancelCauseFunc) (exporter.Metrics, error) { + bridgeService := otel.NewBridgeService(ctx, cancelFunc, &p.policyRepo, p.agentTags) + var cfg component.Config + if p.mqttClient != nil { + cfg = otlpmqttexporter.CreateConfigClient(p.mqttClient, p.otlpMetricsTopic, p.pktvisorVersion, bridgeService) + } else { + cfg = otlpmqttexporter.CreateConfig(p.mqttConfig.Address, p.mqttConfig.Id, p.mqttConfig.Key, + p.mqttConfig.ChannelID, p.pktvisorVersion, p.otlpMetricsTopic, bridgeService) + } + + set := otlpmqttexporter.CreateDefaultSettings(p.logger) + // Create the OTLP metrics exporter that'll receive and verify the metrics produced. + return otlpmqttexporter.CreateMetricsExporter(ctx, set, cfg) +} + +func (p *pktvisorBackend) startOtelMetric(exeCtx context.Context, execCancelF context.CancelCauseFunc) bool { + var err error + p.exporter, err = p.createOtlpMqttExporter(exeCtx, execCancelF) + if err != nil { + p.logger.Error("failed to create a exporter", zap.Error(err)) + return false + } + pFactory := otlpreceiver.NewFactory() + cfg := pFactory.CreateDefaultConfig() + cfg.(*otlpreceiver.Config).Protocols = otlpreceiver.Protocols{ + HTTP: &otlpreceiver.HTTPConfig{ + HTTPServerSettings: &confighttp.HTTPServerSettings{ + Endpoint: p.otelReceiverHost + ":" + strconv.Itoa(p.otelReceiverPort), + }, + MetricsURLPath: "/v1/metrics", + }, + } + set := receiver.CreateSettings{ + TelemetrySettings: component.TelemetrySettings{ + Logger: p.logger, + TracerProvider: noop.NewTracerProvider(), + MeterProvider: metric.NewMeterProvider(), + ReportComponentStatus: func(*component.StatusEvent) error { + return nil + }, + }, + BuildInfo: component.NewDefaultBuildInfo(), + } + + p.receiver, err = pFactory.CreateMetricsReceiver(exeCtx, set, cfg, p.exporter) + if err != nil { + p.logger.Error("failed to 
create a receiver", zap.Error(err)) + return false + } + err = p.exporter.Start(exeCtx, nil) + if err != nil { + p.logger.Error("otel mqtt exporter startup error", zap.Error(err)) + return false + } + p.logger.Info("Started receiver for OTLP in orb-agent", + zap.String("host", p.otelReceiverHost), zap.Int("port", p.otelReceiverPort)) + err = p.receiver.Start(exeCtx, nil) + if err != nil { + p.logger.Error("otel receiver startup error", zap.Error(err)) + return false + } + return true +} + +func (p *pktvisorBackend) receiveOtlp() { + exeCtx, execCancelF := context.WithCancelCause(p.ctx) + go func() { + count := 0 + for { + if p.mqttClient != nil { + if ok := p.startOtelMetric(exeCtx, execCancelF); !ok { + p.logger.Error("failed to start otel metric") + return + } + p.logger.Info("started otel receiver for pktvisor") + break + } else { + count++ + p.logger.Info("waiting until mqtt client is connected try " + strconv.Itoa(count) + " from 10") + time.Sleep(time.Second * time.Duration(count)) + if count >= 10 { + execCancelF(errors.New("mqtt client is not connected")) + _ = p.Stop(exeCtx) + break + } + } + } + for { + select { + case <-exeCtx.Done(): + p.logger.Info("stopped receiver context, pktvisor will not scrape metrics", zap.Error(context.Cause(exeCtx))) + p.cancelFunc() + _ = p.exporter.Shutdown(exeCtx) + _ = p.receiver.Shutdown(exeCtx) + case <-p.ctx.Done(): + p.logger.Info("stopped pktvisor main context, stopping receiver") + execCancelF(errors.New("stopped pktvisor main context")) + _ = p.exporter.Shutdown(exeCtx) + _ = p.receiver.Shutdown(exeCtx) + return + } + } + }() +} diff --git a/agent/backend/pktvisor/utils.go b/agent/backend/pktvisor/utils.go new file mode 100644 index 0000000..b4be689 --- /dev/null +++ b/agent/backend/pktvisor/utils.go @@ -0,0 +1,97 @@ +package pktvisor + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "time" + + "github.com/netboxlabs/orb-agent/agent/backend" + "go.uber.org/zap" +) + +// note this needs to be 
stateless because it is called for multiple go routines +func (p *pktvisorBackend) request(url string, payload interface{}, method string, body io.Reader, contentType string, timeout int32) error { + client := http.Client{ + Timeout: time.Second * time.Duration(timeout), + } + + status, _, err := p.getProcRunningStatus() + if status != backend.Running { + p.logger.Warn("skipping pktvisor REST API request because process is not running or is unresponsive", zap.String("url", url), zap.String("method", method), zap.Error(err)) + return err + } + + URL := fmt.Sprintf("%s://%s:%s/api/v1/%s", p.adminAPIProtocol, p.adminAPIHost, p.adminAPIPort, url) + + req, err := http.NewRequest(method, URL, body) + if err != nil { + p.logger.Error("received error from payload", zap.Error(err)) + return err + } + + req.Header.Add("Content-Type", contentType) + res, getErr := client.Do(req) + + if getErr != nil { + p.logger.Error("received error from payload", zap.Error(getErr)) + return getErr + } + + if (res.StatusCode < 200) || (res.StatusCode > 299) { + body, err := io.ReadAll(res.Body) + if err != nil { + return errors.New(fmt.Sprintf("non 2xx HTTP error code from pktvisord, no or invalid body: %d", res.StatusCode)) + } + if len(body) == 0 { + return errors.New(fmt.Sprintf("%d empty body", res.StatusCode)) + } else if body[0] == '{' { + var jsonBody map[string]interface{} + err := json.Unmarshal(body, &jsonBody) + if err == nil { + if errMsg, ok := jsonBody["error"]; ok { + return errors.New(fmt.Sprintf("%d %s", res.StatusCode, errMsg)) + } + } + } + } + + if res.Body != nil { + err = json.NewDecoder(res.Body).Decode(&payload) + if err != nil { + return err + } + } + return nil +} + +func (p *pktvisorBackend) getProcRunningStatus() (backend.RunningStatus, string, error) { + if p.proc == nil { + return backend.Unknown, "backend not started yet", nil + } + status := p.proc.Status() + + if status.Error != nil { + errMsg := fmt.Sprintf("pktvisor process error: %v", status.Error) + 
return backend.BackendError, errMsg, status.Error + } + + if status.Complete { + err := p.proc.Stop() + return backend.Offline, "pktvisor process ended", err + } + + if status.StopTs > 0 { + return backend.Offline, "pktvisor process ended", nil + } + return backend.Running, "", nil +} + +// also used for HTTP REST API readiness check +func (p *pktvisorBackend) getAppInfo() (AppInfo, error) { + var appInfo AppInfo + err := p.request("metrics/app", &appInfo, http.MethodGet, http.NoBody, "application/json", VersionTimeout) + return appInfo, err +} diff --git a/agent/backend/pktvisor/vars.go b/agent/backend/pktvisor/vars.go new file mode 100644 index 0000000..c98ead6 --- /dev/null +++ b/agent/backend/pktvisor/vars.go @@ -0,0 +1,12 @@ +package pktvisor + +import ( + "github.com/spf13/viper" +) + +func RegisterBackendSpecificVariables(v *viper.Viper) { + v.SetDefault("orb.backends.pktvisor.binary", "/usr/local/sbin/pktvisord") + v.SetDefault("orb.backends.pktvisor.config_file", "/opt/orb/agent.yaml") + v.SetDefault("orb.backends.pktvisor.api_host", "localhost") + v.SetDefault("orb.backends.pktvisor.api_port", "10853") +} diff --git a/agent/cloud_config/cloud_config.go b/agent/cloud_config/cloud_config.go new file mode 100644 index 0000000..2f78390 --- /dev/null +++ b/agent/cloud_config/cloud_config.go @@ -0,0 +1,224 @@ +package cloud_config + +import ( + "bytes" + "crypto/tls" + "database/sql" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "os" + "strings" + "time" + + "github.com/jmoiron/sqlx" + "github.com/netboxlabs/orb-agent/agent/config" + migrate "github.com/rubenv/sql-migrate" + "go.uber.org/zap" +) + +type CloudConfigManager interface { + GetCloudConfig() (config.MQTTConfig, error) +} + +var _ CloudConfigManager = (*cloudConfigManager)(nil) + +type cloudConfigManager struct { + logger *zap.Logger + config config.Config + db *sqlx.DB +} + +func New(logger *zap.Logger, c config.Config, db *sqlx.DB) (CloudConfigManager, error) { + return 
&cloudConfigManager{logger: logger, config: c, db: db}, nil +} + +func (cc *cloudConfigManager) migrateDB() error { + migrations := &migrate.MemoryMigrationSource{ + Migrations: []*migrate.Migration{ + { + Id: "cloud_config_1", + Up: []string{ + `CREATE TABLE IF NOT EXISTS cloud_config ( + address TEXT NOT NULL, + id TEXT NOT NULL, + key TEXT NOT NULL, + channel TEXT NOT NULL, + ts_created INTEGER NOT NULL + )`, + }, + Down: []string{ + "DROP TABLE cloud_config", + }, + }, + }, + } + + _, err := migrate.Exec(cc.db.DB, "sqlite3", migrations, migrate.Up) + + return err +} + +func (cc *cloudConfigManager) request(address string, token string, response interface{}, method string, body []byte) error { + tlsConfig := &tls.Config{InsecureSkipVerify: false} + if !cc.config.OrbAgent.TLS.Verify { + tlsConfig.InsecureSkipVerify = true + } + transport := &http.Transport{TLSClientConfig: tlsConfig} + client := http.Client{ + Timeout: time.Second * 10, + Transport: transport, + } + URL := fmt.Sprintf("%s/api/v1/agents", address) + + req, err := http.NewRequest(method, URL, bytes.NewBuffer(body)) + if err != nil { + return err + } + req.Header.Add("Content-Type", "application/json") + cc.logger.Debug("cloud api request", zap.String("url", req.URL.String()), zap.ByteString("body", body)) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token)) + + res, getErr := client.Do(req) + if getErr != nil { + return getErr + } + if (res.StatusCode < 200) || (res.StatusCode > 299) { + body, err := io.ReadAll(res.Body) + if err != nil { + return errors.New(fmt.Sprintf("expected 2xx status code, no or invalid body: %d", res.StatusCode)) + } + if body[0] == '{' { + var jsonBody map[string]interface{} + err := json.Unmarshal(body, &jsonBody) + if err == nil { + if errMsg, ok := jsonBody["error"]; ok { + return errors.New(fmt.Sprintf("%d %s", res.StatusCode, errMsg)) + } + } + } + return errors.New(fmt.Sprintf("%d %s", res.StatusCode, body)) + } + + err = 
json.NewDecoder(res.Body).Decode(&response) + if err != nil { + return err + } + return nil +} + +func (cc *cloudConfigManager) autoProvision(apiAddress string, token string) (config.MQTTConfig, error) { + + type AgentRes struct { + ID string `json:"id"` + Key string `json:"key"` + ChannelID string `json:"channel_id"` + } + + type AgentReq struct { + Name string `json:"name"` + AgentTags map[string]string `json:"agent_tags"` + } + + aname := cc.config.OrbAgent.Cloud.Config.AgentName + if aname == "" { + hostname, err := os.Hostname() + if err != nil { + return config.MQTTConfig{}, err + } + aname = hostname + } + + agentReq := AgentReq{Name: strings.Replace(aname, ".", "-", -1), AgentTags: cc.config.OrbAgent.Tags} + body, err := json.Marshal(agentReq) + if err != nil { + return config.MQTTConfig{}, err + } + + cc.logger.Info("attempting auto provision", zap.String("address", apiAddress)) + + var result AgentRes + err = cc.request(apiAddress, token, &result, http.MethodPost, body) + if err != nil { + return config.MQTTConfig{}, err + } + + // save to local config + address := "" + _, err = cc.db.Exec(`INSERT INTO cloud_config VALUES ($1, $2, $3, $4, datetime('now'))`, address, result.ID, result.Key, result.ChannelID) + if err != nil { + return config.MQTTConfig{}, err + } + + return config.MQTTConfig{ + Id: result.ID, + Key: result.Key, + ChannelID: result.ChannelID, + }, nil + +} + +func (cc *cloudConfigManager) GetCloudConfig() (config.MQTTConfig, error) { + + // currently we require address to be specified, it cannot be auto provisioned. 
+ // this may change in the future + mqtt := cc.config.OrbAgent.Cloud.MQTT + + if len(mqtt.Id) > 0 && len(mqtt.Key) > 0 && len(mqtt.ChannelID) > 0 { + cc.logger.Info("using explicitly specified cloud configuration", + zap.String("address", mqtt.Address), + zap.String("id", mqtt.Id)) + return config.MQTTConfig{ + Address: mqtt.Address, + Id: mqtt.Id, + Key: mqtt.Key, + ChannelID: mqtt.ChannelID, + }, nil + } + + // if full config is not available, possibly attempt auto provision configuration + if !cc.config.OrbAgent.Cloud.Config.AutoProvision { + return config.MQTTConfig{}, errors.New("valid cloud MQTT config was not specified, and auto_provision was disabled") + } + + err := cc.migrateDB() + if err != nil { + return config.MQTTConfig{}, err + } + + // see if we have an existing auto provisioned configuration saved locally + q := `SELECT id, key, channel FROM cloud_config ORDER BY ts_created DESC LIMIT 1` + dba := config.MQTTConfig{} + if err := cc.db.QueryRowx(q).Scan(&dba.Id, &dba.Key, &dba.ChannelID); err != nil { + if err != sql.ErrNoRows { + return config.MQTTConfig{}, err + } + } else { + // successfully loaded previous auto provision + dba.Address = mqtt.Address + cc.logger.Info("using previous auto provisioned cloud configuration loaded from local storage", + zap.String("address", mqtt.Address), + zap.String("id", dba.Id)) + return dba, nil + } + + // attempt a live auto provision + apiConfig := cc.config.OrbAgent.Cloud.API + if len(apiConfig.Token) == 0 { + return config.MQTTConfig{}, errors.New("wanted to auto provision, but no API token was available") + } + + result, err := cc.autoProvision(apiConfig.Address, apiConfig.Token) + if err != nil { + return config.MQTTConfig{}, err + } + result.Address = mqtt.Address + cc.logger.Info("using auto provisioned cloud configuration", + zap.String("address", mqtt.Address), + zap.String("id", result.Id)) + + return result, nil + +} diff --git a/agent/comms.go b/agent/comms.go new file mode 100644 index 
0000000..13d4b8f --- /dev/null +++ b/agent/comms.go @@ -0,0 +1,240 @@ +package agent + +import ( + "context" + "crypto/tls" + "fmt" + "time" + + mqtt "github.com/eclipse/paho.mqtt.golang" + "github.com/netboxlabs/orb-agent/agent/backend" + "github.com/netboxlabs/orb-agent/agent/config" + "github.com/orb-community/orb/fleet" + "go.uber.org/zap" +) + +func (a *orbAgent) connect(ctx context.Context, config config.MQTTConfig) (mqtt.Client, error) { + opts := mqtt.NewClientOptions().AddBroker(config.Address).SetClientID(config.Id) + opts.SetUsername(config.Id) + opts.SetPassword(config.Key) + opts.SetKeepAlive(10 * time.Second) + opts.SetDefaultPublishHandler(func(client mqtt.Client, message mqtt.Message) { + a.logger.Info("message on unknown channel, ignoring", zap.String("topic", message.Topic()), zap.ByteString("payload", message.Payload())) + }) + opts.SetConnectionLostHandler(func(client mqtt.Client, err error) { + a.logger.Error("connection to mqtt lost", zap.Error(err)) + a.logger.Info("reconnecting....") + client.Connect() + }) + opts.SetPingTimeout(5 * time.Second) + opts.SetAutoReconnect(false) + opts.SetCleanSession(true) + opts.SetConnectTimeout(5 * time.Minute) + opts.SetResumeSubs(true) + opts.SetReconnectingHandler(func(client mqtt.Client, options *mqtt.ClientOptions) { + go func() { + ok := false + for i := 1; i < 10; i++ { + select { + case <-ctx.Done(): + return + default: + if len(a.backends) == 0 { + time.Sleep(time.Duration(i) * time.Second) + continue + } + for name, be := range a.backends { + backendStatus, s, _ := be.GetRunningStatus() + a.logger.Debug("backend in status", zap.String("backend", name), zap.String("status", s)) + switch backendStatus { + case backend.Running: + ok = true + a.requestReconnection(ctx, client, config) + return + case backend.Waiting: + ok = true + a.requestReconnection(ctx, client, config) + return + default: + a.logger.Info("waiting until a backend is in running state", zap.String("backend", name), + 
zap.String("current state", s), zap.String("wait time", (time.Duration(i)*time.Second).String())) + time.Sleep(time.Duration(i) * time.Second) + continue + } + } + } + } + if !ok { + a.logger.Error("backend wasn't able to change to running, stopping connection") + ctx.Done() + } + }() + }) + opts.SetOnConnectHandler(func(client mqtt.Client) { + go func() { + ok := false + for i := 1; i < 10; i++ { + select { + case <-ctx.Done(): + return + default: + if len(a.backends) == 0 { + time.Sleep(time.Duration(i) * time.Second) + continue + } + for name, be := range a.backends { + backendStatus, s, _ := be.GetRunningStatus() + a.logger.Debug("backend in status", zap.String("backend", name), zap.String("status", s)) + switch backendStatus { + case backend.Running: + ok = true + a.requestReconnection(ctx, client, config) + return + case backend.Waiting: + ok = true + a.requestReconnection(ctx, client, config) + return + default: + a.logger.Info("waiting until a backend is in running state", zap.String("backend", name), + zap.String("current state", s), zap.String("wait time", (time.Duration(i)*time.Second).String())) + time.Sleep(time.Duration(i) * time.Second) + continue + } + } + } + } + if !ok { + a.logger.Error("backend wasn't able to change to running, stopping connection") + ctx.Done() + } + }() + }) + + if !a.config.OrbAgent.TLS.Verify { + opts.TLSConfig = &tls.Config{InsecureSkipVerify: true} + } + + c := mqtt.NewClient(opts) + if token := c.Connect(); token.Wait() && token.Error() != nil { + return nil, token.Error() + } + + return c, nil +} + +func (a *orbAgent) requestReconnection(ctx context.Context, client mqtt.Client, config config.MQTTConfig) { + a.nameAgentRPCTopics(config.ChannelID) + for name, be := range a.backends { + be.SetCommsClient(config.Id, &client, fmt.Sprintf("%s/?/%s", a.baseTopic, name)) + } + a.agent_id = config.Id + + if token := client.Subscribe(a.rpcFromCoreTopic, 1, a.handleRPCFromCore); token.Wait() && token.Error() != nil { + 
a.logger.Error("failed to subscribe to agent control plane RPC topic", zap.String("topic", a.rpcFromCoreTopic), zap.Error(token.Error())) + a.logger.Error("critical failure: unable to subscribe to control plane") + a.Stop(ctx) + return + } + + err := a.sendCapabilities() + if err != nil { + a.logger.Error("failed to send agent capabilities", zap.Error(err)) + } + + err = a.sendGroupMembershipReq() + if err != nil { + a.logger.Error("failed to send group membership request", zap.Error(err)) + } +} + +func (a *orbAgent) nameAgentRPCTopics(channelId string) { + + base := fmt.Sprintf("channels/%s/messages", channelId) + a.rpcToCoreTopic = fmt.Sprintf("%s/%s", base, fleet.RPCToCoreTopic) + a.rpcFromCoreTopic = fmt.Sprintf("%s/%s", base, fleet.RPCFromCoreTopic) + a.capabilitiesTopic = fmt.Sprintf("%s/%s", base, fleet.CapabilitiesTopic) + a.heartbeatsTopic = fmt.Sprintf("%s/%s", base, fleet.HeartbeatsTopic) + a.logTopic = fmt.Sprintf("%s/%s", base, fleet.LogTopic) + a.baseTopic = base + +} + +func (a *orbAgent) unsubscribeGroupChannels() { + a.logger.Debug("calling to unsub group channels") + for id, groupInfo := range a.groupsInfos { + base := fmt.Sprintf("channels/%s/messages", groupInfo.ChannelID) + rpcFromCoreTopic := fmt.Sprintf("%s/%s", base, fleet.RPCFromCoreTopic) + if token := a.client.Unsubscribe(rpcFromCoreTopic); token.Wait() && token.Error() != nil { + a.logger.Warn("failed to unsubscribe to group channel", zap.String("group_id", id), zap.String("group_name", groupInfo.Name), zap.String("topic", groupInfo.ChannelID), zap.Error(token.Error())) + } + a.logger.Info("completed RPC unsubscription to group", zap.String("group_id", id), zap.String("group_name", groupInfo.Name), zap.String("topic", rpcFromCoreTopic)) + } + a.groupsInfos = make(map[string]GroupInfo) +} + +func (a *orbAgent) unsubscribeGroupChannel(channelID string, agentGroupID string) { + base := fmt.Sprintf("channels/%s/messages", channelID) + rpcFromCoreTopic := fmt.Sprintf("%s/%s", base, 
fleet.RPCFromCoreTopic) + if token := a.client.Unsubscribe(channelID); token.Wait() && token.Error() != nil { + a.logger.Warn("failed to unsubscribe to group channel", zap.String("topic", rpcFromCoreTopic), zap.Error(token.Error())) + return + } + a.logger.Info("completed RPC unsubscription to group", zap.String("topic", rpcFromCoreTopic)) + delete(a.groupsInfos, agentGroupID) +} + +func (a *orbAgent) removeDatasetFromPolicy(datasetID string, policyID string) { + for _, be := range a.backends { + a.policyManager.RemovePolicyDataset(policyID, datasetID, be) + } +} + +func (a *orbAgent) startComms(ctx context.Context, config config.MQTTConfig) error { + + var err error + a.logger.Debug("starting mqtt connection") + if a.client == nil || !a.client.IsConnected() { + a.client, err = a.connect(ctx, config) + if err != nil { + a.logger.Error("connection failed", zap.String("channel", config.ChannelID), zap.String("agent_id", config.Id), zap.Error(err)) + return ErrMqttConnection + } + // Store the data from connection to cloud config within agent. 
+ a.config.OrbAgent.Cloud.MQTT.Id = config.Id + a.config.OrbAgent.Cloud.MQTT.Key = config.Key + a.config.OrbAgent.Cloud.MQTT.Address = config.Address + a.config.OrbAgent.Cloud.MQTT.ChannelID = config.ChannelID + } else { + a.requestReconnection(ctx, a.client, config) + } + + return nil +} + +func (a *orbAgent) subscribeGroupChannels(groups []fleet.GroupMembershipData) { + for _, groupData := range groups { + + base := fmt.Sprintf("channels/%s/messages", groupData.ChannelID) + rpcFromCoreTopic := fmt.Sprintf("%s/%s", base, fleet.RPCFromCoreTopic) + + token := a.client.Subscribe(rpcFromCoreTopic, 1, a.handleGroupRPCFromCore) + if token.Error() != nil { + a.logger.Error("failed to subscribe to group channel/topic", zap.String("group_id", groupData.GroupID), zap.String("group_name", groupData.Name), zap.String("topic", rpcFromCoreTopic), zap.Error(token.Error())) + continue + } + ok := token.WaitTimeout(time.Second * 5) + if ok && token.Error() != nil { + a.logger.Error("failed to subscribe to group channel/topic", zap.String("group_id", groupData.GroupID), zap.String("group_name", groupData.Name), zap.String("topic", rpcFromCoreTopic), zap.Error(token.Error())) + continue + } + if !ok { + a.logger.Error("failed to subscribe to group channel/topic: time out", zap.String("group_id", groupData.GroupID), zap.String("group_name", groupData.Name), zap.String("topic", rpcFromCoreTopic)) + continue + } + a.logger.Info("completed RPC subscription to group", zap.String("group_id", groupData.GroupID), zap.String("group_name", groupData.Name), zap.String("topic", rpcFromCoreTopic)) + a.groupsInfos[groupData.GroupID] = GroupInfo{ + Name: groupData.Name, + ChannelID: groupData.ChannelID, + } + } +} diff --git a/agent/config/types.go b/agent/config/types.go new file mode 100644 index 0000000..04ab9ef --- /dev/null +++ b/agent/config/types.go @@ -0,0 +1,56 @@ +package config + +type TLS struct { + Verify bool `mapstructure:"verify"` +} + +type APIConfig struct { + Address string 
// DBConfig locates the agent's local database file.
type DBConfig struct {
	File string `mapstructure:"file"`
}

// MQTTConfig holds the MQTT broker address and per-agent credentials,
// either statically configured or obtained via auto provision.
type MQTTConfig struct {
	Address   string `mapstructure:"address"`
	Id        string `mapstructure:"id"`
	Key       string `mapstructure:"key"`
	ChannelID string `mapstructure:"channel_id"`
}

// CloudConfig controls how the agent identifies and provisions itself
// against the cloud.
type CloudConfig struct {
	AgentName     string `mapstructure:"agent_name"`
	AutoProvision bool   `mapstructure:"auto_provision"`
}

// Cloud groups all cloud-facing settings: provisioning, REST API, and MQTT.
type Cloud struct {
	Config CloudConfig `mapstructure:"config"`
	API    APIConfig   `mapstructure:"api"`
	MQTT   MQTTConfig  `mapstructure:"mqtt"`
}

// Opentelemetry holds the local OTLP receiver endpoint.
type Opentelemetry struct {
	Host string `mapstructure:"host"`
	Port int    `mapstructure:"port"`
}

// Debug toggles debug behavior for the agent.
type Debug struct {
	Enable bool `mapstructure:"enable"`
}

// OrbAgent is the "orb" section of the agent configuration file.
type OrbAgent struct {
	Backends map[string]map[string]string `mapstructure:"backends"`
	Tags     map[string]string            `mapstructure:"tags"`
	Cloud    Cloud                        `mapstructure:"cloud"`
	TLS      TLS                          `mapstructure:"tls"`
	DB       DBConfig                     `mapstructure:"db"`
	Otel     Opentelemetry                `mapstructure:"otel"`
	Debug    Debug                        `mapstructure:"debug"`
}

// Config is the root of the agent configuration document.
type Config struct {
	Version  float64  `mapstructure:"version"`
	OrbAgent OrbAgent `mapstructure:"orb"`
}
+MF_AUTH_HTTP_PORT=8189 +MF_AUTH_GRPC_PORT=8181 +MF_AUTH_GRPC_URL=auth:8181 +MF_AUTH_GRPC_TIMEOUT=1s +MF_AUTH_DB_PORT=5432 +MF_AUTH_DB_USER=mainflux +MF_AUTH_DB_PASS=mainflux +MF_AUTH_DB=auth +MF_AUTH_SECRET=secret + +### Keto +MF_KETO_READ_REMOTE_HOST=mainflux-keto +MF_KETO_READ_REMOTE_PORT=4466 +MF_KETO_WRITE_REMOTE_HOST=mainflux-keto +MF_KETO_WRITE_REMOTE_PORT=4467 +MF_KETO_DB_PORT=5432 +MF_KETO_DB_USER=mainflux +MF_KETO_DB_PASS=mainflux +MF_KETO_DB=keto + +### Users +MF_USERS_LOG_LEVEL=debug +MF_USERS_HTTP_PORT=8180 +MF_USERS_DB_PORT=5432 +MF_USERS_DB_USER=mainflux +MF_USERS_DB_PASS=mainflux +MF_USERS_DB=users +MF_USERS_ADMIN_EMAIL=admin@example.com +MF_USERS_ADMIN_PASSWORD=12345678 +MF_USERS_RESET_PWD_TEMPLATE=users.tmpl +MF_USERS_PASS_REGEX=^.{8,}$ +MF_USERS_ALLOW_SELF_REGISTER=true + +### Email utility +MF_EMAIL_HOST=smtp.mailtrap.io +MF_EMAIL_PORT=2525 +MF_EMAIL_USERNAME=18bf7f70705139 +MF_EMAIL_PASSWORD=2b0d302e775b1e +MF_EMAIL_FROM_ADDRESS=from@example.com +MF_EMAIL_FROM_NAME=Example +MF_EMAIL_TEMPLATE=email.tmpl + +### Token utility +MF_TOKEN_RESET_ENDPOINT=/reset-request + +### Things +MF_THINGS_LOG_LEVEL=debug +MF_THINGS_HTTP_PORT=8182 +MF_THINGS_AUTH_HTTP_PORT=8989 +MF_THINGS_AUTH_GRPC_PORT=8183 +MF_THINGS_AUTH_GRPC_URL=things:8183 +MF_THINGS_AUTH_GRPC_TIMEOUT=1s +MF_THINGS_DB_PORT=5432 +MF_THINGS_DB_USER=mainflux +MF_THINGS_DB_PASS=mainflux +MF_THINGS_DB=things +MF_THINGS_ES_URL=localhost:6379 +MF_THINGS_ES_PASS= +MF_THINGS_ES_DB=0 + +### HTTP +MF_HTTP_ADAPTER_PORT=8185 + +### MQTT +MF_MQTT_ADAPTER_LOG_LEVEL=debug +MF_MQTT_ADAPTER_MQTT_PORT=1883 +MF_MQTT_BROKER_PORT=1883 +MF_MQTT_ADAPTER_WS_PORT=8080 +MF_MQTT_BROKER_WS_PORT=8080 +MF_MQTT_ADAPTER_ES_DB=0 +MF_MQTT_ADAPTER_ES_PASS= + +### VERNEMQ +MF_DOCKER_VERNEMQ_ALLOW_ANONYMOUS=on +MF_DOCKER_VERNEMQ_LOG__CONSOLE__LEVEL=error + +# Certs +MF_CERTS_LOG_LEVEL=debug +MF_CERTS_HTTP_PORT=8204 +MF_CERTS_DB_HOST=certs-db +MF_CERTS_DB_PORT=5432 +MF_CERTS_DB_USER=mainflux +MF_CERTS_DB_PASS=mainflux 
+MF_CERTS_DB=certs +MF_CERTS_DB_SSL_MODE= +MF_CERTS_DB_SSL_CERT= +MF_CERTS_DB_SSL_KEY= +MF_CERTS_DB_SSL_ROOT_CERT= +MF_CERTS_ENCRYPT_KEY= +MF_CERTS_CLIENT_TLS= +MF_CERTS_CA_CERTS= +MF_CERTS_SERVER_CERT= +MF_CERTS_SERVER_KEY= +MF_SDK_THINGS_URL=http://172.17.0.1 +MF_CERTS_SIGN_CA_PATH=/etc/ssl/certs/ca.crt +MF_CERTS_SIGN_CA_KEY_PATH=/etc/ssl/certs/ca.key +MF_CERTS_SIGN_HOURS_VALID=2048h +MF_CERTS_SIGN_RSA_BITS=2048 +MF_CERTS_VAULT_HOST=http://vault:8200 + + +### Vault +MF_VAULT_HOST=vault +MF_VAULT_PORT=8200 +MF_VAULT_UNSEAL_KEY_1= +MF_VAULT_UNSEAL_KEY_2= +MF_VAULT_UNSEAL_KEY_3= +MF_VAULT_TOKEN= +MF_VAULT_CA_NAME=mainflux +MF_VAULT_CA_ROLE_NAME=mainflux +MF_VAULT_PKI_PATH=pki +MF_VAULT_PKI_INT_PATH=pki_int +MF_VAULT_CA_CN=mainflux.com +MF_VAULT_CA_OU='Mainflux Cloud' +MF_VAULT_CA_O='Mainflux Labs' +MF_VAULT_CA_C=Serbia +MF_VAULT_CA_L=Belgrade + +### SMTP Notifier +MF_SMTP_NOTIFIER_PORT=8906 +MF_SMTP_NOTIFIER_LOG_LEVEL=debug +MF_SMTP_NOTIFIER_DB_PORT=5432 +MF_SMTP_NOTIFIER_DB_USER=mainflux +MF_SMTP_NOTIFIER_DB_PASS=mainflux +MF_SMTP_NOTIFIER_DB=subscriptions +MF_SMTP_NOTIFIER_TEMPLATE=smtp-notifier.tmpl + +# Docker image tag +MF_RELEASE_TAG=0.13.0 +ORB_RELEASE_TAG=develop + +# Orb: fleet +ORB_FLEET_HTTP_PORT=8203 +ORB_FLEET_DB_USER=orb +ORB_FLEET_DB_PASS=orb +ORB_FLEET_DB_DB=fleet +ORB_FLEET_GRPC_PORT=8283 +ORB_FLEET_GRPC_URL=fleet:8283 +ORB_FLEET_GRPC_TIMEOUT=1s + +# Orb: policies +ORB_POLICIES_HTTP_PORT=8202 +ORB_POLICIES_DB_USER=orb +ORB_POLICIES_DB_PASS=orb +ORB_POLICIES_DB_DB=policies +ORB_POLICIES_GRPC_PORT=8282 +ORB_POLICIES_GRPC_URL=policies:8282 +ORB_POLICIES_GRPC_TIMEOUT=1s + +# Orb: sinks +ORB_SINKS_HTTP_PORT=8200 +ORB_SINKS_DB_USER=orb +ORB_SINKS_DB_PASS=orb +ORB_SINKS_DB_DB=sinks +ORB_SINKS_GRPC_PORT=8280 +ORB_SINKS_GRPC_URL=sinks:8280 +ORB_SINKS_GRPC_TIMEOUT=1s \ No newline at end of file diff --git a/agent/docker/Dockerfile b/agent/docker/Dockerfile new file mode 100644 index 0000000..89c4a75 --- /dev/null +++ b/agent/docker/Dockerfile @@ -0,0 +1,35 @@ 
# Multi-stage build for the orb-agent image:
#   builder         - compiles the Go agent binary (static, CGO enabled here)
#   otelcol-contrib - donor image for the otelcol-contrib binary
#   final           - pktvisor base image with agent, configs, and collector
ARG PKTVISOR_TAG=latest-develop
ARG OTEL_TAG=0.111.0

FROM golang:1.23-alpine AS builder

WORKDIR /go/src/github.com/netboxlabs/orb-agent
COPY go.mod .
RUN go mod tidy
COPY . .
RUN apk update && apk add --no-cache build-base git make
# agent_bin is the Makefile target that produces build/orb-agent
RUN mkdir /tmp/build && CGO_ENABLED=1 make agent_bin && mv build/orb-agent /tmp/build/orb-agent

FROM otel/opentelemetry-collector-contrib:${OTEL_TAG} AS otelcol-contrib

FROM orbcommunity/pktvisor:${PKTVISOR_TAG}

# Non-root runtime user; netdev group is needed for packet capture interfaces.
RUN addgroup --system netdev && useradd -m --shell /bin/bash -G netdev appuser && echo "appuser ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers

# Create necessary directories and set ownership to appuser
RUN mkdir -p /opt/orb && chown appuser:appuser /opt/orb \
    && chown appuser:appuser /usr/local/bin \
    && chown appuser:appuser /var/run

COPY --from=builder /tmp/build/orb-agent /usr/local/bin/orb-agent
COPY --from=builder /go/src/github.com/netboxlabs/orb-agent/agent/docker/agent_default.yaml /opt/orb/agent_default.yaml
COPY --from=builder /go/src/github.com/netboxlabs/orb-agent/agent/docker/orb-agent-entry.sh /usr/local/bin/orb-agent-entry.sh
COPY --from=builder /go/src/github.com/netboxlabs/orb-agent/agent/docker/run-agent.sh /run-agent.sh

COPY --from=otelcol-contrib /otelcol-contrib /usr/local/bin/otelcol-contrib

RUN chmod a+x /run-agent.sh

USER appuser

ENTRYPOINT [ "/usr/local/bin/orb-agent-entry.sh" ]
b/agent/docker/orb-agent-entry.sh @@ -0,0 +1,194 @@ +#!/usr/bin/env bash +# +# entry point for orb-agent +# + +agentstop1 () { + printf "\rFinishing container.." + exit 0 +} + +agentstop2 () { + if [ -f "/var/run/orb-agent.pid" ]; then + ID=$(cat /var/run/orb-agent.pid) + kill -15 $ID + fi +} + +# check geodb folder and extract db +cd /geo-db/ +if [ -f "asn.mmdb.gz" ]; then + gzip -d asn.mmdb.gz + gzip -d city.mmdb.gz +fi + +## Cloud API Configuration ## +# support generating API and MQTT addresses with one host name in ORB_CLOUD_ADDRESS +if [[ -n "${ORB_CLOUD_ADDRESS}" ]]; then + ORB_CLOUD_API_ADDRESS="https://${ORB_CLOUD_ADDRESS}" + ORB_CLOUD_MQTT_ADDRESS="tls://${ORB_CLOUD_ADDRESS}:8883" + export ORB_CLOUD_API_ADDRESS ORB_CLOUD_MQTT_ADDRESS +fi + +## Agent Configuration ## +# support generating simple default pktvisor PCAP taps +tmpfile=$(mktemp /tmp/orb-agent-pktvisor-conf.XXXXXX) +trap 'rm -f "$tmpfile"' EXIT +trap agentstop1 SIGINT +trap agentstop2 SIGTERM + +#Add default configuration to a file +( +cat < "$tmpfile" + +# Checking agent.yaml config file +CONFIG_FILE_EXISTS=false +if [ -f "/opt/orb/agent.yaml" ]; then + echo "Contains default config file" + CONFIG_FILE_EXISTS=true +elif [[ "$2" == '-c' || "$3" == '-c' ]]; then + echo "Contains configuration argument parameter" + CONFIG_FILE_EXISTS=true +else + echo "Configuration file not provided, default configuration file location is /opt/orb/agent.yaml" +fi + +# Check NetFlow TAP parameters +if [ "${PKTVISOR_NETFLOW_BIND_ADDRESS}" = '' ]; then + PKTVISOR_NETFLOW_BIND_ADDRESS='0.0.0.0' +fi +if [ "${PKTVISOR_NETFLOW_PORT_DEFAULT}" = '' ]; then + PKTVISOR_NETFLOW_PORT_DEFAULT='2055' +fi +if [ "${PKTVISOR_NETFLOW}" = 'true' ]; then +echo "Setting default_netflow as visor tap" +( +cat <> "$tmpfile" + export ORB_BACKENDS_PKTVISOR_CONFIG_FILE="$tmpfile" +fi + +# Check SFlow tap Parameters +if [ "${PKTVISOR_SFLOW_BIND_ADDRESS}" = '' ]; then + PKTVISOR_SFLOW_BIND_ADDRESS='0.0.0.0' +fi +if [ 
"${PKTVISOR_SFLOW_PORT_DEFAULT}" = '' ]; then + PKTVISOR_SFLOW_PORT_DEFAULT='6343' +fi +if [ "${PKTVISOR_SFLOW}" = 'true' ]; then +echo "Setting default_sflow as visor tap" +( +cat <> "$tmpfile" + export ORB_BACKENDS_PKTVISOR_CONFIG_FILE="$tmpfile" +fi + +# Check DNS TAP Parameters +if [ "${PKTVISOR_DNSTAP_BIND_ADDRESS}" = '' ]; then + PKTVISOR_DNSTAP_BIND_ADDRESS='0.0.0.0' +fi +if [ "${PKTVISOR_DNSTAP_PORT_DEFAULT}" = '' ]; then + PKTVISOR_DNSTAP_PORT_DEFAULT='6000' +fi +if [ "${PKTVISOR_DNSTAP}" = 'true' ]; then +echo "Setting default_dnstap as visor tap" +( +cat <> "$tmpfile" + export ORB_BACKENDS_PKTVISOR_CONFIG_FILE="$tmpfile" +fi + +# special case: if the iface is "mock", then use "mock" pcap source +if [ "$PKTVISOR_PCAP_IFACE_DEFAULT" = 'mock' ]; then + MAYBE_MOCK='pcap_source: mock' +fi +if [[ -n "${PKTVISOR_PCAP_IFACE_DEFAULT}" && $CONFIG_FILE_EXISTS == false || "${PKTVISOR_PCAP}" == 'true' || "${PKTVISOR_DNSTAP}" != 'true' && "${PKTVISOR_SFLOW}" != 'true' && "${PKTVISOR_NETFLOW}" != 'true' ]]; then + echo "Setting default_pcap as visor tap" + if [ "$PKTVISOR_PCAP_IFACE_DEFAULT" = '' ]; then + PKTVISOR_PCAP_IFACE_DEFAULT='auto' + fi +( +cat <>"$tmpfile" + export ORB_BACKENDS_PKTVISOR_CONFIG_FILE="$tmpfile" +fi + +# or specify pair of TAPNAME:IFACE +# TODO allow multiple, split on comma +# PKTVISOR_PCAP_IFACE_TAPS=default_pcap:en0 +# eternal loop +while true +do + # pid file dont exist + if [ ! 
-f "/var/run/orb-agent.pid" ]; then + # running orb-agent in background + if [[ "$2" == '-c' || "$3" == '-c' ]]; then + # if config file was passed, drop the built-in pktvisor configuration file + echo "Running with config file parameter" + ORB_BACKENDS_PKTVISOR_CONFIG_FILE="" + nohup /run-agent.sh "$@" & + else + if [[ $CONFIG_FILE_EXISTS == true ]]; then + # if config file is mounted, drop the built-in pktvisor configuration file + echo "Running with config file mounted" + ORB_BACKENDS_PKTVISOR_CONFIG_FILE="" + nohup /run-agent.sh "$@" & + else + # if none config file is set, use the built-in pktvisor configuration file and agent_default.yaml + echo "Running with default config file and pktvisor built-in configuration" + # checking if debug mode is enabled + DEBUG='' + if [[ "$2" == '-d' ]]; then + echo "Debug mode enabled" + DEBUG='-d' + fi + nohup /run-agent.sh run -c /opt/orb/agent_default.yaml $DEBUG & + fi + fi + sleep 2 + if [ -d "/nohup.out" ]; then + tail -f /nohup.out & + fi + else + PID=$(cat /var/run/orb-agent.pid) + if [ ! -d "/proc/$PID" ]; then + # stop container + echo "$PID is not running" + rm /var/run/orb-agent.pid + exit 1 + fi + sleep 5 + fi +done diff --git a/agent/docker/run-agent.sh b/agent/docker/run-agent.sh new file mode 100644 index 0000000..1ffa631 --- /dev/null +++ b/agent/docker/run-agent.sh @@ -0,0 +1,12 @@ +#!/bin/bash +# orb agent binary location. by default, matches orb-agent container (see Dockerfile) +orb_agent_bin="${ORB_AGENT_BIN:-/usr/local/bin/orb-agent}" +echo "Starting orb-agent : $orb_agent_bin with args $#" + +if [ $# -eq 0 ]; then + "$orb_agent_bin" run & + echo $! > /var/run/orb-agent.pid +else + "$orb_agent_bin" "$@" & + echo $! 
> /var/run/orb-agent.pid +fi diff --git a/agent/heartbeats.go b/agent/heartbeats.go new file mode 100644 index 0000000..92bed17 --- /dev/null +++ b/agent/heartbeats.go @@ -0,0 +1,161 @@ +package agent + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/netboxlabs/orb-agent/agent/backend" + "github.com/netboxlabs/orb-agent/agent/policies" + "github.com/orb-community/orb/fleet" + "go.uber.org/zap" +) + +// HeartbeatFreq how often to heartbeat +const HeartbeatFreq = 50 * time.Second + +// RestartTimeMin minimum time to wait between restarts +const RestartTimeMin = 5 * time.Minute + +func (a *orbAgent) sendSingleHeartbeat(ctx context.Context, t time.Time, agentsState fleet.State) { + + if a.heartbeatsTopic == "" { + a.logger.Debug("heartbeat topic not yet set, skipping") + return + } + + a.logger.Debug("heartbeat", zap.String("state", agentsState.String())) + + bes := make(map[string]fleet.BackendStateInfo) + for name, be := range a.backends { + if agentsState == fleet.Offline { + bes[name] = fleet.BackendStateInfo{State: backend.Offline.String()} + continue + } + besi := fleet.BackendStateInfo{} + backendStatus, errMsg, err := be.GetRunningStatus() + a.backendState[name].Status = backendStatus + besi.State = backendStatus.String() + if backendStatus != backend.Running { + a.logger.Error("backend not ready", zap.String("backend", name), zap.String("status", backendStatus.String()), zap.String("errMsg", errMsg), zap.Error(err)) + if err != nil { + a.backendState[name].LastError = fmt.Sprintf("failed to retrieve backend status: %v", err) + } else if errMsg != "" { + a.backendState[name].LastError = errMsg + } + // status is not running so we have a current error + besi.Error = a.backendState[name].LastError + if time.Now().Sub(be.GetStartTime()) >= RestartTimeMin { + a.logger.Info("attempting backend restart due to failed status during heartbeat") + if a.config.OrbAgent.Cloud.MQTT.Id != "" { + ctx = context.WithValue(ctx, "agent_id", 
a.config.OrbAgent.Cloud.MQTT.Id) + } else { + ctx = context.WithValue(ctx, "agent_id", "auto-provisioning-without-id") + } + err := a.RestartBackend(ctx, name, "failed during heartbeat") + if err != nil { + a.logger.Error("failed to restart backend", zap.Error(err), zap.String("backend", name)) + } + } else { + a.logger.Info("waiting to attempt backend restart due to failed status", zap.Duration("remaining_secs", RestartTimeMin-(time.Now().Sub(be.GetStartTime())))) + } + } else { + // status is Running so no current error + besi.Error = "" + } + if a.backendState[name].LastError != "" { + besi.LastError = a.backendState[name].LastError + } + if !a.backendState[name].LastRestartTS.IsZero() { + besi.LastRestartTS = a.backendState[name].LastRestartTS + } + if a.backendState[name].RestartCount > 0 { + besi.RestartCount = a.backendState[name].RestartCount + } + if a.backendState[name].LastRestartReason != "" { + besi.LastRestartReason = a.backendState[name].LastRestartReason + } + bes[name] = besi + } + + ps := make(map[string]fleet.PolicyStateInfo) + pdata, err := a.policyManager.GetPolicyState() + if err == nil { + for _, pd := range pdata { + pstate := policies.Offline.String() + // if agent is not offline, default to status that policy manager believes we should be in + if agentsState != fleet.Offline { + pstate = pd.State.String() + } + // but if the policy backend is not running, policy isn't either + if bestate, ok := a.backendState[pd.Backend]; ok && bestate.Status != backend.Running { + pstate = policies.Unknown.String() + pd.BackendErr = "backend is unreachable" + } + ps[pd.ID] = fleet.PolicyStateInfo{ + Name: pd.Name, + Version: pd.Version, + State: pstate, + Error: pd.BackendErr, + Datasets: pd.GetDatasetIDs(), + LastScrapeTS: pd.LastScrapeTS, + LastScrapeBytes: pd.LastScrapeBytes, + Backend: pd.Backend, + } + } + } else { + a.logger.Error("unable to retrieved policy state", zap.Error(err)) + } + + ag := make(map[string]fleet.GroupStateInfo) + for id, 
groupInfo := range a.groupsInfos { + ag[id] = fleet.GroupStateInfo{ + GroupName: groupInfo.Name, + GroupChannel: groupInfo.ChannelID, + } + } + + hbData := fleet.Heartbeat{ + SchemaVersion: fleet.CurrentHeartbeatSchemaVersion, + State: agentsState, + TimeStamp: t, + BackendState: bes, + PolicyState: ps, + GroupState: ag, + } + + body, err := json.Marshal(hbData) + if err != nil { + a.logger.Error("error marshalling heartbeat", zap.Error(err)) + return + } + + if token := a.client.Publish(a.heartbeatsTopic, 1, false, body); token.Wait() && token.Error() != nil { + a.logger.Error("error sending heartbeat", zap.Error(token.Error())) + err = a.restartComms(ctx) + if err != nil { + a.logger.Error("error reconnecting with MQTT, stopping agent") + a.Stop(ctx) + } + } +} + +func (a *orbAgent) sendHeartbeats(ctx context.Context, cancelFunc context.CancelFunc) { + a.logger.Debug("start heartbeats routine", zap.Any("routine", ctx.Value("routine"))) + a.sendSingleHeartbeat(ctx, time.Now(), fleet.Online) + defer func() { + cancelFunc() + }() + for { + select { + case <-ctx.Done(): + a.logger.Debug("context done, stopping heartbeats routine") + a.sendSingleHeartbeat(ctx, time.Now(), fleet.Offline) + a.heartbeatCtx = nil + return + case t := <-a.hbTicker.C: + a.sendSingleHeartbeat(ctx, t, fleet.Online) + } + } +} diff --git a/agent/logging.go b/agent/logging.go new file mode 100644 index 0000000..37bd5ed --- /dev/null +++ b/agent/logging.go @@ -0,0 +1,49 @@ +package agent + +import ( + mqtt "github.com/eclipse/paho.mqtt.golang" + "go.uber.org/zap" +) + +type agentLoggerDebug struct { + a *orbAgent +} +type agentLoggerWarn struct { + a *orbAgent +} +type agentLoggerCritical struct { + a *orbAgent +} +type agentLoggerError struct { + a *orbAgent +} + +var _ mqtt.Logger = (*agentLoggerDebug)(nil) +var _ mqtt.Logger = (*agentLoggerWarn)(nil) +var _ mqtt.Logger = (*agentLoggerCritical)(nil) +var _ mqtt.Logger = (*agentLoggerError)(nil) + +func (a *agentLoggerWarn) Println(v 
...interface{}) { + a.a.logger.Warn("WARN mqtt log", zap.Any("payload", v)) +} +func (a *agentLoggerWarn) Printf(format string, v ...interface{}) { + a.a.logger.Warn("WARN mqtt log", zap.Any("payload", v)) +} +func (a *agentLoggerDebug) Println(v ...interface{}) { + a.a.logger.Debug("DEBUG mqtt log", zap.Any("payload", v)) +} +func (a *agentLoggerDebug) Printf(format string, v ...interface{}) { + a.a.logger.Debug("DEBUG mqtt log", zap.Any("payload", v)) +} +func (a *agentLoggerCritical) Println(v ...interface{}) { + a.a.logger.Error("CRITICAL mqtt log", zap.Any("payload", v)) +} +func (a *agentLoggerCritical) Printf(format string, v ...interface{}) { + a.a.logger.Error("CRITICAL mqtt log", zap.Any("payload", v)) +} +func (a *agentLoggerError) Println(v ...interface{}) { + a.a.logger.Error("ERROR mqtt log", zap.Any("payload", v)) +} +func (a *agentLoggerError) Printf(format string, v ...interface{}) { + a.a.logger.Error("ERROR mqtt log", zap.Any("payload", v)) +} diff --git a/agent/otel/bridgeservice.go b/agent/otel/bridgeservice.go new file mode 100644 index 0000000..b0192db --- /dev/null +++ b/agent/otel/bridgeservice.go @@ -0,0 +1,54 @@ +package otel + +import ( + "context" + "strings" + + "github.com/netboxlabs/orb-agent/agent/policies" +) + +type AgentBridgeService interface { + RetrieveAgentInfoByPolicyName(policyName string) (*AgentDataPerPolicy, error) + NotifyAgentDisconnection(ctx context.Context, err error) +} + +type AgentDataPerPolicy struct { + PolicyID string + Datasets string + AgentTags map[string]string +} + +var _ AgentBridgeService = (*BridgeService)(nil) + +type BridgeService struct { + bridgeContext context.Context + cancelFunc context.CancelCauseFunc + policyRepo policies.PolicyRepo + AgentTags map[string]string +} + +func NewBridgeService(ctx context.Context, cancelFunc context.CancelCauseFunc, policyRepo *policies.PolicyRepo, agentTags map[string]string) *BridgeService { + return &BridgeService{ + bridgeContext: ctx, + cancelFunc: 
cancelFunc, + policyRepo: *policyRepo, + AgentTags: agentTags, + } +} + +func (b *BridgeService) RetrieveAgentInfoByPolicyName(policyName string) (*AgentDataPerPolicy, error) { + pData, err := b.policyRepo.GetByName(policyName) + if err != nil { + return nil, err + } + return &AgentDataPerPolicy{ + PolicyID: pData.ID, + Datasets: strings.Join(pData.GetDatasetIDs(), ","), + AgentTags: b.AgentTags, + }, nil +} + +func (b *BridgeService) NotifyAgentDisconnection(ctx context.Context, err error) { + b.cancelFunc(err) + ctx.Done() +} diff --git a/agent/otel/otlpmqttexporter/README.md b/agent/otel/otlpmqttexporter/README.md new file mode 100644 index 0000000..21ab0e2 --- /dev/null +++ b/agent/otel/otlpmqttexporter/README.md @@ -0,0 +1,59 @@ +# OTLP/MQTT Exporter + + +Alt 1. Test Orb Agent with Open Telemetry +use this config in localconifg/config.yaml +```yaml +otel: + enable: true +``` + +Exports traces and/or metrics via MQTT using [OTLP]( +https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/otlp.md) +format. + +Supported pipeline types: traces, metrics, logs + +:warning: OTLP logs format is currently marked as "Beta" and may change in +incompatible ways. + +The following settings are required: + +- `endpoint` (no default): The target base URL to send data to (e.g.: https://example.com:4318). + To send each signal a corresponding path will be added to this base URL, i.e. for traces + "/v1/traces" will appended, for metrics "/v1/metrics" will be appended, for logs + "/v1/logs" will be appended. + +The following settings can be optionally configured: + +- `traces_channel` (no default): The target URL to send trace data to (e.g.: https://example.com:4318/v1/traces). + If this setting is present the `endpoint` setting is ignored for traces. +- `metrics_channel` (no default): The target URL to send metric data to (e.g.: https://example.com:4318/v1/metrics). + If this setting is present the `endpoint` setting is ignored for metrics. 
+- `logs_channel` (no default): The target URL to send log data to (e.g.: https://example.com:4318/v1/logs). + If this setting is present the `endpoint` setting is ignored logs. +- `tls`: see [TLS Configuration Settings](../../config/configtls/README.md) for the full set of available options. + +[//]: # (-Not sure yet if this will apply `read_buffer_size` (default = 0): ReadBufferSize for MQTT client.) + +[//]: # (-Not sure yet if this will apply `write_buffer_size` (default = 512 * 1024): WriteBufferSize for HTTP client.) + +Example: + +```yaml +exporters: + otlpmqtt: + endpoint: https://example.com:4318/v1/traces +``` + +By default `gzip` compression is enabled. See [compression comparison](../../config/configgrpc/README.md#compression-comparison) for details benchmark information. To disable, configure as follows: + +```yaml +exporters: + otlpmqtt: + ... + compression: none +``` + +The full list of settings exposed for this exporter are documented [here](./config.go) +with detailed sample configurations [here](./testdata/config.yaml). diff --git a/agent/otel/otlpmqttexporter/config.go b/agent/otel/otlpmqttexporter/config.go new file mode 100644 index 0000000..b73c77a --- /dev/null +++ b/agent/otel/otlpmqttexporter/config.go @@ -0,0 +1,43 @@ +package otlpmqttexporter + +import ( + "fmt" + + mqtt "github.com/eclipse/paho.mqtt.golang" + "github.com/netboxlabs/orb-agent/agent/otel" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/exporter/exporterhelper" +) + +// Config defines configuration for OTLP/HTTP exporter. +type Config struct { + exporterhelper.TimeoutSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct. 
+ exporterhelper.QueueSettings `mapstructure:"sending_queue"` + exporterhelper.RetrySettings `mapstructure:"retry_on_failure"` + + // Add Client directly to only re-use an existing connection - requires "github.com/eclipse/paho.mqtt.golang" + Client *mqtt.Client + + // Configuration to connect to MQTT + Address string `mapstructure:"address"` + Id string `mapstructure:"id"` + Key string `mapstructure:"key"` + ChannelID string `mapstructure:"channel_id"` + TLS bool `mapstructure:"enable_tls"` + Topic string `mapstructure:"topic"` + + // Specific for ORB Agent + PktVisorVersion string `mapstructure:"pktvisor_version"` + OrbAgentService otel.AgentBridgeService +} + +var _ component.Config = (*Config)(nil) + +// Validate checks if the exporter configuration is valid +func (cfg *Config) Validate() error { + if ((cfg.Address != "" && cfg.Id != "" && cfg.Key != "" && cfg.ChannelID != "") || + cfg.Client != nil) && cfg.Topic != "" { + return nil + } + return fmt.Errorf("invalid mqtt configuration") +} diff --git a/agent/otel/otlpmqttexporter/config_test.go b/agent/otel/otlpmqttexporter/config_test.go new file mode 100644 index 0000000..6de234f --- /dev/null +++ b/agent/otel/otlpmqttexporter/config_test.go @@ -0,0 +1,17 @@ +package otlpmqttexporter + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" +) + +func TestUnmarshalDefaultConfig(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + assert.NoError(t, component.UnmarshalConfig(confmap.New(), cfg)) + assert.Equal(t, factory.CreateDefaultConfig(), cfg) +} diff --git a/agent/otel/otlpmqttexporter/doc.go b/agent/otel/otlpmqttexporter/doc.go new file mode 100644 index 0000000..afbb8b1 --- /dev/null +++ b/agent/otel/otlpmqttexporter/doc.go @@ -0,0 +1,2 @@ +// Package otlpmqttexporter exports data by using the OTLP format to an MQTT endpoint. 
+package otlpmqttexporter diff --git a/agent/otel/otlpmqttexporter/factory.go b/agent/otel/otlpmqttexporter/factory.go new file mode 100644 index 0000000..a9d9fc7 --- /dev/null +++ b/agent/otel/otlpmqttexporter/factory.go @@ -0,0 +1,163 @@ +package otlpmqttexporter + +import ( + "context" + "fmt" + + "go.opentelemetry.io/otel/sdk/metric" + + "github.com/netboxlabs/orb-agent/agent/otel" + "go.uber.org/zap" + + mqtt "github.com/eclipse/paho.mqtt.golang" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterhelper" + "go.opentelemetry.io/otel/trace" +) + +const ( + // The value of "type" key in configuration. + typeStr = "otlpmqtt" + defaultMQTTAddr = "localhost:1883" + defaultMQTTId = "uuid1" + defaultMQTTKey = "uuid2" + defaultName = "pktvisor" + // For testing will disable TLS + defaultTLS = false +) + +// NewFactory creates a factory for OTLP exporter. +// Reducing the scope to just Metrics since it is our use-case +func NewFactory() exporter.Factory { + return exporter.NewFactory( + typeStr, + CreateDefaultConfig, + exporter.WithMetrics(CreateMetricsExporter, component.StabilityLevelStable)) +} + +func CreateConfig(addr, id, key, channel, pktvisor, topic string, bridgeService otel.AgentBridgeService) component.Config { + return &Config{ + TimeoutSettings: exporterhelper.NewDefaultTimeoutSettings(), + QueueSettings: exporterhelper.NewDefaultQueueSettings(), + RetrySettings: exporterhelper.NewDefaultRetrySettings(), + Topic: topic, + Address: addr, + Id: id, + Key: key, + ChannelID: channel, + PktVisorVersion: pktvisor, + OrbAgentService: bridgeService, + } +} + +func CreateDefaultSettings(logger *zap.Logger) exporter.CreateSettings { + return exporter.CreateSettings{ + TelemetrySettings: component.TelemetrySettings{ + Logger: logger, + TracerProvider: trace.NewNoopTracerProvider(), + MeterProvider: metric.NewMeterProvider(), + }, + 
BuildInfo: component.NewDefaultBuildInfo(), + } +} + +func CreateDefaultConfig() component.Config { + base := fmt.Sprintf("channels/%s/messages", defaultMQTTId) + topic := fmt.Sprintf("%s/otlp/%s", base, defaultName) + return &Config{ + TimeoutSettings: exporterhelper.NewDefaultTimeoutSettings(), + QueueSettings: exporterhelper.NewDefaultQueueSettings(), + RetrySettings: exporterhelper.NewDefaultRetrySettings(), + Address: defaultMQTTAddr, + Id: defaultMQTTId, + Key: defaultMQTTKey, + ChannelID: base, + TLS: defaultTLS, + Topic: topic, + } +} + +func CreateConfigClient(client *mqtt.Client, topic, pktvisor string, bridgeService otel.AgentBridgeService) component.Config { + return &Config{ + TimeoutSettings: exporterhelper.NewDefaultTimeoutSettings(), + QueueSettings: exporterhelper.NewDefaultQueueSettings(), + RetrySettings: exporterhelper.NewDefaultRetrySettings(), + Client: client, + Topic: topic, + PktVisorVersion: pktvisor, + OrbAgentService: bridgeService, + } +} + +func CreateTracesExporter( + ctx context.Context, + set exporter.CreateSettings, + cfg component.Config, +) (exporter.Traces, error) { + oce, err := newExporter(cfg, set, ctx) + if err != nil { + return nil, err + } + oCfg := cfg.(*Config) + + return exporterhelper.NewTracesExporter( + ctx, + set, + cfg, + oce.pushTraces, + exporterhelper.WithStart(oce.start), + exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), + // explicitly disable since we rely on http.Client timeout logic. 
+ exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}), + exporterhelper.WithRetry(oCfg.RetrySettings), + exporterhelper.WithQueue(oCfg.QueueSettings)) +} + +func CreateMetricsExporter( + ctx context.Context, + set exporter.CreateSettings, + cfg component.Config, +) (exporter.Metrics, error) { + oce, err := newExporter(cfg, set, ctx) + if err != nil { + return nil, err + } + oCfg := cfg.(*Config) + return exporterhelper.NewMetricsExporter( + ctx, + set, + cfg, + oce.pushMetrics, + exporterhelper.WithStart(oce.start), + exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), + // explicitly disable since we rely on http.Client timeout logic. + exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}), + exporterhelper.WithRetry(oCfg.RetrySettings), + exporterhelper.WithQueue(oCfg.QueueSettings)) +} + +func CreateLogsExporter( + ctx context.Context, + set exporter.CreateSettings, + cfg component.Config, +) (exporter.Logs, error) { + oce, err := newExporter(cfg, set, ctx) + if err != nil { + return nil, err + } + oCfg := cfg.(*Config) + + return exporterhelper.NewLogsExporter( + ctx, + set, + cfg, + oce.pushLogs, + exporterhelper.WithStart(oce.start), + exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), + // explicitly disable since we rely on http.Client timeout logic. 
+ exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}), + exporterhelper.WithRetry(oCfg.RetrySettings), + exporterhelper.WithQueue(oCfg.QueueSettings)) +} diff --git a/agent/otel/otlpmqttexporter/factory_test.go b/agent/otel/otlpmqttexporter/factory_test.go new file mode 100644 index 0000000..9b386ce --- /dev/null +++ b/agent/otel/otlpmqttexporter/factory_test.go @@ -0,0 +1,137 @@ +package otlpmqttexporter + +import ( + "context" + "crypto/tls" + "fmt" + "testing" + "time" + + mqtt "github.com/eclipse/paho.mqtt.golang" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/exporter/exportertest" + "go.uber.org/zap" +) + +func TestCreateDefaultConfig(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + assert.NotNil(t, cfg, "failed to create default config") + testedCfg, ok := factory.CreateDefaultConfig().(*Config) + assert.True(t, ok) + assert.Equal(t, "localhost:1883", testedCfg.Address, "default address is localhost") + assert.Equal(t, "uuid1", testedCfg.Id, "default id is uuid1") + assert.Equal(t, "uuid2", testedCfg.Key, "default key uuid1") + assert.Equal(t, "channels/uuid1/messages", testedCfg.ChannelID, "default channel ID agent_test_metrics ") + assert.False(t, testedCfg.TLS, "default TLS is disabled") + assert.Equal(t, "channels/uuid1/messages/otlp/pktvisor", testedCfg.Topic, "default metrics topic is nil, only passed in the export function") +} + +func TestCreateMetricsExporter(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig().(*Config) + + set := exportertest.NewNopCreateSettings() + ctx := context.Background() + ctx = context.WithValue(ctx, "policy_name", "test") + ctx = context.WithValue(ctx, "policy_id", "test") + ctx = context.WithValue(ctx, "all", false) + oexp, err := factory.CreateMetricsExporter(ctx, set, cfg) + require.Nil(t, err) + require.NotNil(t, oexp) +} + +func 
makeMQTTConnectedClient(t *testing.T) (client mqtt.Client, err error) { + opts := mqtt.NewClientOptions().AddBroker("localhost:1889").SetClientID("1dad1121-4b05-4af8-9321-c541e252fe4b") + opts.SetUsername("1dad1121-4b05-4af8-9321-c541e252fe4b") + opts.SetPassword("2a2aabd8-927f-4c58-9dc4-2de784cf9644") + opts.SetKeepAlive(10 * time.Second) + opts.SetDefaultPublishHandler(func(client mqtt.Client, message mqtt.Message) { + t.Error("message on unknown channel, ignoring", zap.String("topic", message.Topic()), zap.ByteString("payload", message.Payload())) + }) + opts.SetPingTimeout(5 * time.Second) + opts.SetAutoReconnect(true) + + opts.TLSConfig = &tls.Config{InsecureSkipVerify: true} + + client = mqtt.NewClient(opts) + if token := client.Connect(); token.Wait() && token.Error() != nil { + return nil, token.Error() + } + + return client, nil +} + +func TestCreateConfigClient(t *testing.T) { + t.Skip("This test requires a local mqtt broker, unskip it locally") + type args struct { + client mqtt.Client + metricsTopic string + } + + client, err := makeMQTTConnectedClient(t) + require.Nil(t, err) + + tests := []struct { + name string + args args + want error + }{ + { + name: "ok client", + args: args{ + client: client, + metricsTopic: "topic", + }, + want: nil, + }, + { + name: "nil client", + args: args{ + client: nil, + metricsTopic: "", + }, + want: fmt.Errorf("invalid mqtt configuration"), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &tt.args.client + got := CreateConfigClient(c, tt.args.metricsTopic, " 1.0", nil) + assert.Equal(t, tt.want, component.ValidateConfig(got), "expected %s but got %s", tt.want, component.ValidateConfig(got)) + }) + } +} + +func TestCreateConfig(t *testing.T) { + t.Skip(" only run this if local mqtt is installed locally at port 1889") + type args struct { + addr string + id string + key string + channel string + } + tests := []struct { + name string + args args + want component.Config + }{ + { + name: 
"local mqtt", + args: args{ + addr: "localhost:1889", + id: "uuid1", + key: "uuid1", + channel: "channels/uuid1/channel/metrics", + }, + want: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, CreateConfig(tt.args.addr, tt.args.id, tt.args.key, tt.args.channel, "1.0", "metricstopic", nil), "CreateConfig(%v, %v, %v, %v)", tt.args.addr, tt.args.id, tt.args.key, tt.args.channel) + }) + } +} diff --git a/agent/otel/otlpmqttexporter/otlp.go b/agent/otel/otlpmqttexporter/otlp.go new file mode 100644 index 0000000..f2f0cb9 --- /dev/null +++ b/agent/otel/otlpmqttexporter/otlp.go @@ -0,0 +1,310 @@ +package otlpmqttexporter + +import ( + "bytes" + "context" + "crypto/tls" + "errors" + "fmt" + "net/http" + "net/url" + "runtime" + "sort" + "strings" + "time" + + "github.com/andybalholm/brotli" + mqtt "github.com/eclipse/paho.mqtt.golang" + "go.opentelemetry.io/collector/consumer/consumererror" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/plog/plogotlp" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" + "go.opentelemetry.io/collector/pdata/ptrace" + "go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp" + "go.uber.org/zap" +) + +type baseExporter struct { + // Input configuration. + config *Config + client *http.Client + logger *zap.Logger + settings component.TelemetrySettings + // Default user-agent header. + userAgent string +} + +func (e *baseExporter) compressBrotli(data []byte) []byte { + var b bytes.Buffer + w := brotli.NewWriterLevel(&b, brotli.BestCompression) + _, err := w.Write(data) + if err != nil { + return nil + } + err = w.Close() + if err != nil { + return nil + } + return b.Bytes() +} + +// Crete new exporter. 
+func newExporter(cfg component.Config, set exporter.CreateSettings, ctx context.Context) (*baseExporter, error) { + oCfg := cfg.(*Config) + if oCfg.Address != "" { + _, err := url.Parse(oCfg.Address) + if err != nil { + return nil, errors.New("address must be a valid mqtt server") + } + } + + userAgent := fmt.Sprintf("%s/%s (%s/%s)", + set.BuildInfo.Description, set.BuildInfo.Version, runtime.GOOS, runtime.GOARCH) + + // Client construction is deferred to start + return &baseExporter{ + config: oCfg, + logger: set.Logger, + userAgent: userAgent, + settings: set.TelemetrySettings, + }, nil +} + +// start actually creates the MQTT client. +func (e *baseExporter) start(_ context.Context, _ component.Host) error { + token := e.config.Client + if token == nil { + opts := mqtt.NewClientOptions().AddBroker(e.config.Address).SetClientID(e.config.Id) + opts.SetUsername(e.config.Id) + opts.SetPassword(e.config.Key) + opts.SetKeepAlive(10 * time.Second) + opts.SetDefaultPublishHandler(func(client mqtt.Client, message mqtt.Message) { + e.logger.Info("message on unknown channel, ignoring", zap.String("topic", message.Topic()), zap.ByteString("payload", message.Payload())) + }) + opts.SetPingTimeout(5 * time.Second) + opts.SetAutoReconnect(true) + + if e.config.TLS { + opts.TLSConfig = &tls.Config{InsecureSkipVerify: true} + } + + client := mqtt.NewClient(opts) + if token := client.Connect(); token.Wait() && token.Error() != nil { + return token.Error() + } + e.config.Client = &client + } + + return nil +} + +// inject attribute on all ScopeMetrics metrics +func (e *baseExporter) injectScopeMetricsAttribute(metricsScope pmetric.ScopeMetrics, attribute string, value string) pmetric.ScopeMetrics { + metrics := metricsScope.Metrics() + for i := 0; i < metrics.Len(); i++ { + metricItem := metrics.At(i) + + switch metricItem.Type() { + case pmetric.MetricTypeExponentialHistogram: + for i := 0; i < metricItem.ExponentialHistogram().DataPoints().Len(); i++ { + 
metricItem.ExponentialHistogram().DataPoints().At(i).Attributes().PutStr(attribute, value) + } + case pmetric.MetricTypeGauge: + for i := 0; i < metricItem.Gauge().DataPoints().Len(); i++ { + metricItem.Gauge().DataPoints().At(i).Attributes().PutStr(attribute, value) + } + case pmetric.MetricTypeHistogram: + for i := 0; i < metricItem.Histogram().DataPoints().Len(); i++ { + metricItem.Histogram().DataPoints().At(i).Attributes().PutStr(attribute, value) + } + case pmetric.MetricTypeSum: + for i := 0; i < metricItem.Sum().DataPoints().Len(); i++ { + metricItem.Sum().DataPoints().At(i).Attributes().PutStr(attribute, value) + } + case pmetric.MetricTypeSummary: + for i := 0; i < metricItem.Summary().DataPoints().Len(); i++ { + metricItem.Summary().DataPoints().At(i).Attributes().PutStr(attribute, value) + } + default: + e.logger.Warn("not supported metric type", zap.String("name", metricItem.Name()), + zap.String("type", metricItem.Type().String())) + metrics.RemoveIf(func(m pmetric.Metric) bool { + return m.Name() == metricItem.Name() + }) + } + } + return metricsScope +} + +// pushMetrics Exports metrics +func (e *baseExporter) pushMetrics(ctx context.Context, md pmetric.Metrics) error { + tr := pmetricotlp.NewExportRequest() + ref := tr.Metrics().ResourceMetrics().AppendEmpty() + scopes := pmetricotlp.NewExportRequestFromMetrics(md).Metrics().ResourceMetrics().At(0).ScopeMetrics() + for i := 0; i < scopes.Len(); i++ { + scope := scopes.At(i) + policyName, _ := scope.Scope().Attributes().Get("policy_name") + policyNameStr := policyName.AsString() + agentData, err := e.config.OrbAgentService.RetrieveAgentInfoByPolicyName(policyNameStr) + if err != nil { + e.logger.Warn("Policy is not managed by orb", zap.String("policyName", policyNameStr)) + continue + } + + // sort datasetIDs to send always on same order + datasetIDs := strings.Split(agentData.Datasets, ",") + sort.Strings(datasetIDs) + datasets := strings.Join(datasetIDs, ",") + + // Insert pivoted agentTags + for 
key, value := range agentData.AgentTags {
+			scope = e.injectScopeMetricsAttribute(scope, key, value)
+		}
+		// Inject the owning policy ID and the sorted dataset IDs as scope attributes.
+		scope.Scope().Attributes().PutStr("policy_id", agentData.PolicyID)
+		scope.Scope().Attributes().PutStr("dataset_ids", datasets)
+		scope.CopyTo(ref.ScopeMetrics().AppendEmpty())
+		e.logger.Info("scraped metrics for policy", zap.String("policy", policyNameStr), zap.String("policy_id", agentData.PolicyID))
+	}
+
+	request, err := tr.MarshalProto()
+	if err != nil {
+		// NOTE(review): ctx.Done() only returns the context's done channel; calling it
+		// (deferred or not) neither cancels nor releases anything — this defer is a no-op
+		// and can be removed.
+		defer ctx.Done()
+		return consumererror.NewPermanent(err)
+	}
+
+	err = e.export(ctx, e.config.Topic, request)
+	if err != nil {
+		// NOTE(review): same as above — ctx.Done() has no side effect here.
+		ctx.Done()
+		return err
+	}
+
+	// err is nil on this path, so this is equivalent to `return nil`.
+	return err
+}
+
+// injectScopeLogsAttribute sets attribute=value on every log record of logsScope
+// and returns the (mutated in place) scope.
+func (e *baseExporter) injectScopeLogsAttribute(logsScope plog.ScopeLogs, attribute string, value string) plog.ScopeLogs {
+	logs := logsScope.LogRecords()
+	for i := 0; i < logs.Len(); i++ {
+		logItem := logs.At(i)
+		logItem.Attributes().PutStr(attribute, value)
+	}
+	return logsScope
+}
+
+// pushLogs regroups the incoming logs by owning policy (looked up by scope name),
+// pivots the agent tags plus policy_id/dataset_ids into scope attributes, and
+// publishes the marshaled OTLP export request over MQTT via export().
+func (e *baseExporter) pushLogs(ctx context.Context, ld plog.Logs) error {
+	tr := plogotlp.NewExportRequest()
+	ref := tr.Logs().ResourceLogs().AppendEmpty()
+	// NOTE(review): At(0) assumes ld always carries at least one ResourceLogs —
+	// confirm upstream pipelines guarantee a non-empty batch.
+	scopes := plogotlp.NewExportRequestFromLogs(ld).Logs().ResourceLogs().At(0).ScopeLogs()
+	for i := 0; i < scopes.Len(); i++ {
+		scope := scopes.At(i)
+		policyName := scope.Scope().Name()
+		agentData, err := e.config.OrbAgentService.RetrieveAgentInfoByPolicyName(policyName)
+		if err != nil {
+			// Unknown policy: skip this scope rather than failing the whole batch.
+			e.logger.Warn("Policy is not managed by orb", zap.String("policyName", policyName))
+			continue
+		}
+
+		// Sort the dataset IDs so they are always sent in the same order.
+		datasetIDs := strings.Split(agentData.Datasets, ",")
+		sort.Strings(datasetIDs)
+		datasets := strings.Join(datasetIDs, ",")
+
+		// Insert pivoted agentTags
+		for key, value := range agentData.AgentTags {
+			scope = e.injectScopeLogsAttribute(scope, key, value)
+		}
+		// Inject the owning policy ID and the sorted dataset IDs as scope attributes.
+
scope.Scope().Attributes().PutStr("policy_id", agentData.PolicyID) + scope.Scope().Attributes().PutStr("dataset_ids", datasets) + scope.CopyTo(ref.ScopeLogs().AppendEmpty()) + e.logger.Info("scraped logs for policy", zap.String("policy", policyName), zap.String("policy_id", agentData.PolicyID)) + } + + request, err := tr.MarshalProto() + if err != nil { + defer ctx.Done() + return consumererror.NewPermanent(err) + } + + err = e.export(ctx, e.config.Topic, request) + if err != nil { + ctx.Done() + return err + } + + return err +} + +// inject attribute on all ScopeSpans spans +func (e *baseExporter) injectScopeSpansAttribute(spanScope ptrace.ScopeSpans, attribute string, value string) ptrace.ScopeSpans { + spans := spanScope.Spans() + for i := 0; i < spans.Len(); i++ { + spanItem := spans.At(i) + spanItem.Attributes().PutStr(attribute, value) + } + return spanScope +} + +func (e *baseExporter) pushTraces(ctx context.Context, td ptrace.Traces) error { + tr := ptraceotlp.NewExportRequest() + ref := tr.Traces().ResourceSpans().AppendEmpty() + scopes := ptraceotlp.NewExportRequestFromTraces(td).Traces().ResourceSpans().At(0).ScopeSpans() + for i := 0; i < scopes.Len(); i++ { + scope := scopes.At(i) + policyName := scope.Scope().Name() + agentData, err := e.config.OrbAgentService.RetrieveAgentInfoByPolicyName(policyName) + if err != nil { + e.logger.Warn("Policy is not managed by orb", zap.String("policyName", policyName)) + continue + } + + // sort datasetIDs to send always on same order + datasetIDs := strings.Split(agentData.Datasets, ",") + sort.Strings(datasetIDs) + datasets := strings.Join(datasetIDs, ",") + + // Insert pivoted agentTags + for key, value := range agentData.AgentTags { + scope = e.injectScopeSpansAttribute(scope, key, value) + } + // injecting policyID and datasetIDs attributes + scope.Scope().Attributes().PutStr("policy_id", agentData.PolicyID) + scope.Scope().Attributes().PutStr("dataset_ids", datasets) + 
scope.CopyTo(ref.ScopeSpans().AppendEmpty()) + e.logger.Info("scraped traces for policy", zap.String("policy", policyName), zap.String("policy_id", agentData.PolicyID)) + } + + request, err := tr.MarshalProto() + if err != nil { + defer ctx.Done() + return consumererror.NewPermanent(err) + } + + err = e.export(ctx, e.config.Topic, request) + if err != nil { + ctx.Done() + return err + } + + return err +} + +func (e *baseExporter) export(ctx context.Context, topic string, request []byte) error { + compressedPayload := e.compressBrotli(request) + c := *e.config.Client + if token := c.Publish(topic, 1, false, compressedPayload); token.Wait() && token.Error() != nil { + e.logger.Error("error sending metrics RPC", zap.String("topic", topic), zap.Error(token.Error())) + e.config.OrbAgentService.NotifyAgentDisconnection(ctx, token.Error()) + return token.Error() + } + e.logger.Debug("scraped and published telemetry", zap.String("topic", topic), + zap.Int("payload_size_b", len(request)), + zap.Int("compressed_payload_size_b", len(compressedPayload))) + + return nil +} diff --git a/agent/otel/otlpmqttexporter/otlp_test.go b/agent/otel/otlpmqttexporter/otlp_test.go new file mode 100644 index 0000000..1f6b33a --- /dev/null +++ b/agent/otel/otlpmqttexporter/otlp_test.go @@ -0,0 +1,87 @@ +package otlpmqttexporter + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/exporter/exportertest" + "go.opentelemetry.io/collector/pdata/pmetric" +) + +func TestInvalidConfig(t *testing.T) { + t.Skip("TODO Not sure how to solve this") + c := &Config{} + f := NewFactory() + set := exportertest.NewNopCreateSettings() + _, err := f.CreateTracesExporter(context.Background(), set, c) + require.Error(t, err) + _, err = f.CreateMetricsExporter(context.Background(), set, c) + require.Error(t, err) + _, err = f.CreateLogsExporter(context.Background(), set, c) + require.Error(t, err) +} 
+ +func TestUserAgent(t *testing.T) { + // This test also requires you to use a local mqtt broker, for this I will use mosquitto on port 1887 + t.Skip("This test requires a local mqtt broker, unskip it locally") + mqttAddr := "localhost:1887" + set := exportertest.NewNopCreateSettings() + set.BuildInfo.Description = "Collector" + set.BuildInfo.Version = "1.2.3test" + + tests := []struct { + name string + }{ + { + name: "default_user_agent", + }, + { + name: "custom_user_agent", + }, + { + name: "custom_user_agent_lowercase", + }, + } + + t.Run("metrics", func(t *testing.T) { + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + topic := "channels/uuid1/messages/be/test" + cfg := &Config{ + Address: mqttAddr, + Id: "uuid1", + Key: "uuid2", + TLS: false, + Topic: topic, + } + exp, err := CreateMetricsExporter(context.Background(), set, cfg) + require.NoError(t, err) + + // start the exporter + err = exp.Start(context.Background(), componenttest.NewNopHost()) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, exp.Shutdown(context.Background())) + }) + + // generate data + metrics := pmetric.NewMetrics() + metrics.ResourceMetrics() + metrics.ResourceMetrics().AppendEmpty() + tv := metrics.ResourceMetrics().At(0) + tv.SetSchemaUrl("test_url") + tv.ScopeMetrics().AppendEmpty() + sm := tv.ScopeMetrics().At(0) + sm.Metrics().AppendEmpty() + metric := sm.Metrics().At(0) + metric.SetName("test_value") + metric.SetDescription("test_description") + metric.SetUnit("test_unit") + err = exp.ConsumeMetrics(context.Background(), metrics) + require.NoError(t, err) + }) + } + }) +} diff --git a/agent/otel/otlpmqttexporter/testdata/bad_empty_config.yaml b/agent/otel/otlpmqttexporter/testdata/bad_empty_config.yaml new file mode 100644 index 0000000..c52b900 --- /dev/null +++ b/agent/otel/otlpmqttexporter/testdata/bad_empty_config.yaml @@ -0,0 +1,15 @@ +receivers: + nop: + +processors: + nop: + +exporters: + otlphttp: + +service: + pipelines: + 
traces: + receivers: [nop] + processors: [nop] + exporters: [otlphttp] diff --git a/agent/otel/otlpmqttexporter/testdata/config.yaml b/agent/otel/otlpmqttexporter/testdata/config.yaml new file mode 100644 index 0000000..3d48e47 --- /dev/null +++ b/agent/otel/otlpmqttexporter/testdata/config.yaml @@ -0,0 +1,38 @@ +receivers: + nop: + +processors: + nop: + +exporters: + otlphttp/2: + endpoint: "https://1.2.3.4:1234" + tls: + ca_file: /var/lib/mycert.pem + cert_file: certfile + key_file: keyfile + insecure: true + timeout: 10s + read_buffer_size: 123 + write_buffer_size: 345 + sending_queue: + enabled: true + num_consumers: 2 + queue_size: 10 + retry_on_failure: + enabled: true + initial_interval: 10s + max_interval: 60s + max_elapsed_time: 10m + headers: + "can you have a . here?": "F0000000-0000-0000-0000-000000000000" + header1: 234 + another: "somevalue" + compression: gzip + +service: + pipelines: + traces: + receivers: [nop] + processors: [nop] + exporters: [otlphttp/2] diff --git a/agent/otel/otlpmqttexporter/testdata/test_cert.pem b/agent/otel/otlpmqttexporter/testdata/test_cert.pem new file mode 100644 index 0000000..b2e77b8 --- /dev/null +++ b/agent/otel/otlpmqttexporter/testdata/test_cert.pem @@ -0,0 +1,29 @@ +-----BEGIN CERTIFICATE----- +MIIE6jCCAtICCQDVU4PtqpqADTANBgkqhkiG9w0BAQsFADA3MQswCQYDVQQGEwJV +UzETMBEGA1UECAwKY2FsaWZvcm5pYTETMBEGA1UECgwKb3BlbmNlbnN1czAeFw0x +OTAzMDQxODA3MjZaFw0yMDAzMDMxODA3MjZaMDcxCzAJBgNVBAYTAlVTMRMwEQYD +VQQIDApjYWxpZm9ybmlhMRMwEQYDVQQKDApvcGVuY2Vuc3VzMIICIjANBgkqhkiG +9w0BAQEFAAOCAg8AMIICCgKCAgEAy9JQiAOMzArcdiS4szbTuzg5yYijSSY6SvGj +XMs4/LEFLxgGmFfyHXxoVQzV26lTu/AiUFlZi4JY2qlkZyPwmmmSg4fmzikpVPiC +Vv9pvSIojs8gs0sHaOt40Q8ym43bNt3Mh8rYrs+XMERi6Ol9//j4LnfePkNU5uEo +qC8KQamckaMR6UEHFNunyOwvNBsipgTPldQUPGVnCsNKk8olYGAXS7DR25bgbPli +4T9VCSElsSPAODmyo+2MEDagVXa1vVYxKyO2k6oeBS0lsvdRqRTmGggcg0B/dk+a +H1CL9ful0cu9P3dQif+hfGay8udPkwDLPEq1+WnjJFut3Pmbk3SqUCas5iWt76kK +eKFh4k8fCy4yiaZxzvSbm9+bEBHAl0ZXd8pjvAsBfCKe6G9SBzE1DK4FjWiiEGCb 
+5dGsyTKr33q3DekLvT3LF8ZeON/13d9toucX9PqG2HDwMP/Fb4WjQIzOc/H9wIak +pf7u6QBDGUiCMmoDrp1d8RsI1RPbEhoywH0YlLmwgf+cr1dU7vlISf576EsGxFz4 ++/sZjIBvZBHn/x0MH+bs4J8V3vMujfDoRdhL07bK7q/AkEALUxljKEfoWeqiuVzK +F9BVv3xNhiua2kgPVbMNWPrQ5uotkNp8IykJ3QOuQ3p5pzxdGfpLd6f8gmJDmcbi +AI9dWTcCAwEAATANBgkqhkiG9w0BAQsFAAOCAgEAVVi4t/Sumre+AGTaU7np9dl2 +tpllbES5ixe6m2uezt5wAzYNNyuQ2mMG2XrSkMy5gvBZRT9nRNSmLV8VEcxZihG0 +YHS5soXnLL3Jdlwxp98WTDPvM1ntxcHyEyqrrg9YDfKn4sOrr5vo2yZzoKwtxtc7 +lue9JormVx7GxMi7NwaUtCbnwAIcqJJpFjt1EhmJOxGqTJPgUvTBdeGvRj30c6fk +pqpUdPbZ7RKPEtbLoMoCBujKnErv+H0G6Vp9WyCHN+Mi9uTMsGwH14cmJjmfwGDC +8/WF4LdlawFnf/arIp9YcVwcP91d4ywyvbuuo2M7qdosQ7k4uRZ3tyggLYShS3RW +BMEhMRDz9dM0oKGF+HnaS824BIh6O6Hn82Vt8uCKS7IbEX99/kkN1KcqqQe6Lwjq +tG/lm4K5yf+FJVDivpZ9mYTvqTBjhTaOp6m3HYSNJfS0hLQVvEuBNXd8bHiXkcLp +rmFOYUWsjxV1Qku3U5Rner0UpB2Fuw9nJcXuDgWG0gjwzAZ83y3du1VIZp0Ad8Vv +IYpaucbImGJszMtNXn3l72K1wvQVIhm9eRwYc3QteJzweHaDsbytZEoS/GhTrZIT +wRe5ZGrjJBJngRANRSm1BH8j6PjLem9mzPb2eytwJJA0lLhUk4vYproVvXcx0vow +5F+5VB1YB8/tbWePmpo= +-----END CERTIFICATE----- diff --git a/agent/policies/repo.go b/agent/policies/repo.go new file mode 100644 index 0000000..7bc1a43 --- /dev/null +++ b/agent/policies/repo.go @@ -0,0 +1,127 @@ +package policies + +import ( + "errors" + + "go.uber.org/zap" +) + +type PolicyRepo interface { + Exists(policyID string) bool + Get(policyID string) (PolicyData, error) + Remove(policyID string) error + Update(data PolicyData) error + GetAll() ([]PolicyData, error) + GetByName(policyName string) (PolicyData, error) + EnsureDataset(policyID string, datasetID string) error + RemoveDataset(policyID string, datasetID string) (bool, error) + EnsureGroupID(policyID string, agentGroupID string) error +} + +type policyMemRepo struct { + logger *zap.Logger + + db map[string]PolicyData + nameMap map[string]string +} + +var _ PolicyRepo = (*policyMemRepo)(nil) + +func (p policyMemRepo) GetByName(policyName string) (PolicyData, error) { + if id, ok := p.nameMap[policyName]; ok { + return 
p.Get(id)
+	} else {
+		return PolicyData{}, errors.New("policy name not found")
+	}
+}
+
+// NewMemRepo builds an empty in-memory PolicyRepo backed by two maps:
+// db (policy ID -> data) and nameMap (policy name -> policy ID).
+func NewMemRepo(logger *zap.Logger) (PolicyRepo, error) {
+	r := &policyMemRepo{
+		logger:  logger,
+		db:      make(map[string]PolicyData),
+		nameMap: make(map[string]string),
+	}
+	return r, nil
+}
+
+// EnsureDataset marks datasetID as associated with policyID.
+// NOTE(review): `policy` is a copy of the stored struct, but Datasets is a map
+// (reference type) shared with the stored value, so the write below does persist.
+// It will panic if the stored policy carries a nil Datasets map — confirm all
+// writers initialize it (ManagePolicy does).
+func (p policyMemRepo) EnsureDataset(policyID string, datasetID string) error {
+	policy, ok := p.db[policyID]
+	if !ok {
+		return errors.New("unknown policy ID")
+	}
+	policy.Datasets[datasetID] = true
+	return nil
+}
+
+// RemoveDataset detaches datasetID from policyID. The boolean result is true
+// when the policy is left with no datasets and may therefore be removed.
+func (p policyMemRepo) RemoveDataset(policyID string, datasetID string) (bool, error) {
+	policy, ok := p.db[policyID]
+	if !ok {
+		return false, errors.New("unknown policy ID")
+	}
+	if ok := policy.Datasets[datasetID]; ok {
+		delete(policy.Datasets, datasetID)
+	}
+	// If the policy has no remaining datasets after the removal,
+	// we can remove the policy from the agent.
+	if len(policy.Datasets) > 0 {
+		return false, nil
+	} else {
+		return true, nil
+	}
+}
+
+// Exists reports whether a policy with the given ID is stored.
+func (p policyMemRepo) Exists(policyID string) bool {
+	_, ok := p.db[policyID]
+	return ok
+}
+
+// Get returns the stored policy data for policyID.
+func (p policyMemRepo) Get(policyID string) (PolicyData, error) {
+	policy, ok := p.db[policyID]
+	if !ok {
+		return PolicyData{}, errors.New("unknown policy ID")
+	}
+	return policy, nil
+}
+
+// Remove deletes the policy and its name-index entry.
+func (p policyMemRepo) Remove(policyID string) error {
+	v, err := p.Get(policyID)
+	if err != nil {
+		return err
+	}
+	delete(p.nameMap, v.Name)
+	delete(p.db, policyID)
+	return nil
+}
+
+// Update upserts the policy and keeps the name index consistent:
+// on a rename the stale name mapping is dropped first.
+func (p policyMemRepo) Update(data PolicyData) error {
+	policy, ok := p.db[data.ID]
+	if ok {
+		// existed, clear old map entry for the previous name
+		delete(p.nameMap, policy.Name)
+	}
+	p.db[data.ID] = data
+	p.nameMap[data.Name] = data.ID
+	return nil
+}
+
+// GetAll returns a snapshot slice of every stored policy
+// (map iteration order, i.e. unspecified).
+func (p policyMemRepo) GetAll() (ret []PolicyData, err error) {
+	ret = make([]PolicyData, len(p.db))
+	i := 0
+	for _, v := range p.db {
+		ret[i] = v
+		i++
+	}
+	err = nil
+	return ret, err
+}
+
+// EnsureGroupID marks agentGroupID as associated with policyID.
+// NOTE(review): same map-aliasing caveat as EnsureDataset — panics on a nil GroupIds map.
+func (p policyMemRepo) EnsureGroupID(policyID string, agentGroupID string) error {
+	policy, ok :=
p.db[policyID] + if !ok { + return errors.New("unknown policy ID") + } + policy.GroupIds[agentGroupID] = true + return nil +} + +var _ PolicyRepo = (*policyMemRepo)(nil) diff --git a/agent/policies/types.go b/agent/policies/types.go new file mode 100644 index 0000000..70c0b13 --- /dev/null +++ b/agent/policies/types.go @@ -0,0 +1,70 @@ +package policies + +import ( + "database/sql/driver" + "time" + + _ "github.com/mattn/go-sqlite3" +) + +type PolicyData struct { + ID string + Datasets map[string]bool + GroupIds map[string]bool + Name string + Backend string + Version int32 + Data interface{} + State PolicyState + BackendErr string + LastScrapeBytes int64 + LastScrapeTS time.Time + PreviousPolicyData *PolicyData +} + +func (d *PolicyData) GetDatasetIDs() []string { + keys := make([]string, len(d.Datasets)) + + i := 0 + for k := range d.Datasets { + keys[i] = k + i++ + } + return keys +} + +const ( + Unknown PolicyState = iota + Running + FailedToApply + Offline + NoTapMatch +) + +type PolicyState int + +var policyStateMap = [...]string{ + "unknown", + "running", + "failed_to_apply", + "offline", + "no_tap_match", +} + +var policyStateRevMap = map[string]PolicyState{ + "unknown": Unknown, + "running": Running, + "failed_to_apply": FailedToApply, + "offline": Offline, + "no_tap_match": NoTapMatch, +} + +func (s PolicyState) String() string { + return policyStateMap[s] +} + +func (s *PolicyState) Scan(value interface{}) error { + *s = policyStateRevMap[string(value.([]byte))] + return nil +} +func (s PolicyState) Value() (driver.Value, error) { return s.String(), nil } diff --git a/agent/policyMgr/manager.go b/agent/policyMgr/manager.go new file mode 100644 index 0000000..3eff5e4 --- /dev/null +++ b/agent/policyMgr/manager.go @@ -0,0 +1,265 @@ +package manager + +import ( + "errors" + "fmt" + "strings" + + "github.com/jmoiron/sqlx" + "github.com/netboxlabs/orb-agent/agent/backend" + "github.com/netboxlabs/orb-agent/agent/config" + 
"github.com/netboxlabs/orb-agent/agent/policies" + "github.com/orb-community/orb/fleet" + "go.uber.org/zap" +) + +type PolicyManager interface { + ManagePolicy(payload fleet.AgentPolicyRPCPayload) + RemovePolicyDataset(policyID string, datasetID string, be backend.Backend) + GetPolicyState() ([]policies.PolicyData, error) + GetRepo() policies.PolicyRepo + ApplyBackendPolicies(be backend.Backend) error + RemoveBackendPolicies(be backend.Backend, permanently bool) error + RemovePolicy(policyID string, policyName string, beName string) error +} + +var _ PolicyManager = (*policyManager)(nil) + +type policyManager struct { + logger *zap.Logger + config config.Config + + repo policies.PolicyRepo +} + +func (a *policyManager) GetRepo() policies.PolicyRepo { + return a.repo +} + +func (a *policyManager) GetPolicyState() ([]policies.PolicyData, error) { + return a.repo.GetAll() +} + +func New(logger *zap.Logger, c config.Config, db *sqlx.DB) (PolicyManager, error) { + repo, err := policies.NewMemRepo(logger) + if err != nil { + return nil, err + } + return &policyManager{logger: logger, config: c, repo: repo}, nil +} + +func (a *policyManager) ManagePolicy(payload fleet.AgentPolicyRPCPayload) { + + a.logger.Info("managing agent policy from core", + zap.String("action", payload.Action), + zap.String("name", payload.Name), + zap.String("dataset", payload.DatasetID), + zap.String("backend", payload.Backend), + zap.String("id", payload.ID), + zap.Int32("version", payload.Version)) + + switch payload.Action { + case "manage": + var pd = policies.PolicyData{ + ID: payload.ID, + Name: payload.Name, + Backend: payload.Backend, + Version: payload.Version, + Data: payload.Data, + State: policies.Unknown, + } + var updatePolicy bool + if a.repo.Exists(payload.ID) { + // we have already processed this policy id before (it may be running or failed) + // ensure we are associating this dataset with this policy, if one was specified + // note the usual case is dataset id is NOT passed 
during policy updates
+			if payload.DatasetID != "" {
+				err := a.repo.EnsureDataset(payload.ID, payload.DatasetID)
+				if err != nil {
+					a.logger.Warn("policy failed to ensure dataset id", zap.String("policy_id", payload.ID), zap.String("policy_name", payload.Name), zap.String("dataset_id", payload.DatasetID), zap.Error(err))
+				}
+			}
+
+			if payload.AgentGroupID != "" {
+				err := a.repo.EnsureGroupID(payload.ID, payload.AgentGroupID)
+				if err != nil {
+					a.logger.Warn("policy failed to ensure agent group id", zap.String("policy_id", payload.ID), zap.String("policy_name", payload.Name), zap.String("agent_group_id", payload.AgentGroupID), zap.Error(err))
+				}
+			}
+
+			// If the policy already exists at the same or a newer version and is
+			// currently running, there is no need to apply it again.
+			currentPolicy, err := a.repo.Get(payload.ID)
+			if err != nil {
+				a.logger.Error("failed to retrieve policy", zap.String("policy_id", payload.ID), zap.Error(err))
+				return
+			}
+			// NOTE(review): the guard uses >= (an equal version also skips), while the
+			// log message says "a better version" — the message slightly overstates it.
+			if currentPolicy.Version >= pd.Version && currentPolicy.State == policies.Running {
+				a.logger.Info("a better version of this policy has already been applied, skipping", zap.String("policy_id", pd.ID), zap.String("policy_name", pd.Name), zap.String("attempted_version", fmt.Sprint(pd.Version)), zap.String("current_version", fmt.Sprint(currentPolicy.Version)))
+				return
+			} else {
+				updatePolicy = true
+			}
+			// Record the old name on a rename — presumably so the backend can clean
+			// up the previously-named policy; confirm against backend.ApplyPolicy.
+			if currentPolicy.Name != pd.Name {
+				pd.PreviousPolicyData = &policies.PolicyData{Name: currentPolicy.Name}
+			}
+			// Carry the existing dataset/group associations over to the new revision.
+			pd.Datasets = currentPolicy.Datasets
+			pd.GroupIds = currentPolicy.GroupIds
+		} else {
+			// new policy we have not seen before, associate with this dataset
+			// on first time we see policy, we *require* dataset
+			if payload.DatasetID == "" {
+				a.logger.Error("policy RPC for unseen policy did not include dataset ID, skipping", zap.String("policy_id", payload.ID), zap.String("policy_name", payload.Name))
+				return
+			}
+			pd.Datasets = map[string]bool{payload.DatasetID: true}
+
+			if payload.AgentGroupID != "" {
+				pd.GroupIds =
map[string]bool{payload.AgentGroupID: true} + } + + } + if !backend.HaveBackend(payload.Backend) { + a.logger.Warn("policy failed to apply because backend is not available", zap.String("policy_id", payload.ID), zap.String("policy_name", payload.Name)) + pd.State = policies.FailedToApply + pd.BackendErr = "backend not available" + } else { + // attempt to apply the policy to the backend. status of policy application (running/failed) is maintained there. + be := backend.GetBackend(payload.Backend) + a.applyPolicy(payload, be, &pd, updatePolicy) + } + // save policy (with latest status) to local policy db + err := a.repo.Update(pd) + if err != nil { + a.logger.Error("got error in update last status", zap.Error(err)) + return + } + return + case "remove": + err := a.RemovePolicy(payload.ID, payload.Name, payload.Backend) + if err != nil { + a.logger.Error("policy failed to be removed", zap.String("policy_id", payload.ID), zap.String("policy_name", payload.Name), zap.Error(err)) + } + break + default: + a.logger.Error("unknown policy action, ignored", zap.String("action", payload.Action)) + } +} + +func (a *policyManager) RemovePolicy(policyID string, policyName string, beName string) error { + var pd = policies.PolicyData{ + ID: policyID, + Name: policyName, + } + if !backend.HaveBackend(beName) { + return errors.New("policy remove for a backend we do not have, ignoring") + } + be := backend.GetBackend(beName) + err := be.RemovePolicy(pd) + if err != nil { + a.logger.Error("backend remove policy failed: will still remove from PolicyManager", zap.String("policy_id", policyID), zap.Error(err)) + } + // Remove policy from orb-agent local repo + err = a.repo.Remove(pd.ID) + if err != nil { + return err + } + return nil +} + +func (a *policyManager) RemovePolicyDataset(policyID string, datasetID string, be backend.Backend) { + policyData, err := a.repo.Get(policyID) + if err != nil { + a.logger.Warn("failed to retrieve policy data", zap.String("policy_id", policyID), 
zap.String("policy_name", policyData.Name), zap.Error(err)) + return + } + removePolicy, err := a.repo.RemoveDataset(policyID, datasetID) + if err != nil { + a.logger.Warn("failed to remove policy dataset", zap.String("dataset_id", datasetID), zap.String("policy_name", policyData.Name), zap.Error(err)) + return + } + if removePolicy { + // Remove policy via http request + err := be.RemovePolicy(policyData) + if err != nil { + a.logger.Warn("policy failed to remove", zap.String("policy_id", policyID), zap.String("policy_name", policyData.Name), zap.Error(err)) + } + // Remove policy from orb-agent local repo + err = a.repo.Remove(policyData.ID) + if err != nil { + a.logger.Warn("policy failed to remove local", zap.String("policy_id", policyData.ID), zap.String("policy_name", policyData.Name), zap.Error(err)) + } + } +} + +func (a *policyManager) applyPolicy(payload fleet.AgentPolicyRPCPayload, be backend.Backend, pd *policies.PolicyData, updatePolicy bool) { + err := be.ApplyPolicy(*pd, updatePolicy) + if err != nil { + a.logger.Warn("policy failed to apply", zap.String("policy_id", payload.ID), zap.String("policy_name", payload.Name), zap.Error(err)) + switch { + case strings.Contains(err.Error(), "422"): + pd.State = policies.NoTapMatch + default: + pd.State = policies.FailedToApply + } + pd.BackendErr = err.Error() + } else { + a.logger.Info("policy applied successfully", zap.String("policy_id", payload.ID), zap.String("policy_name", payload.Name)) + pd.State = policies.Running + pd.BackendErr = "" + } +} + +func (a *policyManager) RemoveBackendPolicies(be backend.Backend, permanently bool) error { + plcies, err := a.repo.GetAll() + if err != nil { + a.logger.Error("failed to retrieve list of policies", zap.Error(err)) + return err + } + + for _, plcy := range plcies { + err := be.RemovePolicy(plcy) + if err != nil { + a.logger.Error("failed to remove policy from backend", zap.String("policy_id", plcy.ID), zap.String("policy_name", plcy.Name), zap.Error(err)) + 
// note we continue here: even if the backend failed to remove, we update our policy repo to remove it + } + if permanently { + err = a.repo.Remove(plcy.ID) + if err != nil { + return err + } + } else { + plcy.State = policies.Unknown + err = a.repo.Update(plcy) + if err != nil { + return err + } + } + } + return nil +} + +func (a *policyManager) ApplyBackendPolicies(be backend.Backend) error { + plcies, err := a.repo.GetAll() + if err != nil { + a.logger.Error("failed to retrieve list of policies", zap.Error(err)) + return err + } + + for _, policy := range plcies { + err := be.ApplyPolicy(policy, false) + if err != nil { + a.logger.Warn("policy failed to apply", zap.String("policy_id", policy.ID), zap.String("policy_name", policy.Name), zap.Error(err)) + policy.State = policies.FailedToApply + policy.BackendErr = err.Error() + } else { + a.logger.Info("policy applied successfully", zap.String("policy_id", policy.ID), zap.String("policy_name", policy.Name)) + policy.State = policies.Running + policy.BackendErr = "" + } + err = a.repo.Update(policy) + if err != nil { + return err + } + } + return nil +} diff --git a/agent/rpc_from.go b/agent/rpc_from.go new file mode 100644 index 0000000..339fb94 --- /dev/null +++ b/agent/rpc_from.go @@ -0,0 +1,240 @@ +package agent + +import ( + "context" + "encoding/json" + "fmt" + + mqtt "github.com/eclipse/paho.mqtt.golang" + "github.com/orb-community/orb/fleet" + "go.uber.org/zap" +) + +func (a *orbAgent) handleGroupMembership(rpc fleet.GroupMembershipRPCPayload) { + // if this is the full list, reset all group subscriptions and subscribed to this list + _, _ = a.extendContext("handleGroupMembership") + if rpc.FullList { + a.unsubscribeGroupChannels() + a.subscribeGroupChannels(rpc.Groups) + err := a.sendAgentPoliciesReq() + if err != nil { + a.logger.Error("failed to send agent policies request", zap.Error(err)) + } + } else { + // otherwise, just add these subscriptions to the existing list + 
a.subscribeGroupChannels(rpc.Groups)
+	}
+}
+
+// handleAgentPolicies applies a batch of policy RPC payloads. When fullList is
+// true, any locally-known policy that is absent from the batch is removed first.
+func (a *orbAgent) handleAgentPolicies(ctx context.Context, rpc []fleet.AgentPolicyRPCPayload, fullList bool) {
+	// NOTE(review): the CancelFunc returned by extendContext is discarded, so the
+	// derived context is never cancelled (`go vet` lostcancel); ctx is also unused
+	// after this reassignment.
+	ctx, _ = a.extendContext("handleAgentPolicies")
+	if fullList {
+		// NOTE(review): this local variable shadows the imported `policies` package.
+		policies, err := a.policyManager.GetRepo().GetAll()
+		if err != nil {
+			a.logger.Error("failed to retrieve policies on handle subscriptions")
+			return
+		}
+		// Create a map with all the old policies, each initially marked for removal.
+		policyRemove := map[string]bool{}
+		for _, p := range policies {
+			policyRemove[p.ID] = true
+		}
+		// Un-mark every policy that is still present in the incoming full list.
+		for _, payload := range rpc {
+			if ok := policyRemove[payload.ID]; ok {
+				policyRemove[payload.ID] = false
+			}
+		}
+		// Remove only the policies that stayed marked (known locally, absent remotely).
+		for k, v := range policyRemove {
+			if v == true {
+				policy, err := a.policyManager.GetRepo().Get(k)
+				if err != nil {
+					a.logger.Warn("failed to retrieve policy", zap.String("policy_id", k), zap.Error(err))
+					continue
+				}
+				err = a.policyManager.RemovePolicy(policy.ID, policy.Name, policy.Backend)
+				if err != nil {
+					a.logger.Warn("failed to remove a policy, ignoring", zap.String("policy_id", policy.ID), zap.String("policy_name", policy.Name), zap.Error(err))
+					continue
+				}
+			}
+		}
+	}
+
+	// Apply every payload except the "sanitize" action.
+	for _, payload := range rpc {
+		if payload.Action != "sanitize" {
+			a.policyManager.ManagePolicy(payload)
+		}
+	}
+
+	// heart beat with new policy status after application
+	if a.heartbeatCtx == nil {
+		a.logonWithHeartbeat()
+	}
+}
+
+// handleGroupRPCFromCore decodes and dispatches a group-channel RPC message
+// from the control plane; the work runs on its own goroutine so the MQTT
+// callback returns promptly.
+func (a *orbAgent) handleGroupRPCFromCore(_ mqtt.Client, message mqtt.Message) {
+	handleMsgCtx, handleMsgCtxCancelFunc := a.extendContext("handleGroupRPCFromCore")
+	go func(ctx context.Context, cancelFunc context.CancelFunc) {
+		defer cancelFunc()
+		a.logger.Debug("Group RPC message from core", zap.String("topic", message.Topic()), zap.ByteString("payload", message.Payload()))
+		var rpc fleet.RPC
+		if err := json.Unmarshal(message.Payload(), &rpc); err != nil {
+			a.logger.Error("error decoding RPC message from core", zap.Error(fleet.ErrSchemaMalformed))
+			return
+ } + if rpc.SchemaVersion != fleet.CurrentRPCSchemaVersion { + a.logger.Error("error decoding RPC message from core", zap.Error(fleet.ErrSchemaVersion)) + return + } + if rpc.Func == "" || rpc.Payload == nil { + a.logger.Error("error decoding RPC message from core", zap.Error(fleet.ErrSchemaMalformed)) + return + } + + // dispatch + switch rpc.Func { + case fleet.AgentPolicyRPCFunc: + var r fleet.AgentPolicyRPC + if err := json.Unmarshal(message.Payload(), &r); err != nil { + a.logger.Error("error decoding agent policy message from core", zap.Error(fleet.ErrSchemaMalformed)) + return + } + a.handleAgentPolicies(ctx, r.Payload, r.FullList) + a.logger.Debug("received agent policies, marking success") + if a.policyRequestSucceeded != nil { + a.policyRequestSucceeded() + } + case fleet.GroupRemovedRPCFunc: + var r fleet.GroupRemovedRPC + if err := json.Unmarshal(message.Payload(), &r); err != nil { + a.logger.Error("error decoding agent group removal message from core", zap.Error(fleet.ErrSchemaMalformed)) + return + } + a.handleAgentGroupRemoval(r.Payload) + case fleet.DatasetRemovedRPCFunc: + var r fleet.DatasetRemovedRPC + if err := json.Unmarshal(message.Payload(), &r); err != nil { + a.logger.Error("error decoding dataset removal message from core", zap.Error(fleet.ErrSchemaMalformed)) + return + } + a.handleDatasetRemoval(r.Payload) + default: + a.logger.Warn("unsupported/unhandled core RPC, ignoring", + zap.String("func", rpc.Func), + zap.Any("payload", rpc.Payload)) + } + }(handleMsgCtx, handleMsgCtxCancelFunc) +} + +func (a *orbAgent) handleAgentStop(payload fleet.AgentStopRPCPayload) { + // TODO graceful stop agent https://github.com/orb-community/orb/issues/466 + panic(fmt.Sprintf("control plane requested we terminate, reason: %s", payload.Reason)) +} + +func (a *orbAgent) handleAgentGroupRemoval(rpc fleet.GroupRemovedRPCPayload) { + a.unsubscribeGroupChannel(rpc.ChannelID, rpc.AgentGroupID) + + policies, err := a.policyManager.GetRepo().GetAll() + if err 
!= nil { + return + } + + for _, policy := range policies { + delete(policy.GroupIds, rpc.AgentGroupID) + + if len(policy.GroupIds) == 0 { + a.logger.Info("policy no longer used by any group, removing", zap.String("policy_id", policy.ID), zap.String("policy_name", policy.Name)) + + err = a.policyManager.RemovePolicy(policy.ID, policy.Name, policy.Backend) + if err != nil { + a.logger.Warn("failed to remove a policy, ignoring", zap.String("policy_id", policy.ID), zap.String("policy_name", policy.Name), zap.Error(err)) + continue + } + } else { + for _, datasetID := range rpc.Datasets { + a.removeDatasetFromPolicy(datasetID, policy.ID) + } + } + } +} + +func (a *orbAgent) handleDatasetRemoval(rpc fleet.DatasetRemovedRPCPayload) { + a.removeDatasetFromPolicy(rpc.DatasetID, rpc.PolicyID) +} + +func (a *orbAgent) handleAgentReset(ctx context.Context, payload fleet.AgentResetRPCPayload) { + if payload.FullReset { + err := a.RestartAll(ctx, payload.Reason) + if err != nil { + a.logger.Error("RestartAll failure", zap.Error(err)) + } + } else { + // TODO backend specific restart + // a.RestartBackend() + } +} + +func (a *orbAgent) handleRPCFromCore(client mqtt.Client, message mqtt.Message) { + handleMsgCtx, handleMsgCtxCancelFunc := a.extendContext("handleRPCFromCore") + go func(ctx context.Context, cancelFunc context.CancelFunc) { + a.logger.Debug("RPC message from core", zap.String("topic", message.Topic()), zap.ByteString("payload", message.Payload())) + + var rpc fleet.RPC + if err := json.Unmarshal(message.Payload(), &rpc); err != nil { + a.logger.Error("error decoding RPC message from core", zap.Error(fleet.ErrSchemaMalformed)) + return + } + if rpc.SchemaVersion != fleet.CurrentRPCSchemaVersion { + a.logger.Error("error decoding RPC message from core", zap.Error(fleet.ErrSchemaVersion)) + return + } + if rpc.Func == "" || rpc.Payload == nil { + a.logger.Error("error decoding RPC message from core", zap.Error(fleet.ErrSchemaMalformed)) + return + } + // dispatch + 
switch rpc.Func { + case fleet.GroupMembershipRPCFunc: + var r fleet.GroupMembershipRPC + if err := json.Unmarshal(message.Payload(), &r); err != nil { + a.logger.Error("error decoding group membership message from core", zap.Error(fleet.ErrSchemaMalformed)) + return + } + a.handleGroupMembership(r.Payload) + a.logger.Debug("received group membership, marking success") + if a.groupRequestSucceeded != nil { + a.groupRequestSucceeded() + } + case fleet.AgentPolicyRPCFunc: + var r fleet.AgentPolicyRPC + if err := json.Unmarshal(message.Payload(), &r); err != nil { + a.logger.Error("error decoding agent policy message from core", zap.Error(fleet.ErrSchemaMalformed)) + return + } + a.handleAgentPolicies(ctx, r.Payload, r.FullList) + a.logger.Debug("received agent policies, marking success") + if a.policyRequestSucceeded != nil { + a.policyRequestSucceeded() + } + case fleet.AgentStopRPCFunc: + var r fleet.AgentStopRPC + if err := json.Unmarshal(message.Payload(), &r); err != nil { + a.logger.Error("error decoding agent stop message from core", zap.Error(fleet.ErrSchemaMalformed)) + return + } + a.handleAgentStop(r.Payload) + case fleet.AgentResetRPCFunc: + var r fleet.AgentResetRPC + if err := json.Unmarshal(message.Payload(), &r); err != nil { + a.logger.Error("error decoding agent reset message from core", zap.Error(fleet.ErrSchemaMalformed)) + return + } + a.handleAgentReset(ctx, r.Payload) + default: + a.logger.Warn("unsupported/unhandled core RPC, ignoring", + zap.String("func", rpc.Func), + zap.Any("payload", rpc.Payload)) + } + }(handleMsgCtx, handleMsgCtxCancelFunc) +} diff --git a/agent/rpc_to.go b/agent/rpc_to.go new file mode 100644 index 0000000..aa192a4 --- /dev/null +++ b/agent/rpc_to.go @@ -0,0 +1,173 @@ +package agent + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/netboxlabs/orb-agent/buildinfo" + "github.com/orb-community/orb/fleet" + "go.uber.org/zap" +) + +func (a *orbAgent) sendCapabilities() error { + + capabilities := 
fleet.Capabilities{ + SchemaVersion: fleet.CurrentCapabilitiesSchemaVersion, + AgentTags: a.config.OrbAgent.Tags, + OrbAgent: fleet.OrbAgentInfo{ + Version: buildinfo.GetVersion(), + }, + } + + capabilities.Backends = make(map[string]fleet.BackendInfo) + for name, be := range a.backends { + ver, err := be.Version() + if err != nil { + a.logger.Error("backend failed to retrieve version, skipping", zap.String("backend", name), zap.Error(err)) + continue + } + cp, err := be.GetCapabilities() + if err != nil { + a.logger.Error("backend failed to retrieve capabilities, skipping", zap.String("backend", name), zap.Error(err)) + continue + } + capabilities.Backends[name] = fleet.BackendInfo{ + Version: ver, + Data: cp, + } + } + + body, err := json.Marshal(capabilities) + if err != nil { + a.logger.Error("backend failed to marshal capabilities, skipping", zap.Error(err)) + return err + } + + a.logger.Info("sending capabilities", zap.ByteString("value", body)) + if token := a.client.Publish(a.capabilitiesTopic, 1, false, body); token.Wait() && token.Error() != nil { + return token.Error() + } + + return nil +} + +func (a *orbAgent) sendGroupMembershipRequest() error { + a.logger.Debug("sending group membership request") + payload := fleet.GroupMembershipReqRPCPayload{} + + data := fleet.RPC{ + SchemaVersion: fleet.CurrentRPCSchemaVersion, + Func: fleet.GroupMembershipReqRPCFunc, + Payload: payload, + } + body, err := json.Marshal(data) + if err != nil { + return err + } + + if token := a.client.Publish(a.rpcToCoreTopic, 1, false, body); token.Wait() && token.Error() != nil { + return token.Error() + } + return nil +} + +func (a *orbAgent) sendGroupMembershipReq() error { + defer a.retryGroupMembershipRequest() + return a.sendGroupMembershipRequest() +} + +func (a *orbAgent) retryGroupMembershipRequest() { + if a.groupRequestTicker == nil { + a.groupRequestTicker = time.NewTicker(retryRequestFixedTime * retryRequestDuration) + } + var ctx context.Context + ctx, 
a.groupRequestSucceeded = a.extendContext("retryGroupMembershipRequest") + go func(ctx context.Context) { + defer a.groupRequestTicker.Stop() + defer func(t time.Time) { + a.logger.Info("execution period of the re-request of retryGroupMembership", zap.Duration("waiting period", time.Now().Sub(t))) + }(time.Now()) + lastT := time.Now() + for calls := 1; calls <= retryMaxAttempts; calls++ { + select { + case <-ctx.Done(): + return + case t := <-a.groupRequestTicker.C: + a.logger.Info("agent did not receive any group membership from fleet, re-requesting", zap.Duration("waiting period", lastT.Sub(t))) + duration := retryRequestFixedTime + (calls * retryDurationIncrPerAttempts) + a.groupRequestTicker.Reset(time.Duration(duration) * retryRequestDuration) + err := a.sendGroupMembershipRequest() + if err != nil { + a.logger.Error("failed to send group membership request", zap.Error(err)) + return + } + lastT = t + } + } + a.logger.Warn(fmt.Sprintf("retryGroupMembership retried %d times and still got no response from fleet", retryMaxAttempts)) + return + }(ctx) +} + +func (a *orbAgent) sendAgentPoliciesReq() error { + defer a.retryAgentPolicyResponse() + return a.sendAgentPoliciesRequest() +} + +func (a *orbAgent) sendAgentPoliciesRequest() error { + a.logger.Debug("sending agent policies request") + payload := fleet.AgentPoliciesReqRPCPayload{} + + data := fleet.RPC{ + SchemaVersion: fleet.CurrentRPCSchemaVersion, + Func: fleet.AgentPoliciesReqRPCFunc, + Payload: payload, + } + + body, err := json.Marshal(data) + if err != nil { + return err + } + + if token := a.client.Publish(a.rpcToCoreTopic, 1, false, body); token.Wait() && token.Error() != nil { + return token.Error() + } + + return nil +} + +func (a *orbAgent) retryAgentPolicyResponse() { + if a.policyRequestTicker == nil { + a.policyRequestTicker = time.NewTicker(retryRequestFixedTime * retryRequestDuration) + } + var ctx context.Context + ctx, a.policyRequestSucceeded = a.extendContext("retryAgentPolicyResponse") + 
go func(ctx context.Context) { + defer a.policyRequestTicker.Stop() + defer func(t time.Time) { + a.logger.Info("execution period of the re-request of retryAgentPolicy", zap.Duration("period", time.Now().Sub(t))) + }(time.Now()) + lastT := time.Now() + for calls := 1; calls <= retryMaxAttempts; calls++ { + select { + case <-ctx.Done(): + a.policyRequestTicker.Stop() + return + case t := <-a.policyRequestTicker.C: + a.logger.Info("agent did not receive any policy from fleet, re-requesting", zap.Duration("waiting period", lastT.Sub(t))) + duration := retryRequestFixedTime + (calls * retryDurationIncrPerAttempts) + a.policyRequestTicker.Reset(time.Duration(duration) * retryRequestDuration) + err := a.sendAgentPoliciesRequest() + if err != nil { + a.logger.Error("failed to send agent policies request", zap.Error(err)) + return + } + lastT = t + } + } + a.logger.Warn(fmt.Sprintf("retryAgentPolicy retried %d times and still got no response from fleet", retryMaxAttempts)) + return + }(ctx) +} diff --git a/buildinfo/version.go b/buildinfo/version.go new file mode 100644 index 0000000..3fa91eb --- /dev/null +++ b/buildinfo/version.go @@ -0,0 +1,40 @@ +package buildinfo + +import ( + "encoding/json" + "net/http" +) + +// set via ldflags -X option at build time +var version = "unknown" + +// minimum version of an agent that we allow to connect +const minAgentVersion string = "0.9.0-develop" + +func GetVersion() string { + return version +} + +func GetMinAgentVersion() string { + return minAgentVersion +} + +// VersionInfo contains version endpoint response. +type VersionInfo struct { + // Service contains service name. + Service string `json:"service"` + + // Version contains service current version value. + Version string `json:"version"` +} + +// Version exposes an HTTP handler for retrieving service version. 
+func Version(service string) http.HandlerFunc {
+	return http.HandlerFunc(func(rw http.ResponseWriter, _ *http.Request) {
+		res := VersionInfo{service, version}
+
+		data, _ := json.Marshal(res)
+
+		rw.Write(data)
+	})
+}
diff --git a/cmd/agent.example.yaml b/cmd/agent.example.yaml
new file mode 100644
index 0000000..4ee5ebb
--- /dev/null
+++ b/cmd/agent.example.yaml
@@ -0,0 +1,44 @@
+version: "1.0"
+
+# this section is used by pktvisor
+# see https://github.com/orb-community/pktvisor/blob/develop/RFCs/2021-04-16-75-taps.md
+visor:
+  taps:
+    default_pcap:
+      input_type: pcap
+      config:
+        iface: "auto"
+
+# this section is used by orb-agent
+# most sections and keys are optional
+orb:
+  # these are arbitrary key value pairs used to dynamically define a group of agents by matching against agent group tags
+  tags:
+    region: EU
+    pop: ams02
+    node_type: dns
+  cloud:
+    config:
+      # optionally specify an agent name to use during auto provisioning
+      # hostname will be used if it's not specified here
+      agent_name: my-agent1
+      auto_provision: true
+    api:
+      address: https://api.orb.live
+      # if auto provisioning, specify API token here (or pass on the command line)
+      token: TOKEN
+    mqtt:
+      address: tls://agents.orb.live:8883
+      # if not auto provisioning, specify agent connection details here
+#      id: "f420a133-7651-412d-852a-6141fafeaea5"
+#      key: "14ae65ae-092f-4fdc-be6a-0cfb378119dc"
+#      channel_id: "9610b0a4-b05f-46e5-a32d-000d8a2ec1fd"
+#  tls:
+#    verify: true
+#  db:
+#    file: "/usr/local/orb/orb-agent.db"
+  backends:
+    pktvisor:
+      binary: "/usr/local/sbin/pktvisord"
+      # this example assumes the file is saved as agent.yaml.
If your file has another name, you must replace it with the proper name + config_file: "/opt/orb/agent.yaml" diff --git a/cmd/e2e_agent_test.go b/cmd/e2e_agent_test.go new file mode 100644 index 0000000..dbcbbd0 --- /dev/null +++ b/cmd/e2e_agent_test.go @@ -0,0 +1,134 @@ +package main + +import ( + "context" + "fmt" + "os" + "os/signal" + "syscall" + "testing" + "time" + + "github.com/netboxlabs/orb-agent/agent" + "github.com/netboxlabs/orb-agent/agent/backend/pktvisor" + "github.com/netboxlabs/orb-agent/agent/config" + "github.com/pkg/profile" + "github.com/spf13/cobra" + "github.com/spf13/viper" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +func Test_e2e_orbAgent_ConfigFile(t *testing.T) { + t.Skip("local run only, skip in CICD") + defer profile.Start().Stop() + rootCmd := &cobra.Command{ + Use: "orb-agent", + } + + runCmd := &cobra.Command{ + Use: "run", + Short: "Run orb-agent and connect to Orb control plane", + Long: `Run orb-agent and connect to Orb control plane`, + Run: Run, + } + + runCmd.Flags().StringSliceVarP(&cfgFiles, "config", "c", []string{}, "Path to config files (may be specified multiple times)") + runCmd.PersistentFlags().BoolVarP(&Debug, "debug", "d", false, "Enable verbose (debug level) output") + + rootCmd.AddCommand(runCmd) + rootCmd.SetArgs([]string{"run", "-d", "-c", "/home/lpegoraro/workspace/orb/localconfig/config.yaml"}) + ctx, cancelF := context.WithTimeout(context.Background(), 2*time.Minute) + err := rootCmd.ExecuteContext(ctx) + if err != nil { + t.Fail() + } + + select { + case <-ctx.Done(): + cancelF() + return + } +} + +func Test_main(t *testing.T) { + t.Skip("local run only, skip in CICD") + + mergeOrError("/home/lpegoraro/workspace/orb/localconfig/config.yaml") + + // configuration + var cfg config.Config + err := viper.Unmarshal(&cfg) + if err != nil { + cobra.CheckErr(fmt.Errorf("agent start up error (config): %w", err)) + os.Exit(1) + } + + cfg.OrbAgent.Debug.Enable = true + + // include pktvisor backend by default 
if binary is at default location + _, err = os.Stat(pktvisor.DefaultBinary) + if err == nil && cfg.OrbAgent.Backends == nil { + cfg.OrbAgent.Backends = make(map[string]map[string]string) + cfg.OrbAgent.Backends["pktvisor"] = make(map[string]string) + cfg.OrbAgent.Backends["pktvisor"]["binary"] = pktvisor.DefaultBinary + if len(cfgFiles) > 0 { + cfg.OrbAgent.Backends["pktvisor"]["config_file"] = "/home/lpegoraro/workspace/orb/localconfig/config.yaml" + } + } + + // logger + var logger *zap.Logger + atomicLevel := zap.NewAtomicLevel() + if Debug { + atomicLevel.SetLevel(zap.DebugLevel) + } else { + atomicLevel.SetLevel(zap.InfoLevel) + } + encoderCfg := zap.NewProductionEncoderConfig() + encoderCfg.EncodeTime = zapcore.ISO8601TimeEncoder + core := zapcore.NewCore( + zapcore.NewJSONEncoder(encoderCfg), + os.Stdout, + atomicLevel, + ) + logger = zap.New(core, zap.AddCaller()) + defer func(logger *zap.Logger) { + _ = logger.Sync() + }(logger) + + // new agent + a, err := agent.New(logger, cfg) + if err != nil { + logger.Error("agent start up error", zap.Error(err)) + os.Exit(1) + } + + // handle signals + done := make(chan bool, 1) + rootCtx, cancelFunc := context.WithTimeout(context.WithValue(context.Background(), "routine", "mainRoutine"), 15*time.Minute) + + go func() { + sigs := make(chan os.Signal, 1) + signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM, syscall.SIGKILL) + select { + case <-sigs: + logger.Warn("stop signal received stopping agent") + a.Stop(rootCtx) + cancelFunc() + case <-rootCtx.Done(): + logger.Warn("mainRoutine context cancelled") + done <- true + return + } + }() + + // start agent + err = a.Start(rootCtx, cancelFunc) + if err != nil { + logger.Error("agent startup error", zap.Error(err)) + os.Exit(1) + } + + <-done +} diff --git a/cmd/main.go b/cmd/main.go new file mode 100644 index 0000000..38cd0a8 --- /dev/null +++ b/cmd/main.go @@ -0,0 +1,231 @@ +package main + +import ( + "context" + "fmt" + "os" + "os/signal" + "strings" + "syscall" + + 
"github.com/netboxlabs/orb-agent/agent/backend/otel" + + "github.com/netboxlabs/orb-agent/agent" + "github.com/netboxlabs/orb-agent/agent/backend/pktvisor" + "github.com/netboxlabs/orb-agent/agent/config" + "github.com/netboxlabs/orb-agent/buildinfo" + "github.com/spf13/cobra" + "github.com/spf13/viper" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +const ( + defaultConfig = "/opt/orb/agent.yaml" +) + +var ( + cfgFiles []string + Debug bool +) + +func init() { + pktvisor.Register() + otel.Register() +} + +func Version(_ *cobra.Command, _ []string) { + fmt.Printf("orb-agent %s\n", buildinfo.GetVersion()) + os.Exit(0) +} + +func Run(_ *cobra.Command, _ []string) { + + initConfig() + + // configuration + var configData config.Config + err := viper.Unmarshal(&configData) + if err != nil { + cobra.CheckErr(fmt.Errorf("agent start up error (configData): %w", err)) + os.Exit(1) + } + + // logger + var logger *zap.Logger + atomicLevel := zap.NewAtomicLevel() + if Debug { + atomicLevel.SetLevel(zap.DebugLevel) + } else { + atomicLevel.SetLevel(zap.InfoLevel) + } + encoderCfg := zap.NewProductionEncoderConfig() + encoderCfg.EncodeTime = zapcore.ISO8601TimeEncoder + core := zapcore.NewCore( + zapcore.NewJSONEncoder(encoderCfg), + os.Stdout, + atomicLevel, + ) + logger = zap.New(core, zap.AddCaller()) + defer func(logger *zap.Logger) { + _ = logger.Sync() + }(logger) + + // include pktvisor backend by default if binary is at default location + _, err = os.Stat(pktvisor.DefaultBinary) + logger.Info("backends loaded", zap.Any("backends", configData.OrbAgent.Backends)) + if err == nil && configData.OrbAgent.Backends == nil { + logger.Info("no backends loaded, adding pktvisor as default") + configData.OrbAgent.Backends = make(map[string]map[string]string) + configData.OrbAgent.Backends["pktvisor"] = make(map[string]string) + configData.OrbAgent.Backends["pktvisor"]["binary"] = pktvisor.DefaultBinary + configData.OrbAgent.Backends["pktvisor"]["api_host"] = "localhost" + if 
_, ok := configData.OrbAgent.Backends["pktvisor"]["api_port"]; !ok { + configData.OrbAgent.Backends["pktvisor"]["api_port"] = "10853" + } + if len(cfgFiles) > 0 { + configData.OrbAgent.Backends["pktvisor"]["config_file"] = cfgFiles[0] + } + } + + // new agent + a, err := agent.New(logger, configData) + if err != nil { + logger.Error("agent start up error", zap.Error(err)) + os.Exit(1) + } + + // handle signals + done := make(chan bool, 1) + rootCtx, cancelFunc := context.WithCancel(context.WithValue(context.Background(), "routine", "mainRoutine")) + + go func() { + sigs := make(chan os.Signal, 1) + signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM, syscall.SIGKILL) + select { + case <-sigs: + logger.Warn("stop signal received stopping agent") + a.Stop(rootCtx) + cancelFunc() + case <-rootCtx.Done(): + logger.Warn("mainRoutine context cancelled") + done <- true + return + } + }() + + // start agent + err = a.Start(rootCtx, cancelFunc) + if err != nil { + logger.Error("agent startup error", zap.Error(err)) + os.Exit(1) + } + + <-done +} + +func mergeOrError(path string) { + + v := viper.New() + if len(path) > 0 { + v.SetConfigFile(path) + v.SetConfigType("yaml") + } + + v.AutomaticEnv() + replacer := strings.NewReplacer(".", "_") + v.SetEnvKeyReplacer(replacer) + + // note: viper seems to require a default (or a BindEnv) to be overridden by environment variables + v.SetDefault("orb.cloud.api.address", "https://orb.live") + v.SetDefault("orb.cloud.api.token", "") + v.SetDefault("orb.cloud.config.agent_name", "") + v.SetDefault("orb.cloud.config.auto_provision", true) + v.SetDefault("orb.cloud.mqtt.address", "tls://agents.orb.live:8883") + v.SetDefault("orb.cloud.mqtt.id", "") + v.SetDefault("orb.cloud.mqtt.key", "") + v.SetDefault("orb.cloud.mqtt.channel_id", "") + v.SetDefault("orb.db.file", "/opt/orb/orb-agent.db") + v.SetDefault("orb.tls.verify", true) + v.SetDefault("orb.otel.host", "localhost") + v.SetDefault("orb.otel.port", 0) + 
v.SetDefault("orb.debug.enable", Debug) + + if len(path) > 0 { + cobra.CheckErr(v.ReadInConfig()) + } + + var fZero float64 + + // check that version of config files are all matched up + if versionNumber1 := viper.GetFloat64("version"); versionNumber1 != fZero { + versionNumber2 := v.GetFloat64("version") + if versionNumber2 == fZero { + cobra.CheckErr("Failed to parse config version in: " + path) + } + if versionNumber2 != versionNumber1 { + cobra.CheckErr("Config file version mismatch in: " + path) + } + } + + // load backend static functions for setting up default values + backendVarsFunction := make(map[string]func(*viper.Viper)) + backendVarsFunction["pktvisor"] = pktvisor.RegisterBackendSpecificVariables + backendVarsFunction["otel"] = otel.RegisterBackendSpecificVariables + + // check if backends are configured + // if not then add pktvisor as default + if len(path) > 0 && len(v.GetStringMap("orb.backends")) == 0 { + pktvisor.RegisterBackendSpecificVariables(v) + } else { + for backendName := range v.GetStringMap("orb.backends") { + if backend := v.GetStringMap("orb.backends." + backendName); backend != nil { + backendVarsFunction[backendName](v) + } + } + } + + cobra.CheckErr(viper.MergeConfigMap(v.AllSettings())) +} + +// initConfig reads in config file and ENV variables if set. 
+func initConfig() { + // set defaults first + mergeOrError("") + if len(cfgFiles) == 0 { + if _, err := os.Stat(defaultConfig); !os.IsNotExist(err) { + mergeOrError(defaultConfig) + } + } else { + for _, conf := range cfgFiles { + mergeOrError(conf) + } + } +} + +func main() { + + rootCmd := &cobra.Command{ + Use: "orb-agent", + } + + versionCmd := &cobra.Command{ + Use: "version", + Short: "Show agent version", + Run: Version, + } + + runCmd := &cobra.Command{ + Use: "run", + Short: "Run orb-agent and connect to Orb control plane", + Long: `Run orb-agent and connect to Orb control plane`, + Run: Run, + } + + runCmd.Flags().StringSliceVarP(&cfgFiles, "config", "c", []string{}, "Path to config files (may be specified multiple times)") + runCmd.PersistentFlags().BoolVarP(&Debug, "debug", "d", false, "Enable verbose (debug level) output") + + rootCmd.AddCommand(runCmd) + rootCmd.AddCommand(versionCmd) + _ = rootCmd.Execute() +} diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..4aff33e --- /dev/null +++ b/go.mod @@ -0,0 +1,115 @@ +module github.com/netboxlabs/orb-agent + +go 1.23.2 + +require ( + github.com/fatih/structs v1.1.0 + github.com/google/uuid v1.4.0 + github.com/orb-community/orb v0.30.0 +) + +require ( + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/felixge/fgprof v0.9.3 // indirect + github.com/fxamacker/cbor/v2 v2.5.0 // indirect + github.com/go-gorp/gorp/v3 v3.1.0 // indirect + github.com/go-kit/kit v0.13.0 // indirect + github.com/go-kit/log v0.2.1 // indirect + github.com/go-logfmt/logfmt v0.6.0 // indirect + github.com/go-zoo/bone v1.3.0 // indirect + github.com/google/pprof v0.0.0-20230228050547-1710fef4ab10 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/mainflux/mainflux v0.0.0-20220415135135-92d8fb99bf82 // indirect + github.com/mainflux/senml v1.5.0 // indirect + github.com/nats-io/nats.go v1.32.0 // indirect + github.com/nats-io/nkeys v0.4.7 // indirect 
+ github.com/nats-io/nuid v1.0.1 // indirect + github.com/opentracing/opentracing-go v1.2.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/x448/float16 v0.8.4 // indirect + golang.org/x/crypto v0.18.0 // indirect +) + +require ( + github.com/andybalholm/brotli v1.0.6 + github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/eclipse/paho.mqtt.golang v1.4.3 + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-cmd/cmd v1.4.2 + github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/gorilla/websocket v1.5.1 // indirect + github.com/hashicorp/go-version v1.6.0 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/jmoiron/sqlx v1.4.0 + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.17.4 // indirect + github.com/knadh/koanf v1.5.0 // indirect + github.com/knadh/koanf/v2 v2.0.1 // indirect + github.com/magiconair/properties v1.8.7 // indirect + github.com/mattn/go-sqlite3 v1.14.22 + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/mostynb/go-grpc-compression v1.2.2 // indirect + github.com/pelletier/go-toml/v2 v2.1.1 // indirect + github.com/pkg/profile v1.7.0 + github.com/rs/cors v1.10.1 // indirect + github.com/rubenv/sql-migrate v1.7.0 + github.com/sagikazarmark/locafero v0.4.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.11.0 
// indirect + github.com/spf13/cast v1.6.0 // indirect + github.com/spf13/cobra v1.8.1 + github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/viper v1.18.2 + github.com/stretchr/testify v1.9.0 + github.com/subosito/gotenv v1.6.0 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/collector v0.91.0 // indirect + go.opentelemetry.io/collector/component v0.91.0 + go.opentelemetry.io/collector/config/configauth v0.91.0 // indirect + go.opentelemetry.io/collector/config/configcompression v0.91.0 // indirect + go.opentelemetry.io/collector/config/configgrpc v0.91.0 + go.opentelemetry.io/collector/config/confighttp v0.91.0 + go.opentelemetry.io/collector/config/confignet v0.91.0 + go.opentelemetry.io/collector/config/configopaque v0.91.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.91.0 // indirect + go.opentelemetry.io/collector/config/configtls v0.91.0 // indirect + go.opentelemetry.io/collector/config/internal v0.91.0 // indirect + go.opentelemetry.io/collector/confmap v0.91.0 + go.opentelemetry.io/collector/consumer v0.91.0 + go.opentelemetry.io/collector/exporter v0.91.0 + go.opentelemetry.io/collector/extension v0.91.0 // indirect + go.opentelemetry.io/collector/extension/auth v0.91.0 // indirect + go.opentelemetry.io/collector/featuregate v1.0.0 // indirect + go.opentelemetry.io/collector/pdata v1.0.0 + go.opentelemetry.io/collector/receiver v0.91.0 + go.opentelemetry.io/collector/receiver/otlpreceiver v0.91.0 + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect + go.opentelemetry.io/otel v1.21.0 // indirect + go.opentelemetry.io/otel/metric v1.21.0 // indirect + go.opentelemetry.io/otel/sdk v1.21.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.21.0 + go.opentelemetry.io/otel/trace v1.21.0 + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.26.0 + golang.org/x/exp 
v0.0.0-20231127185646-65229373498e + golang.org/x/net v0.20.0 // indirect + golang.org/x/sync v0.6.0 // indirect + golang.org/x/sys v0.16.0 // indirect + golang.org/x/text v0.14.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect + google.golang.org/grpc v1.60.1 // indirect + google.golang.org/protobuf v1.33.0 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/yaml.v3 v3.0.1 +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..e788868 --- /dev/null +++ b/go.sum @@ -0,0 +1,704 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y= +cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= +cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= +cloud.google.com/go/compute/metadata v0.2.4-0.20230617002413-005d2dfb6b68 h1:aRVqY1p2IJaBGStWMsQMpkAa83cPkCDLl80eOj0Rbz4= +cloud.google.com/go/compute/metadata v0.2.4-0.20230617002413-005d2dfb6b68/go.mod h1:1a3eRNYX12fs5UABBIXS8HXVvQbX9hRB/RkEBPORpe8= +contrib.go.opencensus.io/exporter/prometheus v0.4.2 h1:sqfsYl5GIY/L570iT+l93ehxaWJs2/OwXtiWwew3oAg= +contrib.go.opencensus.io/exporter/prometheus v0.4.2/go.mod h1:dvEHbiKmgvbr5pjaF9fpw1KeYcjrnC1J8B+JKjsZyRQ= +dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= +dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= 
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= +github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/andybalholm/brotli v1.0.6 h1:Yf9fFpf49Zrxb9NlQaluyE92/+X7UVHlhMNJN2sxfOI= +github.com/andybalholm/brotli v1.0.6/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= 
+github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= +github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw= +github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0/go.mod h1:gqlclDEZp4aqJOancXK6TN24aKhT0W0Ae9MHk3wzTMM= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4/go.mod h1:ZcBrrI3zBKlhGFNYWvju0I3TR93I7YIgAfy82Fh4lcQ= +github.com/aws/aws-sdk-go-v2/service/appconfig v1.4.2/go.mod h1:FZ3HkCe+b10uFZZkFdvf98LHW21k49W8o8J366lqVKY= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72HRZDLMtmVQiLG2tLfQcaWLCssELvGl+Zf2WVxMmR8= +github.com/aws/aws-sdk-go-v2/service/sso v1.4.2/go.mod h1:NBvT9R1MEF+Ud6ApJKM0G+IkPchKS7p7c2YPKwHmBOk= +github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21TfrhJ8AEMzVybRNSb/b4g= +github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/benbjohnson/immutable v0.4.3 h1:GYHcksoJ9K6HyAUpGxwZURrbTkXA0Dh4otXGqbhdrjA= +github.com/benbjohnson/immutable v0.4.3/go.mod h1:qJIKKSmdqz1tVzNtst1DZzvaqOU1onk1rc03IeM3Owk= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= 
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8= +github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docker/cli v25.0.1+incompatible h1:mFpqnrS6Hsm3v1k7Wa/BO23oz0k121MTbTO1lpcGSkU= +github.com/docker/cli v25.0.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/docker v27.1.2+incompatible h1:AhGzR1xaQIy53qCkxARaFluI00WPGtXn0AJuoQsVYTY= +github.com/docker/docker v27.1.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/eclipse/paho.mqtt.golang v1.4.3 h1:2kwcUGn8seMUfWndX0hGbvH8r7crgcJguQNCyp70xik= +github.com/eclipse/paho.mqtt.golang v1.4.3/go.mod h1:CSYvoAlsMkhYOXh/oKyxa8EcBci6dVkLCbo5tTC1RIE= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= +github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod 
h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g= +github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fxamacker/cbor/v2 v2.2.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= +github.com/fxamacker/cbor/v2 v2.5.0 h1:oHsG0V/Q6E/wqTS2O1Cozzsy69nqCiguo5Q1a1ADivE= +github.com/fxamacker/cbor/v2 v2.5.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-cmd/cmd v1.4.2 h1:pnX38iIJHh4huzBSqfkAZkfXrVwM/5EccAJmrVqMnbg= +github.com/go-cmd/cmd v1.4.2/go.mod h1:u3hxg/ry+D5kwh8WvUkHLAMe2zQCaXd00t35WfQaOFk= +github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= +github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.13.0 
h1:OoneCcHKHQ03LfBpoQCUfCluwd2Vt3ohz+kvbJneZAU= +github.com/go-kit/kit v0.13.0/go.mod h1:phqEHMMUbyrCFCTgH48JueqrM3md2HcAZ8N3XE4FKDg= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= +github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= +github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= +github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/go-zoo/bone v1.3.0 h1:PY6sHq37FnQhj+4ZyqFIzJQHvrrGx0GEc3vTZZC/OsI= 
+github.com/go-zoo/bone v1.3.0/go.mod h1:HI3Lhb7G3UQcAwEhOJ2WyNcsFtQX1WYHa0Hl4OBbhW8= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA= +github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= 
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.6.0 
h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= +github.com/google/pprof v0.0.0-20230228050547-1710fef4ab10 h1:CqYfpuYIjnlNxM3msdyPRKabhXZWbKjf3Q8BWROFBso= +github.com/google/pprof v0.0.0-20230228050547-1710fef4ab10/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= +github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/consul/api v1.13.0/go.mod h1:ZlVrynguJKcYr54zGaDbaL3fOvKC9m72FhPvA8T35KQ= +github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod 
h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= +github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 
h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= +github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= +github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoIospckxBxk6Q= +github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hjson/hjson-go/v4 v4.0.0/go.mod h1:KaYt3bTw3zhBjYqnXkYywcYctk0A2nxeEFTse3rH13E= +github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= +github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= 
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= +github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/knadh/koanf v1.5.0 h1:q2TSd/3Pyc/5yP9ldIrSdIz26MCcyNQzW0pEAugLPNs= +github.com/knadh/koanf v1.5.0/go.mod h1:Hgyjp4y8v44hpZtPzs7JZfRAW5AhN7KfZcwv1RYggDs= +github.com/knadh/koanf/v2 v2.0.1 h1:1dYGITt1I23x8cfx8ZnldtezdyaZtfAuRtIFOiRzK7g= +github.com/knadh/koanf/v2 v2.0.1/go.mod h1:ZeiIlIDXTE7w1lMT6UVcNiRAS2/rCeLn/GdLNvY1Dus= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod 
h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mainflux/mainflux v0.0.0-20220415135135-92d8fb99bf82 h1:UWQLBZ7ychamG9uuBtCwVmt1tBQxPQuJ1VszC9zYFS8= +github.com/mainflux/mainflux v0.0.0-20220415135135-92d8fb99bf82/go.mod h1:YPGCoouBMT7gP6u4Hnj7vafJqRzT5yiuKtBNMC/DUIE= +github.com/mainflux/senml v1.5.0 h1:GAd1y1eMohfa6sVYcr2iQfVfkkh9l/q7B1TWF5L68xs= +github.com/mainflux/senml v1.5.0/go.mod h1:SMX76mM5yenjLVjZOM27+njCGkP+AA64O46nRQiBRlE= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= +github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/matttproud/golang_protobuf_extensions 
v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 h1:BpfhmLKZf+SjVanKKhCgf3bg+511DmU9eDQTen7LLbY= +github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= 
+github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mostynb/go-grpc-compression v1.2.2 h1:XaDbnRvt2+1vgr0b/l0qh4mJAfIxE0bKXtz2Znl3GGI= +github.com/mostynb/go-grpc-compression v1.2.2/go.mod h1:GOCr2KBxXcblCuczg3YdLQlcin1/NfyDA348ckuCH6w= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nats-io/nats.go v1.32.0 h1:Bx9BZS+aXYlxW08k8Gd3yR2s73pV5XSoAQUyp1Kwvp0= +github.com/nats-io/nats.go v1.32.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8= +github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI= +github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc= +github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= +github.com/nats-io/nuid 
v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/npillmayer/nestext v0.1.3/go.mod h1:h2lrijH8jpicr25dFY+oAJLyzlya6jhnuG+zWp9L0Uk= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/ulid/v2 v2.0.2 h1:r4fFzBm+bv0wNKNh5eXTwU7i85y5x+uwkxCUTNVQqLc= +github.com/oklog/ulid/v2 v2.0.2/go.mod h1:mtBL0Qe/0HAx6/a4Z30qxVIAL1eQDweXq5lxOEiwQ68= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= +github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= +github.com/opencontainers/runc v1.1.12 h1:BOIssBaW1La0/qbNZHXOOa71dZfZEQOzW7dqQf3phss= +github.com/opencontainers/runc v1.1.12/go.mod h1:S+lQwSfncpBha7XTy/5lBwWgm5+y5Ma/O44Ekby9FK8= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/orb-community/orb v0.30.0 h1:JHOEAwq0UU8DrLlZhyHv43lVMglwbhS1VBz4B8SZvTM= +github.com/orb-community/orb v0.30.0/go.mod h1:XlE5fdjlS4HJZfwDwl/zaoqmPNgpnsoOveiwTJsApM0= +github.com/ory/dockertest/v3 v3.10.0 h1:4K3z2VMe8Woe++invjaTB7VRyQXQy5UY+loujO4aNE4= +github.com/ory/dockertest/v3 v3.10.0/go.mod h1:nr57ZbRWMqfsdGdFNLHz5jjNdDb7VVFnzAeW1n5N1Lg= +github.com/ory/keto/proto/ory/keto/acl/v1alpha1 v0.0.0-20210616104402-80e043246cf9 h1:gP86NkMkUlqMOTjFQ8lt8T1HbHtCJGGeeeh/6c+nla0= +github.com/ory/keto/proto/ory/keto/acl/v1alpha1 v0.0.0-20210616104402-80e043246cf9/go.mod h1:8IoeBQqIRKWU5L6dTKQTlTwVhlUawpqSBJZWfLLN4FM= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0/go.mod 
h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pelletier/go-toml/v2 v2.1.1 h1:LWAJwfNvjQZCFIDKWYQaM62NcYeYViCmWIwmOStowAI= +github.com/pelletier/go-toml/v2 v2.1.1/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA= +github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= +github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= 
+github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= +github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.46.0 h1:doXzt5ybi1HBKpsZOL0sSkaNHJJqkyfEWZGGqqScV0Y= +github.com/prometheus/common v0.46.0/go.mod h1:Tp0qkxpb9Jsg54QMe+EAmqXkSV7Evdy1BTn+g2pa/hQ= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/statsd_exporter v0.22.7 h1:7Pji/i2GuhK6Lu7DHrtTkFmNBCudCPT1pX2CziuyQR0= 
+github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9dFqnUakOjnEuMPJJJnI= +github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rs/cors v1.10.1 h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo= +github.com/rs/cors v1.10.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rubenv/sql-migrate v1.7.0 h1:HtQq1xyTN2ISmQDggnh0c9U3JlP8apWh8YO2jzlXpTI= +github.com/rubenv/sql-migrate v1.7.0/go.mod h1:S4wtDEG1CKn+0ShpTtzWhFpHHI5PvCUtiGI+C+Z2THE= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= 
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= +github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= +go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= 
+go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/collector v0.91.0 h1:C7sGUJDJ5nwm+CkWpAaVP3lNsuYpwSRbkmLncFjkmO8= +go.opentelemetry.io/collector v0.91.0/go.mod h1:YhQpIDZsn+bICAAqgBwXk9wqK8GKZDv+aogfG52zUuE= +go.opentelemetry.io/collector/component v0.91.0 h1:aBT1i2zGyfh9PalYJLfXVvQp+osHyalwyDFselI1CtA= +go.opentelemetry.io/collector/component v0.91.0/go.mod h1:2KBHvjNFdU7oOjsObQeC4Ta2Ef607OISU5obznW00fw= +go.opentelemetry.io/collector/config/configauth v0.91.0 h1:SjWKimuqlpfS3sIlFpfzdkSY/AmMMCEmn9+KRcjEU+s= +go.opentelemetry.io/collector/config/configauth v0.91.0/go.mod h1:wmmMYqv6PxwY+/h7qqvd/LP0XN/wzXoECDu6PYz2Of0= +go.opentelemetry.io/collector/config/configcompression v0.91.0 h1:v+jEpFhLgfJDCUCPsSF03gjoFEvm77PofTCqHKKgXTs= +go.opentelemetry.io/collector/config/configcompression v0.91.0/go.mod h1:LaavoxZsro5lL7qh1g9DMifG0qixWPEecW18Qr8bpag= +go.opentelemetry.io/collector/config/configgrpc v0.91.0 h1:+2zPmgVOhKgelluGaGwE3OMKqwi56i6OiU0+7xWTetM= +go.opentelemetry.io/collector/config/configgrpc v0.91.0/go.mod h1:iWDbg9vYaayewmbKfM2zEk4YvaOOwrs0eiUffypcZFk= +go.opentelemetry.io/collector/config/confighttp v0.91.0 h1:YAOyXcDaLDnF3UqPHH4kYU8lx8BqXJ7hS3Ou8GcmqpQ= +go.opentelemetry.io/collector/config/confighttp v0.91.0/go.mod h1:R6y8KSJzqDe6CE6JsYwt4CTZ2B4AlqRA+V74OJPX3vE= +go.opentelemetry.io/collector/config/confignet v0.91.0 h1:3huNXh04O3wXaN4qPhmmiefyz4dYbOlNcR/OKMByqig= +go.opentelemetry.io/collector/config/confignet v0.91.0/go.mod h1:cpO8JYWGONaViOygKVw+Hd2UoBcn2cUiyi0WWeFTwJY= +go.opentelemetry.io/collector/config/configopaque v0.91.0 h1:bQgJPyARbuXAsU2p6h2YbEm1kHb1stS6hg42ekyMZmI= +go.opentelemetry.io/collector/config/configopaque v0.91.0/go.mod h1:TPCHaU+QXiEV+JXbgyr6mSErTI9chwQyasDVMdJr3eY= +go.opentelemetry.io/collector/config/configtelemetry 
v0.91.0 h1:mEwvqrYfwUJ7LwYfpcF9M8z7LHFoYaKhEPhnERD/88E= +go.opentelemetry.io/collector/config/configtelemetry v0.91.0/go.mod h1:+LAXM5WFMW/UbTlAuSs6L/W72WC+q8TBJt/6z39FPOU= +go.opentelemetry.io/collector/config/configtls v0.91.0 h1:lZromNeOslPwyVlTPMOzF2q++SY+VONvfH3cDqA0kKk= +go.opentelemetry.io/collector/config/configtls v0.91.0/go.mod h1:E+CW5gZoH8V3z5aSlZxwiof7GAcayzn1HRM+uRILLEI= +go.opentelemetry.io/collector/config/internal v0.91.0 h1:Yx17oFdXOPnY83Jfe1oiXhvfYW7RX/xh3/kpV/iYibM= +go.opentelemetry.io/collector/config/internal v0.91.0/go.mod h1:42VsQ/1kP2qnvzjNi+dfNP+KyCFRADejyrJ8m2GVL3M= +go.opentelemetry.io/collector/confmap v0.91.0 h1:7U2MT+u74oEzq/WWrpXSLKB7nX5jPNC4drwtQdYfwKk= +go.opentelemetry.io/collector/confmap v0.91.0/go.mod h1:uxV+fZ85kG31oovL6Cl3fAMQ3RRPwUvfAbbA9WT1Yhk= +go.opentelemetry.io/collector/consumer v0.91.0 h1:0nU1lUe2S0b8iOmF3w3R/9Dt24n413thRTbXz/nJgrM= +go.opentelemetry.io/collector/consumer v0.91.0/go.mod h1:phTUQmr7hpYfwXyDXo4mFHVjYrlSbZE+nZYlKlbVxGs= +go.opentelemetry.io/collector/exporter v0.91.0 h1:guWcGflFjaenp3BMxAmAKjb8RQG80jQQKjuUFouS+z8= +go.opentelemetry.io/collector/exporter v0.91.0/go.mod h1:hkOBunNNWu6CaTtkRsCJ/OJ509REJZg+DDElevFIQCQ= +go.opentelemetry.io/collector/extension v0.91.0 h1:bkoSLgnWm4g6n+RLmyKG6Up7dr8KmJy68quonoLZnr0= +go.opentelemetry.io/collector/extension v0.91.0/go.mod h1:F3r0fVTTh4sYR0GVv51Qez8lk8v77kTDPdyMOp6A2kg= +go.opentelemetry.io/collector/extension/auth v0.91.0 h1:28Hv5W0GZgv2jR5IiFdJzutTs91KmXFh8DUfVTjwwmI= +go.opentelemetry.io/collector/extension/auth v0.91.0/go.mod h1:diY6Sw7cOAn2qivKipZk4niBFzCCFBj7swAXiG2h9ro= +go.opentelemetry.io/collector/featuregate v1.0.0 h1:5MGqe2v5zxaoo73BUOvUTunftX5J8RGrbFsC2Ha7N3g= +go.opentelemetry.io/collector/featuregate v1.0.0/go.mod h1:xGbRuw+GbutRtVVSEy3YR2yuOlEyiUMhN2M9DJljgqY= +go.opentelemetry.io/collector/pdata v1.0.0 h1:ECP2jnLztewsHmL1opL8BeMtWVc7/oSlKNhfY9jP8ec= +go.opentelemetry.io/collector/pdata v1.0.0/go.mod 
h1:TsDFgs4JLNG7t6x9D8kGswXUz4mme+MyNChHx8zSF6k= +go.opentelemetry.io/collector/receiver v0.91.0 h1:0TZF/0OXoJtxgm+mvOinRRXo9LgVyOsOgCQfWkNGXJA= +go.opentelemetry.io/collector/receiver v0.91.0/go.mod h1:d5qo2mpovqKoi47hrMxj5BLdLzOXM0mUHL5CKrjfWNM= +go.opentelemetry.io/collector/receiver/otlpreceiver v0.91.0 h1:1Eyc1uR8yr3heKkC4YXFoZip0JqgOXuOiN/tXvl9WUo= +go.opentelemetry.io/collector/receiver/otlpreceiver v0.91.0/go.mod h1:7am8EW0xmHLxeeGIb0xTcsoVc6+1LfNCGdc+b7OvE8k= +go.opentelemetry.io/collector/semconv v0.91.0 h1:TRd+yDDfKQl+aNtS24wmEbJp1/QE/xAFV9SB5zWGxpE= +go.opentelemetry.io/collector/semconv v0.91.0/go.mod h1:j/8THcqVxFna1FpvA2zYIsUperEtOaRaqoLYIN4doWw= +go.opentelemetry.io/contrib/config v0.1.1 h1:lIUTrMWkfDE0GvzBLhwv6ATDB1vntrnTsRvUMkZKnfQ= +go.opentelemetry.io/contrib/config v0.1.1/go.mod h1:rDrK4+PS6Cs+WIphU/GO5Sk4TGV36lEQqk/Z1vZkaLI= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= +go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= +go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= +go.opentelemetry.io/otel/exporters/prometheus v0.44.1-0.20231201153405-6027c1ae76f2 h1:TnhkxGJ5qPHAMIMI4r+HPT/BbpoHxqn4xONJrok054o= +go.opentelemetry.io/otel/exporters/prometheus v0.44.1-0.20231201153405-6027c1ae76f2/go.mod h1:ERL2uIeBtg4TxZdojHUwzZfIFlUIjZtxubT5p4h1Gjg= +go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= +go.opentelemetry.io/otel/metric v1.21.0/go.mod 
h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= +go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= +go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= +go.opentelemetry.io/otel/sdk/metric v1.21.0 h1:smhI5oD714d6jHE6Tie36fPx4WDFIg+Y6RfAY4ICcR0= +go.opentelemetry.io/otel/sdk/metric v1.21.0/go.mod h1:FJ8RAsoPGv/wYMgBdUJXOm+6pzFY3YdljnXtv1SBE8Q= +go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= +go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= +golang.org/x/crypto v0.18.0/go.mod 
h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20231127185646-65229373498e h1:Gvh4YaCaXNs6dKTlfgismwWZKyjVZXwOPfIyUaqU3No= +golang.org/x/exp v0.0.0-20231127185646-65229373498e/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net 
v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= +golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= +golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= +golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= 
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f h1:Vn+VyHU5guc9KjB5KrjI2q0wCOWEOIh0OEsleqakHJg= +google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f/go.mod h1:nWSwAFPb+qfNJXsoeO3Io7zf4tMSfN8EA8RlDA04GhY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 h1:DC7wcm+i+P1rN3Ff07vL+OndGg5OhNddHyTA+ocPqYE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4/go.mod h1:eJVxU6o+4G1PSczBr85xmyvSNYAKvAYgkub40YGomFM= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= +google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod 
h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod 
h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=