diff --git a/.github/workflows/scoped-test.yaml b/.github/workflows/scoped-test.yaml
index f417839f107b..d0139df299f8 100644
--- a/.github/workflows/scoped-test.yaml
+++ b/.github/workflows/scoped-test.yaml
@@ -30,12 +30,13 @@ jobs:
             - '**/*_test.go'
 
   scoped-tests:
+    needs: changedfiles
+    if: needs.changedfiles.outputs.go_sources != '' || needs.changedfiles.outputs.go_tests != ''
     strategy:
       fail-fast: false
       matrix:
         os: [ windows-latest ]
     runs-on: ${{ matrix.os }}
-    needs: changedfiles
     steps:
       - name: Echo changed files
         shell: bash
@@ -59,9 +60,14 @@ jobs:
             ./.tools
           key: go-cache-${{ runner.os }}-${{ hashFiles('**/go.sum') }}
 
-      - name: Build test tools
+      - name: Build gotestsum on Windows
+        if: runner.os == 'Windows'
         run: make "$(${PWD} -replace '\\', '/')/.tools/gotestsum"
 
+      - name: Build gotestsum
+        if: runner.os != 'Windows'
+        run: make "$PWD/.tools/gotestsum"
+
       - name: Run changed tests
         if: needs.changedfiles.outputs.go_tests
         env:
diff --git a/.github/workflows/update-otel.yaml b/.github/workflows/update-otel.yaml
new file mode 100644
index 000000000000..5064d77500fb
--- /dev/null
+++ b/.github/workflows/update-otel.yaml
@@ -0,0 +1,36 @@
+name: 'Update contrib to the latest core source'
+on:
+  workflow_dispatch:
+  # TODO: Enable schedule once it's verified that the action works as expected.
+  # schedule:
+  #   - cron: "27 21 * * *" # Run at an arbitrary time on weekdays.
+
+jobs:
+  update-otel:
+    runs-on: ubuntu-24.04
+    if: ${{ github.repository_owner == 'open-telemetry' }}
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          path: opentelemetry-collector-contrib
+      - name: Pull the latest collector repo
+        uses: actions/checkout@v4
+        with:
+          path: opentelemetry-collector
+          repository: open-telemetry/opentelemetry-collector
+      - name: Update to latest opentelemetry-collector release
+        run: |
+          cd opentelemetry-collector-contrib
+          git config user.name opentelemetrybot
+          git config user.email 107717825+opentelemetrybot@users.noreply.github.com
+          make genotelcontribcol
+          make update-otel
+      - name: Create pull request against main
+        uses: peter-evans/create-pull-request@v7
+        with:
+          branch: opentelemetrybot/update-otel
+          token: ${{ secrets.OPENTELEMETRYBOT_GITHUB_TOKEN }}
+          commit-message: Update to latest opentelemetry-collector release.
+          title: "[chore] Update to latest opentelemetry-collector"
+          body: |
+            This PR updates the opentelemetry-collector dependency to the latest release.
diff --git a/Makefile b/Makefile
index 176ba2b07f85..341c909dbfc3 100644
--- a/Makefile
+++ b/Makefile
@@ -421,7 +421,8 @@ update-otel:$(MULTIMOD)
 	$(call updatehelper,$(CORE_VERSIONS),$(GOMOD),./cmd/oteltestbedcol/builder-config.yaml)
 	$(MAKE) genotelcontribcol
 	$(MAKE) genoteltestbedcol
-	$(MAKE) oteltestbedcol
+	$(MAKE) generate
+	$(MAKE) crosslink
 	$(MAKE) remove-toolchain
 	git add . && git commit -s -m "[chore] mod and toolchain tidy" ; \
diff --git a/Makefile.Common b/Makefile.Common
index 021a39c847b7..ff753533d19f 100644
--- a/Makefile.Common
+++ b/Makefile.Common
@@ -279,7 +279,7 @@ for-affected-components:
 	if [ -z '$${DEPENDENT_PKGS}' ]; then \
 		echo "No other package depends on the one being changed."; \
 	else \
-		DEPENDENT_PKG_DIRS=$$(echo $${DEPENDENT_PKGS} | tr ' ' '\n' | xargs -I {} grep --include=go.mod -rl {} | xargs dirname | uniq); \
+		DEPENDENT_PKG_DIRS=$$(echo $${DEPENDENT_PKGS} | tr ' ' '\n' | xargs -I {} grep --include=go.mod -rl {} | xargs -r dirname | uniq); \
 		set -e; for dir in $$(echo $${DEPENDENT_PKG_DIRS}); do \
 			(cd "$${dir}" && \
 				echo "running $${CMD} in $${dir}" && \
diff --git a/cmd/opampsupervisor/e2e_test.go b/cmd/opampsupervisor/e2e_test.go
index 96935b4bd0d0..b4f80e9624c8 100644
--- a/cmd/opampsupervisor/e2e_test.go
+++ b/cmd/opampsupervisor/e2e_test.go
@@ -64,7 +64,7 @@ func (tl testLogger) Errorf(_ context.Context, format string, args ...any) {
 	tl.t.Logf(format, args...)
 }
 
-func defaultConnectingHandler(connectionCallbacks server.ConnectionCallbacksStruct) func(request *http.Request) types.ConnectionResponse {
+func defaultConnectingHandler(connectionCallbacks types.ConnectionCallbacks) func(request *http.Request) types.ConnectionResponse {
 	return func(_ *http.Request) types.ConnectionResponse {
 		return types.ConnectionResponse{
 			Accept: true,
@@ -73,11 +73,11 @@ func defaultConnectingHandler(connectionCallbacks server.ConnectionCallbacksStru
 	}
 }
 
-// onConnectingFuncFactory is a function that will be given to server.CallbacksStruct as
+// onConnectingFuncFactory is a function that will be given to types.ConnectionCallbacks as
 // OnConnectingFunc. This allows changing the ConnectionCallbacks both from the newOpAMPServer
 // caller and inside of newOpAMP Server, and for custom implementations of the value for `Accept`
 // in types.ConnectionResponse.
-type onConnectingFuncFactory func(connectionCallbacks server.ConnectionCallbacksStruct) func(request *http.Request) types.ConnectionResponse +type onConnectingFuncFactory func(connectionCallbacks types.ConnectionCallbacks) func(request *http.Request) types.ConnectionResponse type testingOpAMPServer struct { addr string @@ -87,20 +87,20 @@ type testingOpAMPServer struct { shutdown func() } -func newOpAMPServer(t *testing.T, connectingCallback onConnectingFuncFactory, callbacks server.ConnectionCallbacksStruct) *testingOpAMPServer { +func newOpAMPServer(t *testing.T, connectingCallback onConnectingFuncFactory, callbacks types.ConnectionCallbacks) *testingOpAMPServer { s := newUnstartedOpAMPServer(t, connectingCallback, callbacks) s.start() return s } -func newUnstartedOpAMPServer(t *testing.T, connectingCallback onConnectingFuncFactory, callbacks server.ConnectionCallbacksStruct) *testingOpAMPServer { +func newUnstartedOpAMPServer(t *testing.T, connectingCallback onConnectingFuncFactory, callbacks types.ConnectionCallbacks) *testingOpAMPServer { var agentConn atomic.Value var isAgentConnected atomic.Bool var didShutdown atomic.Bool connectedChan := make(chan bool) s := server.New(testLogger{t: t}) - onConnectedFunc := callbacks.OnConnectedFunc - callbacks.OnConnectedFunc = func(ctx context.Context, conn types.Connection) { + onConnectedFunc := callbacks.OnConnected + callbacks.OnConnected = func(ctx context.Context, conn types.Connection) { if onConnectedFunc != nil { onConnectedFunc(ctx, conn) } @@ -108,8 +108,8 @@ func newUnstartedOpAMPServer(t *testing.T, connectingCallback onConnectingFuncFa isAgentConnected.Store(true) connectedChan <- true } - onConnectionCloseFunc := callbacks.OnConnectionCloseFunc - callbacks.OnConnectionCloseFunc = func(conn types.Connection) { + onConnectionCloseFunc := callbacks.OnConnectionClose + callbacks.OnConnectionClose = func(conn types.Connection) { isAgentConnected.Store(false) connectedChan <- false if onConnectionCloseFunc != nil { @@ -117,8 +117,8 @@ func newUnstartedOpAMPServer(t *testing.T, connectingCallback onConnectingFuncFa } } handler, _, err := s.Attach(server.Settings{ - Callbacks: server.CallbacksStruct{ - OnConnectingFunc: connectingCallback(callbacks), + Callbacks: types.Callbacks{ + OnConnecting: connectingCallback(callbacks), }, }) require.NoError(t, err) @@ -211,8 +211,8 @@ func TestSupervisorStartsCollectorWithRemoteConfig(t *testing.T) { server := newOpAMPServer( t, defaultConnectingHandler, - server.ConnectionCallbacksStruct{ - OnMessageFunc: func(_ context.Context, _ types.Connection, message *protobufs.AgentToServer) *protobufs.ServerToAgent { + types.ConnectionCallbacks{ + OnMessage: func(_ context.Context, _ types.Connection, message *protobufs.AgentToServer) *protobufs.ServerToAgent { if message.EffectiveConfig != nil { config := message.EffectiveConfig.ConfigMap.ConfigMap[""] if config != nil { @@ -287,8 +287,8 @@ func TestSupervisorStartsCollectorWithNoOpAMPServer(t *testing.T) { require.NoError(t, os.WriteFile(remoteConfigFilePath, marshalledRemoteConfig, 0o600)) connected := atomic.Bool{} - server := newUnstartedOpAMPServer(t, defaultConnectingHandler, server.ConnectionCallbacksStruct{ - OnConnectedFunc: func(ctx context.Context, conn types.Connection) { + server := newUnstartedOpAMPServer(t, defaultConnectingHandler, types.ConnectionCallbacks{ + OnConnected: func(ctx context.Context, conn types.Connection) { connected.Store(true) }, }) @@ -331,19 +331,20 @@ func TestSupervisorStartsWithNoOpAMPServer(t *testing.T) { 
configuredChan := make(chan struct{}) connected := atomic.Bool{} - server := newUnstartedOpAMPServer(t, defaultConnectingHandler, server.ConnectionCallbacksStruct{ - OnConnectedFunc: func(ctx context.Context, conn types.Connection) { - connected.Store(true) - }, - OnMessageFunc: func(ctx context.Context, conn types.Connection, message *protobufs.AgentToServer) *protobufs.ServerToAgent { - lastCfgHash := message.GetRemoteConfigStatus().GetLastRemoteConfigHash() - if bytes.Equal(lastCfgHash, hash) { - close(configuredChan) - } + server := newUnstartedOpAMPServer(t, defaultConnectingHandler, + types.ConnectionCallbacks{ + OnConnected: func(ctx context.Context, conn types.Connection) { + connected.Store(true) + }, + OnMessage: func(ctx context.Context, conn types.Connection, message *protobufs.AgentToServer) *protobufs.ServerToAgent { + lastCfgHash := message.GetRemoteConfigStatus().GetLastRemoteConfigHash() + if bytes.Equal(lastCfgHash, hash) { + close(configuredChan) + } - return &protobufs.ServerToAgent{} - }, - }) + return &protobufs.ServerToAgent{} + }, + }) defer server.shutdown() // The supervisor is started without a running OpAMP server. @@ -415,8 +416,8 @@ func TestSupervisorRestartsCollectorAfterBadConfig(t *testing.T) { server := newOpAMPServer( t, defaultConnectingHandler, - server.ConnectionCallbacksStruct{ - OnMessageFunc: func(_ context.Context, _ types.Connection, message *protobufs.AgentToServer) *protobufs.ServerToAgent { + types.ConnectionCallbacks{ + OnMessage: func(_ context.Context, _ types.Connection, message *protobufs.AgentToServer) *protobufs.ServerToAgent { if message.Health != nil { healthReport.Store(message.Health) } @@ -501,8 +502,8 @@ func TestSupervisorConfiguresCapabilities(t *testing.T) { server := newOpAMPServer( t, defaultConnectingHandler, - server.ConnectionCallbacksStruct{ - OnMessageFunc: func(_ context.Context, _ types.Connection, message *protobufs.AgentToServer) *protobufs.ServerToAgent { + types.ConnectionCallbacks{ + OnMessage: func(_ context.Context, _ types.Connection, message *protobufs.AgentToServer) *protobufs.ServerToAgent { capabilities.Store(message.Capabilities) return &protobufs.ServerToAgent{} @@ -556,8 +557,8 @@ func TestSupervisorBootstrapsCollector(t *testing.T) { server := newOpAMPServer( t, defaultConnectingHandler, - server.ConnectionCallbacksStruct{ - OnMessageFunc: func(_ context.Context, _ types.Connection, message *protobufs.AgentToServer) *protobufs.ServerToAgent { + types.ConnectionCallbacks{ + OnMessage: func(_ context.Context, _ types.Connection, message *protobufs.AgentToServer) *protobufs.ServerToAgent { if message.AgentDescription != nil { agentDescription.Store(message.AgentDescription) } @@ -602,8 +603,8 @@ func TestSupervisorReportsEffectiveConfig(t *testing.T) { server := newOpAMPServer( t, defaultConnectingHandler, - server.ConnectionCallbacksStruct{ - OnMessageFunc: func(_ context.Context, _ types.Connection, message *protobufs.AgentToServer) *protobufs.ServerToAgent { + types.ConnectionCallbacks{ + OnMessage: func(_ context.Context, _ types.Connection, message *protobufs.AgentToServer) *protobufs.ServerToAgent { if message.EffectiveConfig != nil { config := message.EffectiveConfig.ConfigMap.ConfigMap[""] if config != nil { @@ -713,8 +714,8 @@ func TestSupervisorAgentDescriptionConfigApplies(t *testing.T) { server := newOpAMPServer( t, defaultConnectingHandler, - server.ConnectionCallbacksStruct{ - OnMessageFunc: func(_ context.Context, _ types.Connection, message *protobufs.AgentToServer) *protobufs.ServerToAgent 
{ + types.ConnectionCallbacks{ + OnMessage: func(_ context.Context, _ types.Connection, message *protobufs.AgentToServer) *protobufs.ServerToAgent { if message.AgentDescription != nil { select { case agentDescMessageChan <- message: @@ -866,8 +867,8 @@ func TestSupervisorRestartCommand(t *testing.T) { server := newOpAMPServer( t, defaultConnectingHandler, - server.ConnectionCallbacksStruct{ - OnMessageFunc: func(_ context.Context, _ types.Connection, message *protobufs.AgentToServer) *protobufs.ServerToAgent { + types.ConnectionCallbacks{ + OnMessage: func(_ context.Context, _ types.Connection, message *protobufs.AgentToServer) *protobufs.ServerToAgent { if message.Health != nil { healthReport.Store(message.Health) } @@ -948,7 +949,7 @@ func TestSupervisorOpAMPConnectionSettings(t *testing.T) { initialServer := newOpAMPServer( t, defaultConnectingHandler, - server.ConnectionCallbacksStruct{}) + types.ConnectionCallbacks{}) s := newSupervisor(t, "accepts_conn", map[string]string{"url": initialServer.addr}) @@ -960,11 +961,11 @@ func TestSupervisorOpAMPConnectionSettings(t *testing.T) { newServer := newOpAMPServer( t, defaultConnectingHandler, - server.ConnectionCallbacksStruct{ - OnConnectedFunc: func(_ context.Context, _ types.Connection) { + types.ConnectionCallbacks{ + OnConnected: func(_ context.Context, _ types.Connection) { connectedToNewServer.Store(true) }, - OnMessageFunc: func(_ context.Context, _ types.Connection, _ *protobufs.AgentToServer) *protobufs.ServerToAgent { + OnMessage: func(_ context.Context, _ types.Connection, _ *protobufs.AgentToServer) *protobufs.ServerToAgent { return &protobufs.ServerToAgent{} }, }) @@ -999,8 +1000,8 @@ func TestSupervisorRestartsWithLastReceivedConfig(t *testing.T) { initialServer := newOpAMPServer( t, defaultConnectingHandler, - server.ConnectionCallbacksStruct{ - OnMessageFunc: func(_ context.Context, _ types.Connection, message *protobufs.AgentToServer) *protobufs.ServerToAgent { + types.ConnectionCallbacks{ + OnMessage: func(_ context.Context, _ types.Connection, message *protobufs.AgentToServer) *protobufs.ServerToAgent { if message.EffectiveConfig != nil { config := message.EffectiveConfig.ConfigMap.ConfigMap[""] if config != nil { @@ -1043,8 +1044,8 @@ func TestSupervisorRestartsWithLastReceivedConfig(t *testing.T) { newServer := newOpAMPServer( t, defaultConnectingHandler, - server.ConnectionCallbacksStruct{ - OnMessageFunc: func(_ context.Context, _ types.Connection, message *protobufs.AgentToServer) *protobufs.ServerToAgent { + types.ConnectionCallbacks{ + OnMessage: func(_ context.Context, _ types.Connection, message *protobufs.AgentToServer) *protobufs.ServerToAgent { if message.EffectiveConfig != nil { config := message.EffectiveConfig.ConfigMap.ConfigMap[""] if config != nil { @@ -1087,8 +1088,8 @@ func TestSupervisorPersistsInstanceID(t *testing.T) { server := newOpAMPServer( t, defaultConnectingHandler, - server.ConnectionCallbacksStruct{ - OnMessageFunc: func(_ context.Context, _ types.Connection, message *protobufs.AgentToServer) *protobufs.ServerToAgent { + types.ConnectionCallbacks{ + OnMessage: func(_ context.Context, _ types.Connection, message *protobufs.AgentToServer) *protobufs.ServerToAgent { select { case agentIDChan <- message.InstanceUid: default: @@ -1163,8 +1164,8 @@ func TestSupervisorPersistsNewInstanceID(t *testing.T) { server := newOpAMPServer( t, defaultConnectingHandler, - server.ConnectionCallbacksStruct{ - OnMessageFunc: func(_ context.Context, _ types.Connection, message *protobufs.AgentToServer) 
*protobufs.ServerToAgent { + types.ConnectionCallbacks{ + OnMessage: func(_ context.Context, _ types.Connection, message *protobufs.AgentToServer) *protobufs.ServerToAgent { select { case agentIDChan <- message.InstanceUid: default: @@ -1242,7 +1243,7 @@ func TestSupervisorWritesAgentFilesToStorageDir(t *testing.T) { server := newOpAMPServer( t, defaultConnectingHandler, - server.ConnectionCallbacksStruct{}, + types.ConnectionCallbacks{}, ) s := newSupervisor(t, "basic", map[string]string{ @@ -1270,8 +1271,8 @@ func TestSupervisorStopsAgentProcessWithEmptyConfigMap(t *testing.T) { server := newOpAMPServer( t, defaultConnectingHandler, - server.ConnectionCallbacksStruct{ - OnMessageFunc: func(_ context.Context, _ types.Connection, message *protobufs.AgentToServer) *protobufs.ServerToAgent { + types.ConnectionCallbacks{ + OnMessage: func(_ context.Context, _ types.Connection, message *protobufs.AgentToServer) *protobufs.ServerToAgent { if message.EffectiveConfig != nil { config := message.EffectiveConfig.ConfigMap.ConfigMap[""] if config != nil { @@ -1386,8 +1387,8 @@ func TestSupervisorLogging(t *testing.T) { require.NoError(t, os.WriteFile(remoteCfgFilePath, marshalledRemoteCfg, 0o600)) connected := atomic.Bool{} - server := newUnstartedOpAMPServer(t, defaultConnectingHandler, server.ConnectionCallbacksStruct{ - OnConnectedFunc: func(ctx context.Context, conn types.Connection) { + server := newUnstartedOpAMPServer(t, defaultConnectingHandler, types.ConnectionCallbacks{ + OnConnected: func(ctx context.Context, conn types.Connection) { connected.Store(true) }, }) @@ -1449,8 +1450,8 @@ func TestSupervisorRemoteConfigApplyStatus(t *testing.T) { server := newOpAMPServer( t, defaultConnectingHandler, - server.ConnectionCallbacksStruct{ - OnMessageFunc: func(_ context.Context, _ types.Connection, message *protobufs.AgentToServer) *protobufs.ServerToAgent { + types.ConnectionCallbacks{ + OnMessage: func(_ context.Context, _ types.Connection, message *protobufs.AgentToServer) *protobufs.ServerToAgent { if message.EffectiveConfig != nil { config := message.EffectiveConfig.ConfigMap.ConfigMap[""] if config != nil { @@ -1586,8 +1587,8 @@ func TestSupervisorOpAmpServerPort(t *testing.T) { server := newOpAMPServer( t, defaultConnectingHandler, - server.ConnectionCallbacksStruct{ - OnMessageFunc: func(_ context.Context, _ types.Connection, message *protobufs.AgentToServer) *protobufs.ServerToAgent { + types.ConnectionCallbacks{ + OnMessage: func(_ context.Context, _ types.Connection, message *protobufs.AgentToServer) *protobufs.ServerToAgent { if message.EffectiveConfig != nil { config := message.EffectiveConfig.ConfigMap.ConfigMap[""] if config != nil { diff --git a/cmd/opampsupervisor/go.mod b/cmd/opampsupervisor/go.mod index f81caa637fb0..01f95769086e 100644 --- a/cmd/opampsupervisor/go.mod +++ b/cmd/opampsupervisor/go.mod @@ -10,7 +10,7 @@ require ( github.com/knadh/koanf/providers/file v1.1.2 github.com/knadh/koanf/providers/rawbytes v0.1.0 github.com/knadh/koanf/v2 v2.1.2 - github.com/open-telemetry/opamp-go v0.17.0 + github.com/open-telemetry/opamp-go v0.18.0 github.com/stretchr/testify v1.10.0 go.opentelemetry.io/collector/config/configopaque v1.23.0 go.opentelemetry.io/collector/config/configtls v1.23.0 @@ -29,7 +29,6 @@ require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/fsnotify/fsnotify v1.8.0 // indirect github.com/go-viper/mapstructure/v2 v2.2.1 // indirect - github.com/google/go-cmp v0.6.0 // indirect github.com/gorilla/websocket v1.5.3 // 
indirect github.com/knadh/koanf/providers/confmap v0.1.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect diff --git a/cmd/opampsupervisor/go.sum b/cmd/opampsupervisor/go.sum index 11ed3daec6fe..df86d5e3bebd 100644 --- a/cmd/opampsupervisor/go.sum +++ b/cmd/opampsupervisor/go.sum @@ -32,12 +32,14 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/open-telemetry/opamp-go v0.17.0 h1:3R4+B/6Sy8mknLBbzO3gqloqwTT02rCSRcr4ac2B124= -github.com/open-telemetry/opamp-go v0.17.0/go.mod h1:SGDhUoAx7uGutO4ENNMQla/tiSujxgZmMPJXIOPGBdk= +github.com/open-telemetry/opamp-go v0.18.0 h1:sNHsrBvGU2CMxCB1TRJXncDARrmxDEebx8dsEIawqA4= +github.com/open-telemetry/opamp-go v0.18.0/go.mod h1:9/1G6T5dnJz4cJtoYSr6AX18kHdOxnxxETJPZSHyEUg= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= go.opentelemetry.io/collector/config/configopaque v1.23.0 h1:SEnEzOHufGc4KGOjQq8zKIQuDBmRFl9ncZ3qs1SRpJk= diff --git a/cmd/opampsupervisor/supervisor/server.go b/cmd/opampsupervisor/supervisor/server.go index cd54f96336eb..4ecfd9afa617 100644 --- a/cmd/opampsupervisor/supervisor/server.go +++ b/cmd/opampsupervisor/supervisor/server.go @@ -13,24 +13,26 @@ import ( ) type flattenedSettings struct { - onMessageFunc func(conn serverTypes.Connection, message *protobufs.AgentToServer) - onConnectingFunc func(request *http.Request) (shouldConnect bool, rejectStatusCode int) - onConnectionCloseFunc func(conn serverTypes.Connection) - endpoint string + onMessage func(conn serverTypes.Connection, message *protobufs.AgentToServer) + onConnecting func(request *http.Request) (shouldConnect bool, rejectStatusCode int) + onConnectionClose func(conn serverTypes.Connection) + endpoint string } func (fs flattenedSettings) toServerSettings() server.StartSettings { return server.StartSettings{ Settings: server.Settings{ - Callbacks: fs, + Callbacks: serverTypes.Callbacks{ + OnConnecting: fs.OnConnecting, + }, }, ListenEndpoint: fs.endpoint, } } func (fs flattenedSettings) OnConnecting(request *http.Request) serverTypes.ConnectionResponse { - if fs.onConnectingFunc != nil { - shouldConnect, rejectStatusCode := fs.onConnectingFunc(request) + if fs.onConnecting != nil { + shouldConnect, rejectStatusCode := fs.onConnecting(request) if !shouldConnect { return serverTypes.ConnectionResponse{ Accept: false, @@ -40,23 +42,25 @@ func (fs flattenedSettings) OnConnecting(request *http.Request) serverTypes.Conn } return serverTypes.ConnectionResponse{ - Accept: true, - ConnectionCallbacks: fs, + Accept: true, + ConnectionCallbacks: 
serverTypes.ConnectionCallbacks{ + OnMessage: fs.OnMessage, + }, } } func (fs flattenedSettings) OnConnected(_ context.Context, _ serverTypes.Connection) {} func (fs flattenedSettings) OnMessage(_ context.Context, conn serverTypes.Connection, message *protobufs.AgentToServer) *protobufs.ServerToAgent { - if fs.onMessageFunc != nil { - fs.onMessageFunc(conn, message) + if fs.onMessage != nil { + fs.onMessage(conn, message) } return &protobufs.ServerToAgent{} } func (fs flattenedSettings) OnConnectionClose(conn serverTypes.Connection) { - if fs.onConnectionCloseFunc != nil { - fs.onConnectionCloseFunc(conn) + if fs.onConnectionClose != nil { + fs.onConnectionClose(conn) } } diff --git a/cmd/opampsupervisor/supervisor/server_test.go b/cmd/opampsupervisor/supervisor/server_test.go index e35c11ab186e..e4272270300a 100644 --- a/cmd/opampsupervisor/supervisor/server_test.go +++ b/cmd/opampsupervisor/supervisor/server_test.go @@ -28,7 +28,7 @@ func Test_flattenedSettings_OnConnecting(t *testing.T) { t.Run("accept connection", func(t *testing.T) { onConnectingFuncCalled := false fs := flattenedSettings{ - onConnectingFunc: func(_ *http.Request) (shouldConnect bool, rejectStatusCode int) { + onConnecting: func(_ *http.Request) (shouldConnect bool, rejectStatusCode int) { onConnectingFuncCalled = true return true, 0 }, @@ -43,7 +43,7 @@ func Test_flattenedSettings_OnConnecting(t *testing.T) { t.Run("do not accept connection", func(t *testing.T) { onConnectingFuncCalled := false fs := flattenedSettings{ - onConnectingFunc: func(_ *http.Request) (shouldConnect bool, rejectStatusCode int) { + onConnecting: func(_ *http.Request) (shouldConnect bool, rejectStatusCode int) { onConnectingFuncCalled = true return false, 500 }, @@ -60,7 +60,7 @@ func Test_flattenedSettings_OnConnecting(t *testing.T) { func Test_flattenedSettings_OnMessage(t *testing.T) { onMessageFuncCalled := false fs := flattenedSettings{ - onMessageFunc: func(_ serverTypes.Connection, _ *protobufs.AgentToServer) { + onMessage: func(_ serverTypes.Connection, _ *protobufs.AgentToServer) { onMessageFuncCalled = true }, } @@ -74,7 +74,7 @@ func Test_flattenedSettings_OnMessage(t *testing.T) { func Test_flattenedSettings_OnConnectionClose(t *testing.T) { onConnectionCloseFuncCalled := false fs := flattenedSettings{ - onConnectionCloseFunc: func(_ serverTypes.Connection) { + onConnectionClose: func(_ serverTypes.Connection) { onConnectionCloseFuncCalled = true }, } diff --git a/cmd/opampsupervisor/supervisor/supervisor.go b/cmd/opampsupervisor/supervisor/supervisor.go index 4607aa6d5958..894854273c6a 100644 --- a/cmd/opampsupervisor/supervisor/supervisor.go +++ b/cmd/opampsupervisor/supervisor/supervisor.go @@ -305,11 +305,11 @@ func (s *Supervisor) getBootstrapInfo() (err error) { // using the Collector's OpAMP extension. 
err = srv.Start(flattenedSettings{ endpoint: fmt.Sprintf("localhost:%d", s.opampServerPort), - onConnectingFunc: func(_ *http.Request) (bool, int) { + onConnecting: func(_ *http.Request) (bool, int) { connected.Store(true) return true, http.StatusOK }, - onMessageFunc: func(_ serverTypes.Connection, message *protobufs.AgentToServer) { + onMessage: func(_ serverTypes.Connection, message *protobufs.AgentToServer) { if message.AgentDescription != nil { instanceIDSeen := false s.setAgentDescription(message.AgentDescription) @@ -415,33 +415,33 @@ func (s *Supervisor) startOpAMPClient() error { Header: s.config.Server.Headers, TLSConfig: tlsConfig, InstanceUid: types.InstanceUid(s.persistentState.InstanceID), - Callbacks: types.CallbacksStruct{ - OnConnectFunc: func(_ context.Context) { + Callbacks: types.Callbacks{ + OnConnect: func(_ context.Context) { s.logger.Debug("Connected to the server.") }, - OnConnectFailedFunc: func(_ context.Context, err error) { + OnConnectFailed: func(_ context.Context, err error) { s.logger.Error("Failed to connect to the server", zap.Error(err)) }, - OnErrorFunc: func(_ context.Context, err *protobufs.ServerErrorResponse) { + OnError: func(_ context.Context, err *protobufs.ServerErrorResponse) { s.logger.Error("Server returned an error response", zap.String("message", err.ErrorMessage)) }, - OnMessageFunc: s.onMessage, - OnOpampConnectionSettingsFunc: func(ctx context.Context, settings *protobufs.OpAMPConnectionSettings) error { + OnMessage: s.onMessage, + OnOpampConnectionSettings: func(ctx context.Context, settings *protobufs.OpAMPConnectionSettings) error { //nolint:errcheck go s.onOpampConnectionSettings(ctx, settings) return nil }, - OnCommandFunc: func(_ context.Context, command *protobufs.ServerToAgentCommand) error { + OnCommand: func(_ context.Context, command *protobufs.ServerToAgentCommand) error { cmdType := command.GetType() if *cmdType.Enum() == protobufs.CommandType_CommandType_Restart { return s.handleRestartCommand() } return nil }, - SaveRemoteConfigStatusFunc: func(_ context.Context, _ *protobufs.RemoteConfigStatus) { + SaveRemoteConfigStatus: func(_ context.Context, _ *protobufs.RemoteConfigStatus) { // TODO: https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/21079 }, - GetEffectiveConfigFunc: func(_ context.Context) (*protobufs.EffectiveConfig, error) { + GetEffectiveConfig: func(_ context.Context) (*protobufs.EffectiveConfig, error) { return s.createEffectiveConfigMsg(), nil }, }, @@ -486,13 +486,13 @@ func (s *Supervisor) startOpAMPServer() error { err = s.opampServer.Start(flattenedSettings{ endpoint: fmt.Sprintf("localhost:%d", s.opampServerPort), - onConnectingFunc: func(_ *http.Request) (bool, int) { + onConnecting: func(_ *http.Request) (bool, int) { // Only allow one agent to be connected the this server at a time. 
alreadyConnected := connected.Swap(true) return !alreadyConnected, http.StatusConflict }, - onMessageFunc: s.handleAgentOpAMPMessage, - onConnectionCloseFunc: func(_ serverTypes.Connection) { + onMessage: s.handleAgentOpAMPMessage, + onConnectionClose: func(_ serverTypes.Connection) { connected.Store(false) }, }.toServerSettings()) diff --git a/connector/datadogconnector/go.mod b/connector/datadogconnector/go.mod index 178a8882096b..0df4593fb291 100644 --- a/connector/datadogconnector/go.mod +++ b/connector/datadogconnector/go.mod @@ -115,7 +115,20 @@ require ( github.com/alecthomas/participle/v2 v2.1.1 // indirect github.com/antchfx/xmlquery v1.4.3 // indirect github.com/antchfx/xpath v1.3.3 // indirect - github.com/aws/aws-sdk-go v1.55.5 // indirect + github.com/aws/aws-sdk-go-v2 v1.32.7 // indirect + github.com/aws/aws-sdk-go-v2/config v1.28.6 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.47 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.22 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect + github.com/aws/aws-sdk-go-v2/service/ec2 v1.196.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 // indirect + github.com/aws/smithy-go v1.22.1 // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/briandowns/spinner v1.23.0 // indirect @@ -165,7 +178,6 @@ require ( github.com/iancoleman/strcase v0.3.0 // indirect github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect diff --git a/connector/datadogconnector/go.sum b/connector/datadogconnector/go.sum index acca33e4b1eb..3db167430df3 100644 --- a/connector/datadogconnector/go.sum +++ b/connector/datadogconnector/go.sum @@ -253,6 +253,34 @@ github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+ github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go-v2 v1.32.7 h1:ky5o35oENWi0JYWUZkB7WYvVPP+bcRF5/Iq7JWSb5Rw= +github.com/aws/aws-sdk-go-v2 v1.32.7/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= +github.com/aws/aws-sdk-go-v2/config v1.28.6 h1:D89IKtGrs/I3QXOLNTH93NJYtDhm8SYa9Q5CsPShmyo= +github.com/aws/aws-sdk-go-v2/config v1.28.6/go.mod h1:GDzxJ5wyyFSCoLkS+UhGB0dArhb9mI+Co4dHtoTxbko= +github.com/aws/aws-sdk-go-v2/credentials v1.17.47 h1:48bA+3/fCdi2yAwVt+3COvmatZ6jUDNkDTIsqDiMUdw= +github.com/aws/aws-sdk-go-v2/credentials v1.17.47/go.mod h1:+KdckOejLW3Ks3b0E3b5rHsr2f9yuORBum0WPnE5o5w= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.22 h1:kqOrpojG71DxJm/KDPO+Z/y1phm1JlC8/iT+5XRmAn8= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds 
v1.16.22/go.mod h1:NtSFajXVVL8TA2QNngagVZmUtXciyrHOt7xgz4faS/M= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 h1:s/fF4+yDQDoElYhfIVvSNyeCydfbuTKzhxSXDXCPasU= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25/go.mod h1:IgPfDv5jqFIzQSNbUEMoitNooSMXjRSDkhXv8jiROvU= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 h1:ZntTCl5EsYnhN/IygQEUugpdwbhdkom9uHcbCftiGgA= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25/go.mod h1:DBdPrgeocww+CSl1C8cEV8PN1mHMBhuCDLpXezyvWkE= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.196.0 h1:ZBtoihAqfT+5b1FwGHOubq8k10KwaIyKZd2/CRTucAU= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.196.0/go.mod h1:00zqVNJFK6UASrTnuvjJHJuaqUdkVz5tW8Ip+VhzuNg= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 h1:iXtILhvDxB6kPvEXgsDhGaZCSC6LQET5ZHSdJozeI0Y= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1/go.mod h1:9nu0fVANtYiAePIBh2/pFUSwtJ402hLnp854CNoDOeE= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 h1:50+XsN70RS7dwJ2CkVNXzj7U2L1HKP8nqTd3XWEXBN4= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6/go.mod h1:WqgLmwY7so32kG01zD8CPTJWVWM+TzJoOVHwTg4aPug= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 h1:rLnYAfXQ3YAccocshIH5mzNNwZBkBo+bP6EhIxak6Hw= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.7/go.mod h1:ZHtuQJ6t9A/+YDuxOLnbryAmITtr8UysSny3qcyvJTc= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 h1:JnhTZR3PiYDNKlXy50/pNeix9aGMo6lLpXwJ1mw8MD4= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6/go.mod h1:URronUEGfXZN1VpdktPSD1EkAL9mfrV+2F4sjH38qOY= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 h1:s4074ZO1Hk8qv65GqNXqDjmkf4HSQqJukaLuuW0TpDA= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.2/go.mod h1:mVggCnIWoM09jP71Wh+ea7+5gAp53q+49wDFs1SW5z8= +github.com/aws/smithy-go v1.22.1 h1:/HPHZQ0g7f4eUeK6HKglFz8uwVfZKgoI25rb/J+dnro= +github.com/aws/smithy-go v1.22.1/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -555,8 +583,6 @@ github.com/ionos-cloud/sdk-go/v6 v6.1.11 h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEae github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc= diff --git a/exporter/datadogexporter/go.mod b/exporter/datadogexporter/go.mod index 182a985a9333..225d5d081a6b 100644 --- a/exporter/datadogexporter/go.mod +++ b/exporter/datadogexporter/go.mod @@ -38,7 
+38,7 @@ require ( github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.22.0 github.com/DataDog/sketches-go v1.4.6 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 - github.com/aws/aws-sdk-go v1.55.5 + github.com/aws/aws-sdk-go v1.55.5 // indirect github.com/cenkalti/backoff/v4 v4.3.0 github.com/google/go-cmp v0.6.0 github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.117.0 @@ -91,6 +91,10 @@ require ( ) require ( + github.com/aws/aws-sdk-go-v2 v1.32.7 + github.com/aws/aws-sdk-go-v2/config v1.28.6 + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.22 + github.com/aws/aws-sdk-go-v2/service/ec2 v1.196.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.117.0 go.opentelemetry.io/collector/component/componenttest v0.117.0 go.opentelemetry.io/collector/consumer/consumererror v0.117.0 @@ -157,6 +161,16 @@ require ( github.com/antchfx/xmlquery v1.4.3 // indirect github.com/antchfx/xpath v1.3.3 // indirect github.com/armon/go-metrics v0.4.1 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.47 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 // indirect + github.com/aws/smithy-go v1.22.1 // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bmatcuk/doublestar/v4 v4.7.1 // indirect diff --git a/exporter/datadogexporter/go.sum b/exporter/datadogexporter/go.sum index 864db8782599..b0899e11f873 100644 --- a/exporter/datadogexporter/go.sum +++ b/exporter/datadogexporter/go.sum @@ -277,6 +277,34 @@ github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:l github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go-v2 v1.32.7 h1:ky5o35oENWi0JYWUZkB7WYvVPP+bcRF5/Iq7JWSb5Rw= +github.com/aws/aws-sdk-go-v2 v1.32.7/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= +github.com/aws/aws-sdk-go-v2/config v1.28.6 h1:D89IKtGrs/I3QXOLNTH93NJYtDhm8SYa9Q5CsPShmyo= +github.com/aws/aws-sdk-go-v2/config v1.28.6/go.mod h1:GDzxJ5wyyFSCoLkS+UhGB0dArhb9mI+Co4dHtoTxbko= +github.com/aws/aws-sdk-go-v2/credentials v1.17.47 h1:48bA+3/fCdi2yAwVt+3COvmatZ6jUDNkDTIsqDiMUdw= +github.com/aws/aws-sdk-go-v2/credentials v1.17.47/go.mod h1:+KdckOejLW3Ks3b0E3b5rHsr2f9yuORBum0WPnE5o5w= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.22 h1:kqOrpojG71DxJm/KDPO+Z/y1phm1JlC8/iT+5XRmAn8= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.22/go.mod h1:NtSFajXVVL8TA2QNngagVZmUtXciyrHOt7xgz4faS/M= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 h1:s/fF4+yDQDoElYhfIVvSNyeCydfbuTKzhxSXDXCPasU= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25/go.mod h1:IgPfDv5jqFIzQSNbUEMoitNooSMXjRSDkhXv8jiROvU= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 
h1:ZntTCl5EsYnhN/IygQEUugpdwbhdkom9uHcbCftiGgA= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25/go.mod h1:DBdPrgeocww+CSl1C8cEV8PN1mHMBhuCDLpXezyvWkE= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.196.0 h1:ZBtoihAqfT+5b1FwGHOubq8k10KwaIyKZd2/CRTucAU= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.196.0/go.mod h1:00zqVNJFK6UASrTnuvjJHJuaqUdkVz5tW8Ip+VhzuNg= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 h1:iXtILhvDxB6kPvEXgsDhGaZCSC6LQET5ZHSdJozeI0Y= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1/go.mod h1:9nu0fVANtYiAePIBh2/pFUSwtJ402hLnp854CNoDOeE= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 h1:50+XsN70RS7dwJ2CkVNXzj7U2L1HKP8nqTd3XWEXBN4= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6/go.mod h1:WqgLmwY7so32kG01zD8CPTJWVWM+TzJoOVHwTg4aPug= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 h1:rLnYAfXQ3YAccocshIH5mzNNwZBkBo+bP6EhIxak6Hw= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.7/go.mod h1:ZHtuQJ6t9A/+YDuxOLnbryAmITtr8UysSny3qcyvJTc= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 h1:JnhTZR3PiYDNKlXy50/pNeix9aGMo6lLpXwJ1mw8MD4= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6/go.mod h1:URronUEGfXZN1VpdktPSD1EkAL9mfrV+2F4sjH38qOY= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 h1:s4074ZO1Hk8qv65GqNXqDjmkf4HSQqJukaLuuW0TpDA= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.2/go.mod h1:mVggCnIWoM09jP71Wh+ea7+5gAp53q+49wDFs1SW5z8= +github.com/aws/smithy-go v1.22.1 h1:/HPHZQ0g7f4eUeK6HKglFz8uwVfZKgoI25rb/J+dnro= +github.com/aws/smithy-go v1.22.1/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= diff --git a/exporter/datadogexporter/integrationtest/go.mod b/exporter/datadogexporter/integrationtest/go.mod index d5e470ebd0b9..8ff77f7cbad3 100644 --- a/exporter/datadogexporter/integrationtest/go.mod +++ b/exporter/datadogexporter/integrationtest/go.mod @@ -127,6 +127,20 @@ require ( github.com/antchfx/xpath v1.3.3 // indirect github.com/armon/go-metrics v0.4.1 // indirect github.com/aws/aws-sdk-go v1.55.5 // indirect + github.com/aws/aws-sdk-go-v2 v1.32.7 // indirect + github.com/aws/aws-sdk-go-v2/config v1.28.6 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.47 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.22 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect + github.com/aws/aws-sdk-go-v2/service/ec2 v1.196.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 // indirect + github.com/aws/smithy-go v1.22.1 // indirect github.com/benbjohnson/clock v1.3.5 // 
indirect github.com/beorn7/perks v1.0.1 // indirect github.com/briandowns/spinner v1.23.0 // indirect diff --git a/exporter/datadogexporter/integrationtest/go.sum b/exporter/datadogexporter/integrationtest/go.sum index a049324b6807..8147453f645e 100644 --- a/exporter/datadogexporter/integrationtest/go.sum +++ b/exporter/datadogexporter/integrationtest/go.sum @@ -275,6 +275,34 @@ github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:l github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go-v2 v1.32.7 h1:ky5o35oENWi0JYWUZkB7WYvVPP+bcRF5/Iq7JWSb5Rw= +github.com/aws/aws-sdk-go-v2 v1.32.7/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= +github.com/aws/aws-sdk-go-v2/config v1.28.6 h1:D89IKtGrs/I3QXOLNTH93NJYtDhm8SYa9Q5CsPShmyo= +github.com/aws/aws-sdk-go-v2/config v1.28.6/go.mod h1:GDzxJ5wyyFSCoLkS+UhGB0dArhb9mI+Co4dHtoTxbko= +github.com/aws/aws-sdk-go-v2/credentials v1.17.47 h1:48bA+3/fCdi2yAwVt+3COvmatZ6jUDNkDTIsqDiMUdw= +github.com/aws/aws-sdk-go-v2/credentials v1.17.47/go.mod h1:+KdckOejLW3Ks3b0E3b5rHsr2f9yuORBum0WPnE5o5w= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.22 h1:kqOrpojG71DxJm/KDPO+Z/y1phm1JlC8/iT+5XRmAn8= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.22/go.mod h1:NtSFajXVVL8TA2QNngagVZmUtXciyrHOt7xgz4faS/M= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 h1:s/fF4+yDQDoElYhfIVvSNyeCydfbuTKzhxSXDXCPasU= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25/go.mod h1:IgPfDv5jqFIzQSNbUEMoitNooSMXjRSDkhXv8jiROvU= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 h1:ZntTCl5EsYnhN/IygQEUugpdwbhdkom9uHcbCftiGgA= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25/go.mod h1:DBdPrgeocww+CSl1C8cEV8PN1mHMBhuCDLpXezyvWkE= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.196.0 h1:ZBtoihAqfT+5b1FwGHOubq8k10KwaIyKZd2/CRTucAU= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.196.0/go.mod h1:00zqVNJFK6UASrTnuvjJHJuaqUdkVz5tW8Ip+VhzuNg= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 h1:iXtILhvDxB6kPvEXgsDhGaZCSC6LQET5ZHSdJozeI0Y= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1/go.mod h1:9nu0fVANtYiAePIBh2/pFUSwtJ402hLnp854CNoDOeE= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 h1:50+XsN70RS7dwJ2CkVNXzj7U2L1HKP8nqTd3XWEXBN4= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6/go.mod h1:WqgLmwY7so32kG01zD8CPTJWVWM+TzJoOVHwTg4aPug= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 h1:rLnYAfXQ3YAccocshIH5mzNNwZBkBo+bP6EhIxak6Hw= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.7/go.mod h1:ZHtuQJ6t9A/+YDuxOLnbryAmITtr8UysSny3qcyvJTc= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 h1:JnhTZR3PiYDNKlXy50/pNeix9aGMo6lLpXwJ1mw8MD4= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6/go.mod h1:URronUEGfXZN1VpdktPSD1EkAL9mfrV+2F4sjH38qOY= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 h1:s4074ZO1Hk8qv65GqNXqDjmkf4HSQqJukaLuuW0TpDA= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.2/go.mod h1:mVggCnIWoM09jP71Wh+ea7+5gAp53q+49wDFs1SW5z8= +github.com/aws/smithy-go v1.22.1 
h1:/HPHZQ0g7f4eUeK6HKglFz8uwVfZKgoI25rb/J+dnro= +github.com/aws/smithy-go v1.22.1/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= diff --git a/exporter/datadogexporter/internal/hostmetadata/internal/ec2/ec2.go b/exporter/datadogexporter/internal/hostmetadata/internal/ec2/ec2.go index ef20a5382106..a025fabda443 100644 --- a/exporter/datadogexporter/internal/hostmetadata/internal/ec2/ec2.go +++ b/exporter/datadogexporter/internal/hostmetadata/internal/ec2/ec2.go @@ -7,14 +7,16 @@ package ec2 // import "github.com/open-telemetry/opentelemetry-collector-contrib import ( "context" "fmt" + "io" "strings" "sync" "github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/source" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/ec2metadata" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" + "github.com/aws/aws-sdk-go-v2/service/ec2" + "github.com/aws/aws-sdk-go-v2/service/ec2/types" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/hostmetadata/provider" @@ -42,31 +44,43 @@ func isDefaultHostname(hostname string) bool { // GetHostInfo gets the hostname info from EC2 metadata func GetHostInfo(ctx context.Context, logger *zap.Logger) (hostInfo *HostInfo) { - sess, err := session.NewSession() hostInfo = &HostInfo{} + cfg, err := config.LoadDefaultConfig(ctx) if err != nil { - logger.Warn("Failed to build AWS session", zap.Error(err)) + logger.Warn("Failed to build AWS config", zap.Error(err)) return } - meta := ec2metadata.New(sess) + client := imds.NewFromConfig(cfg) - if !meta.AvailableWithContext(ctx) { - logger.Debug("EC2 Metadata not available") + // Check if metadata service is available by trying to retrieve instance ID + _, err = client.GetMetadata(ctx, &imds.GetMetadataInput{ + Path: "instance-id", + }) + if err != nil { + logger.Debug("EC2 Metadata service is not available", zap.Error(err)) return } - if idDoc, err := meta.GetInstanceIdentityDocumentWithContext(ctx); err == nil { + idDoc, err := client.GetInstanceIdentityDocument(ctx, &imds.GetInstanceIdentityDocumentInput{}) + if err == nil { hostInfo.InstanceID = idDoc.InstanceID } else { logger.Warn("Failed to get EC2 instance id document", zap.Error(err)) } - if ec2Hostname, err := meta.GetMetadataWithContext(ctx, "hostname"); err == nil { - hostInfo.EC2Hostname = ec2Hostname + metadataOutput, err := client.GetMetadata(ctx, &imds.GetMetadataInput{Path: "hostname"}) + if err != nil { + logger.Warn("Failed to retrieve EC2 hostname", zap.Error(err)) } else { - logger.Warn("Failed to get EC2 hostname", zap.Error(err)) + defer metadataOutput.Content.Close() + hostnameBytes, readErr := io.ReadAll(metadataOutput.Content) + if readErr != nil { + logger.Warn("Failed to read EC2 hostname content", zap.Error(readErr)) + } else { + hostInfo.EC2Hostname = string(hostnameBytes) + } } return @@ -94,13 +108,13 @@ type Provider struct { } func NewProvider(logger *zap.Logger) (*Provider, error) { - sess, err := session.NewSession() + cfg, err := config.LoadDefaultConfig(context.Background()) if 
err != nil { return nil, err } return &Provider{ logger: logger, - detector: ec2provider.NewProvider(sess), + detector: ec2provider.NewProvider(cfg), }, nil } @@ -129,23 +143,20 @@ func (p *Provider) instanceTags(ctx context.Context) (*ec2.DescribeTagsOutput, e // Similar to: // - https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/39dbc1ac8/processor/resourcedetectionprocessor/internal/aws/ec2/ec2.go#L118-L151 // - https://github.com/DataDog/datadog-agent/blob/1b4afdd6a03e8fabcc169b924931b2bb8935dab9/pkg/util/ec2/ec2_tags.go#L104-L134 - sess, err := session.NewSession(&aws.Config{ - Region: aws.String(meta.Region), - }) + cfg, err := config.LoadDefaultConfig(ctx, + config.WithRegion(meta.Region), + ) if err != nil { - return nil, fmt.Errorf("failed to build AWS session: %w", err) + return nil, fmt.Errorf("failed to load AWS config: %w", err) } - svc := ec2.New(sess) - return svc.DescribeTagsWithContext(ctx, - &ec2.DescribeTagsInput{ - Filters: []*ec2.Filter{{ - Name: aws.String("resource-id"), - Values: []*string{ - aws.String(meta.InstanceID), - }, - }}, - }) + client := ec2.NewFromConfig(cfg) + return client.DescribeTags(ctx, &ec2.DescribeTagsInput{ + Filters: []types.Filter{{ + Name: aws.String("resource-id"), + Values: []string{meta.InstanceID}, + }}, + }) } // clusterNameFromTags gets the AWS EC2 Cluster name from the tags on an EC2 instance. diff --git a/exporter/datadogexporter/internal/hostmetadata/internal/ec2/ec2_test.go b/exporter/datadogexporter/internal/hostmetadata/internal/ec2/ec2_test.go index 26d697236466..da1dc3c1ab48 100644 --- a/exporter/datadogexporter/internal/hostmetadata/internal/ec2/ec2_test.go +++ b/exporter/datadogexporter/internal/hostmetadata/internal/ec2/ec2_test.go @@ -5,7 +5,8 @@ package ec2 import ( "testing" - "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go-v2/service/ec2" + "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/stretchr/testify/assert" "go.uber.org/zap" ) @@ -56,7 +57,7 @@ func TestClusterNameFromEC2Tags(t *testing.T) { name: "missing cluster name tag", ec2Tags: &ec2.DescribeTagsOutput{ NextToken: strp("NextToken"), - Tags: []*ec2.TagDescription{ + Tags: []types.TagDescription{ {Key: strp("some key"), Value: strp("some value")}, }, }, @@ -66,7 +67,7 @@ func TestClusterNameFromEC2Tags(t *testing.T) { name: "cluster name tag only has the prefix", ec2Tags: &ec2.DescribeTagsOutput{ NextToken: strp("NextToken"), - Tags: []*ec2.TagDescription{ + Tags: []types.TagDescription{ {Key: strp("some key"), Value: strp("some value")}, {Key: strp("kubernetes.io/cluster/"), Value: strp("some value")}, }, @@ -77,7 +78,7 @@ func TestClusterNameFromEC2Tags(t *testing.T) { name: "cluster name is available", ec2Tags: &ec2.DescribeTagsOutput{ NextToken: strp("NextToken"), - Tags: []*ec2.TagDescription{ + Tags: []types.TagDescription{ {Key: strp("some key"), Value: strp("some value")}, {Key: strp("kubernetes.io/cluster/myclustername"), Value: strp("some value")}, }, diff --git a/extension/opampcustommessages/go.mod b/extension/opampcustommessages/go.mod index ae570bbaf804..061532dd4151 100644 --- a/extension/opampcustommessages/go.mod +++ b/extension/opampcustommessages/go.mod @@ -2,6 +2,6 @@ module github.com/open-telemetry/opentelemetry-collector-contrib/extension/opamp go 1.22.0 -require github.com/open-telemetry/opamp-go v0.17.0 +require github.com/open-telemetry/opamp-go v0.18.0 -require google.golang.org/protobuf v1.34.2 // indirect +require google.golang.org/protobuf v1.36.2 // indirect diff --git 
a/extension/opampcustommessages/go.sum b/extension/opampcustommessages/go.sum index 856df9d69ae9..f4989ff2145b 100644 --- a/extension/opampcustommessages/go.sum +++ b/extension/opampcustommessages/go.sum @@ -1,8 +1,6 @@ -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/open-telemetry/opamp-go v0.17.0 h1:3R4+B/6Sy8mknLBbzO3gqloqwTT02rCSRcr4ac2B124= -github.com/open-telemetry/opamp-go v0.17.0/go.mod h1:SGDhUoAx7uGutO4ENNMQla/tiSujxgZmMPJXIOPGBdk= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/open-telemetry/opamp-go v0.18.0 h1:sNHsrBvGU2CMxCB1TRJXncDARrmxDEebx8dsEIawqA4= +github.com/open-telemetry/opamp-go v0.18.0/go.mod h1:9/1G6T5dnJz4cJtoYSr6AX18kHdOxnxxETJPZSHyEUg= +google.golang.org/protobuf v1.36.2 h1:R8FeyR1/eLmkutZOM5CWghmo5itiG9z0ktFlTVLuTmU= +google.golang.org/protobuf v1.36.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= diff --git a/extension/opampextension/go.mod b/extension/opampextension/go.mod index 60dfa59f529c..4e3dc9bcba93 100644 --- a/extension/opampextension/go.mod +++ b/extension/opampextension/go.mod @@ -5,7 +5,7 @@ go 1.22.0 require ( github.com/google/uuid v1.6.0 github.com/oklog/ulid/v2 v2.1.0 - github.com/open-telemetry/opamp-go v0.17.0 + github.com/open-telemetry/opamp-go v0.18.0 github.com/open-telemetry/opentelemetry-collector-contrib/extension/opampcustommessages v0.117.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/status v0.117.0 github.com/shirou/gopsutil/v4 v4.24.12 diff --git a/extension/opampextension/go.sum b/extension/opampextension/go.sum index c5f0f975dfb9..ef065ce48345 100644 --- a/extension/opampextension/go.sum +++ b/extension/opampextension/go.sum @@ -46,8 +46,8 @@ github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zx github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/oklog/ulid/v2 v2.1.0 h1:+9lhoxAP56we25tyYETBBY1YLA2SaoLvUFgrP2miPJU= github.com/oklog/ulid/v2 v2.1.0/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ= -github.com/open-telemetry/opamp-go v0.17.0 h1:3R4+B/6Sy8mknLBbzO3gqloqwTT02rCSRcr4ac2B124= -github.com/open-telemetry/opamp-go v0.17.0/go.mod h1:SGDhUoAx7uGutO4ENNMQla/tiSujxgZmMPJXIOPGBdk= +github.com/open-telemetry/opamp-go v0.18.0 h1:sNHsrBvGU2CMxCB1TRJXncDARrmxDEebx8dsEIawqA4= +github.com/open-telemetry/opamp-go v0.18.0/go.mod h1:9/1G6T5dnJz4cJtoYSr6AX18kHdOxnxxETJPZSHyEUg= github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -57,6 +57,8 @@ github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDN github.com/rogpeppe/go-internal v1.11.0/go.mod 
h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= diff --git a/extension/opampextension/opamp_agent.go b/extension/opampextension/opamp_agent.go index 3e26d407fd80..cca5f762ebe3 100644 --- a/extension/opampextension/opamp_agent.go +++ b/extension/opampextension/opamp_agent.go @@ -118,20 +118,20 @@ func (o *opampAgent) Start(ctx context.Context, host component.Host) error { TLSConfig: tls, OpAMPServerURL: o.cfg.Server.GetEndpoint(), InstanceUid: types.InstanceUid(o.instanceID), - Callbacks: types.CallbacksStruct{ - OnConnectFunc: func(_ context.Context) { + Callbacks: types.Callbacks{ + OnConnect: func(_ context.Context) { o.logger.Debug("Connected to the OpAMP server") }, - OnConnectFailedFunc: func(_ context.Context, err error) { + OnConnectFailed: func(_ context.Context, err error) { o.logger.Error("Failed to connect to the OpAMP server", zap.Error(err)) }, - OnErrorFunc: func(_ context.Context, err *protobufs.ServerErrorResponse) { + OnError: func(_ context.Context, err *protobufs.ServerErrorResponse) { o.logger.Error("OpAMP server returned an error response", zap.String("message", err.ErrorMessage)) }, - GetEffectiveConfigFunc: func(_ context.Context) (*protobufs.EffectiveConfig, error) { + GetEffectiveConfig: func(_ context.Context) (*protobufs.EffectiveConfig, error) { return o.composeEffectiveConfig(), nil }, - OnMessageFunc: o.onMessage, + OnMessage: o.onMessage, }, Capabilities: o.capabilities.toAgentCapabilities(), } diff --git a/internal/metadataproviders/aws/ec2/metadata.go b/internal/metadataproviders/aws/ec2/metadata.go index 6df3f8a0e7eb..e81124b5323b 100644 --- a/internal/metadataproviders/aws/ec2/metadata.go +++ b/internal/metadataproviders/aws/ec2/metadata.go @@ -5,37 +5,59 @@ package ec2 // import "github.com/open-telemetry/opentelemetry-collector-contrib import ( "context" + "fmt" + "io" - "github.com/aws/aws-sdk-go/aws/ec2metadata" - "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" ) type Provider interface { - Get(ctx context.Context) (ec2metadata.EC2InstanceIdentityDocument, error) + Get(ctx context.Context) (imds.InstanceIdentityDocument, error) Hostname(ctx context.Context) (string, error) InstanceID(ctx context.Context) (string, error) } type metadataClient struct { - metadata *ec2metadata.EC2Metadata + client *imds.Client } var _ Provider = (*metadataClient)(nil) -func NewProvider(sess *session.Session) Provider { +func NewProvider(cfg aws.Config) Provider { return &metadataClient{ - metadata: ec2metadata.New(sess), + client: imds.NewFromConfig(cfg), } } +func (c *metadataClient) getMetadata(ctx context.Context, path string) (string, error) { + output, err := c.client.GetMetadata(ctx, &imds.GetMetadataInput{Path: path}) + if err != nil { + return "", fmt.Errorf("failed to get %s from IMDS: %w", path, err) + } + defer output.Content.Close() + + data, err := io.ReadAll(output.Content) + if 
err != nil { + return "", fmt.Errorf("failed to read %s response: %w", path, err) + } + + return string(data), nil +} + func (c *metadataClient) InstanceID(ctx context.Context) (string, error) { - return c.metadata.GetMetadataWithContext(ctx, "instance-id") + return c.getMetadata(ctx, "instance-id") } func (c *metadataClient) Hostname(ctx context.Context) (string, error) { - return c.metadata.GetMetadataWithContext(ctx, "hostname") + return c.getMetadata(ctx, "hostname") } -func (c *metadataClient) Get(ctx context.Context) (ec2metadata.EC2InstanceIdentityDocument, error) { - return c.metadata.GetInstanceIdentityDocumentWithContext(ctx) +func (c *metadataClient) Get(ctx context.Context) (imds.InstanceIdentityDocument, error) { + output, err := c.client.GetInstanceIdentityDocument(ctx, &imds.GetInstanceIdentityDocumentInput{}) + if err != nil { + return imds.InstanceIdentityDocument{}, fmt.Errorf("failed to get instance identity document: %w", err) + } + + return output.InstanceIdentityDocument, nil } diff --git a/internal/metadataproviders/aws/ec2/metadata_test.go b/internal/metadataproviders/aws/ec2/metadata_test.go index 1cdb0f53c497..af2c80104c52 100644 --- a/internal/metadataproviders/aws/ec2/metadata_test.go +++ b/internal/metadataproviders/aws/ec2/metadata_test.go @@ -4,64 +4,198 @@ package ec2 import ( + "bytes" "context" + "fmt" + "io" + "reflect" "testing" + "time" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/awstesting/mock" - "github.com/stretchr/testify/assert" + "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" ) -func TestMetadataProviderGetError(t *testing.T) { - type args struct { - ctx context.Context - sess *session.Session +type ImdsGetMetadataAPI interface { + GetMetadata(ctx context.Context, params *imds.GetMetadataInput, optFns ...func(*imds.Options)) (*imds.GetMetadataOutput, error) +} + +type ImdsInstanceIdentityDocumentAPI interface { + GetInstanceIdentityDocument(ctx context.Context, params *imds.GetInstanceIdentityDocumentInput, optFns ...func(*imds.Options)) (*imds.GetInstanceIdentityDocumentOutput, error) +} + +func GetMetadataFromImds(ctx context.Context, api ImdsGetMetadataAPI, path string) ([]byte, error) { + output, err := api.GetMetadata(ctx, &imds.GetMetadataInput{ + Path: path, + }) + if err != nil { + return nil, err + } + defer output.Content.Close() + + return io.ReadAll(output.Content) +} + +func GetInstanceIdentityDocumentFromImds(ctx context.Context, api ImdsInstanceIdentityDocumentAPI) (imds.InstanceIdentityDocument, error) { + output, err := api.GetInstanceIdentityDocument(ctx, &imds.GetInstanceIdentityDocumentInput{}) + if err != nil { + return imds.InstanceIdentityDocument{}, err } - tests := []struct { - name string - args args + + return output.InstanceIdentityDocument, nil +} + +type mockGetMetadataAPI func(ctx context.Context, params *imds.GetMetadataInput, optFns ...func(*imds.Options)) (*imds.GetMetadataOutput, error) + +func (m mockGetMetadataAPI) GetMetadata(ctx context.Context, params *imds.GetMetadataInput, optFns ...func(*imds.Options)) (*imds.GetMetadataOutput, error) { + return m(ctx, params, optFns...) 
+} + +type mockInstanceIdentityDocumentAPI func(ctx context.Context, params *imds.GetInstanceIdentityDocumentInput, optFns ...func(*imds.Options)) (*imds.GetInstanceIdentityDocumentOutput, error) + +func (m mockInstanceIdentityDocumentAPI) GetInstanceIdentityDocument(ctx context.Context, params *imds.GetInstanceIdentityDocumentInput, optFns ...func(*imds.Options)) (*imds.GetInstanceIdentityDocumentOutput, error) { + return m(ctx, params, optFns...) +} + +func TestGetMetadataFromImds(t *testing.T) { + cases := []struct { + name string + client func(t *testing.T) ImdsGetMetadataAPI + path string + expect []byte + wantErr bool }{ { - name: "mock session", - args: args{ - ctx: context.Background(), - sess: mock.Session, + name: "Successfully retrieves InstanceID metadata", + client: func(t *testing.T) ImdsGetMetadataAPI { + return mockGetMetadataAPI(func(_ context.Context, params *imds.GetMetadataInput, _ ...func(*imds.Options)) (*imds.GetMetadataOutput, error) { + t.Helper() + if e, a := "instance-id", params.Path; e != a { + t.Errorf("expected Path: %v, got: %v", e, a) + } + return &imds.GetMetadataOutput{ + Content: io.NopCloser(bytes.NewReader([]byte("this is the body foo bar baz"))), + }, nil + }) + }, + path: "instance-id", + expect: []byte("this is the body foo bar baz"), + wantErr: false, + }, + { + name: "Successfully retrieves Hostname metadata", + client: func(t *testing.T) ImdsGetMetadataAPI { + return mockGetMetadataAPI(func(_ context.Context, params *imds.GetMetadataInput, _ ...func(*imds.Options)) (*imds.GetMetadataOutput, error) { + t.Helper() + if e, a := "hostname", params.Path; e != a { + t.Errorf("expected Path: %v, got: %v", e, a) + } + return &imds.GetMetadataOutput{ + Content: io.NopCloser(bytes.NewReader([]byte("this is the body foo bar baz"))), + }, nil + }) + }, + path: "hostname", + expect: []byte("this is the body foo bar baz"), + wantErr: false, + }, + { + name: "Path is empty", + client: func(t *testing.T) ImdsGetMetadataAPI { + return mockGetMetadataAPI(func(_ context.Context, params *imds.GetMetadataInput, _ ...func(*imds.Options)) (*imds.GetMetadataOutput, error) { + t.Helper() + if params.Path == "" { + return nil, fmt.Errorf("Path cannot be empty") + } + return nil, nil + }) }, + path: "", + expect: nil, + wantErr: true, }, } - for _, tt := range tests { + + for _, tt := range cases { t.Run(tt.name, func(t *testing.T) { - c := NewProvider(tt.args.sess) - _, err := c.Get(tt.args.ctx) - assert.Error(t, err) + ctx := context.TODO() + content, err := GetMetadataFromImds(ctx, tt.client(t), tt.path) + if (err != nil) != tt.wantErr { + t.Fatalf("expected error: %v, got: %v", tt.wantErr, err) + } + if !tt.wantErr && !bytes.Equal(tt.expect, content) { + t.Errorf("expected content: %v, got: %v", string(tt.expect), string(content)) + } }) } } -func TestMetadataProvider_available(t *testing.T) { - type fields struct{} - type args struct { - ctx context.Context - sess *session.Session - } - tests := []struct { - name string - fields fields - args args - want error +func TestInstanceIdentityDocumentFromImds(t *testing.T) { + cases := []struct { + name string + client func(t *testing.T) ImdsInstanceIdentityDocumentAPI + expect imds.InstanceIdentityDocument + wantErr bool }{ { - name: "mock session", - fields: fields{}, - args: args{ctx: context.Background(), sess: mock.Session}, - want: nil, + name: "Successfully retrieves Instance Identity Document", + client: func(t *testing.T) ImdsInstanceIdentityDocumentAPI { + return mockInstanceIdentityDocumentAPI(func(_ 
context.Context, _ *imds.GetInstanceIdentityDocumentInput, _ ...func(*imds.Options)) (*imds.GetInstanceIdentityDocumentOutput, error) { + t.Helper() + return &imds.GetInstanceIdentityDocumentOutput{ + InstanceIdentityDocument: imds.InstanceIdentityDocument{ + DevpayProductCodes: []string{"code1", "code2"}, + MarketplaceProductCodes: []string{"market1"}, + AvailabilityZone: "us-west-2a", + PrivateIP: "192.168.1.1", + Version: "2017-09-30", + Region: "us-west-2", + InstanceID: "i-1234567890abcdef0", + BillingProducts: []string{"prod1"}, + InstanceType: "t2.micro", + AccountID: "123456789012", + PendingTime: time.Date(2023, time.January, 1, 0, 0, 0, 0, time.UTC), + ImageID: "ami-abcdef1234567890", + KernelID: "", + RamdiskID: "", + Architecture: "x86_64", + }, + }, nil + }) + }, + expect: imds.InstanceIdentityDocument{ + DevpayProductCodes: []string{"code1", "code2"}, + MarketplaceProductCodes: []string{"market1"}, + AvailabilityZone: "us-west-2a", + PrivateIP: "192.168.1.1", + Version: "2017-09-30", + Region: "us-west-2", + InstanceID: "i-1234567890abcdef0", + BillingProducts: []string{"prod1"}, + InstanceType: "t2.micro", + AccountID: "123456789012", + PendingTime: time.Date(2023, time.January, 1, 0, 0, 0, 0, time.UTC), + ImageID: "ami-abcdef1234567890", + KernelID: "", + RamdiskID: "", + Architecture: "x86_64", + }, + wantErr: false, }, } - for _, tt := range tests { + + for _, tt := range cases { t.Run(tt.name, func(t *testing.T) { - c := NewProvider(tt.args.sess) - _, err := c.InstanceID(tt.args.ctx) - assert.ErrorIs(t, err, tt.want) + ctx := context.TODO() + document, err := GetInstanceIdentityDocumentFromImds(ctx, tt.client(t)) + if (err != nil) != tt.wantErr { + t.Fatalf("expected error: %v, got: %v", tt.wantErr, err) + } + + if !tt.wantErr { + if !reflect.DeepEqual(document, tt.expect) { + t.Errorf("expected document: %+v, got: %+v", tt.expect, document) + } + } }) } } diff --git a/internal/metadataproviders/go.mod b/internal/metadataproviders/go.mod index dff68dd443e0..1702ef4c26ce 100644 --- a/internal/metadataproviders/go.mod +++ b/internal/metadataproviders/go.mod @@ -4,7 +4,8 @@ go 1.22.0 require ( github.com/Showmax/go-fqdn v1.0.0 - github.com/aws/aws-sdk-go v1.55.5 + github.com/aws/aws-sdk-go-v2 v1.32.7 + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.22 github.com/docker/docker v27.4.1+incompatible github.com/hashicorp/consul/api v1.31.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.117.0 @@ -22,6 +23,7 @@ require ( require ( github.com/Microsoft/go-winio v0.5.2 // indirect github.com/armon/go-metrics v0.4.1 // indirect + github.com/aws/smithy-go v1.22.1 // indirect github.com/containerd/log v0.1.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/distribution/reference v0.5.0 // indirect @@ -53,7 +55,6 @@ require ( github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/hashicorp/serf v0.10.1 // indirect github.com/imdario/mergo v0.3.11 // indirect - github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect diff --git a/internal/metadataproviders/go.sum b/internal/metadataproviders/go.sum index 8243ed99bf5f..2995c30f4176 100644 --- a/internal/metadataproviders/go.sum +++ b/internal/metadataproviders/go.sum @@ -51,8 +51,12 @@ github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+ 
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= -github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go-v2 v1.32.7 h1:ky5o35oENWi0JYWUZkB7WYvVPP+bcRF5/Iq7JWSb5Rw= +github.com/aws/aws-sdk-go-v2 v1.32.7/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.22 h1:kqOrpojG71DxJm/KDPO+Z/y1phm1JlC8/iT+5XRmAn8= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.22/go.mod h1:NtSFajXVVL8TA2QNngagVZmUtXciyrHOt7xgz4faS/M= +github.com/aws/smithy-go v1.22.1 h1:/HPHZQ0g7f4eUeK6HKglFz8uwVfZKgoI25rb/J+dnro= +github.com/aws/smithy-go v1.22.1/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= @@ -257,10 +261,6 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= diff --git a/processor/attributesprocessor/factory_test.go b/processor/attributesprocessor/factory_test.go index c210ffd08a1d..72a26027ba52 100644 --- a/processor/attributesprocessor/factory_test.go +++ b/processor/attributesprocessor/factory_test.go @@ -91,7 +91,7 @@ func TestFactory_CreateMetrics(t *testing.T) { {Key: "fake_key", Action: attraction.UPSERT}, } - // Upsert should fail on non-existent key + // Upsert should fail on nonexistent key mp, err = factory.CreateMetrics(context.Background(), processortest.NewNopSettings(), cfg, consumertest.NewNop()) require.Nil(t, mp) require.Error(t, err) diff --git a/processor/attributesprocessor/testdata/config.yaml b/processor/attributesprocessor/testdata/config.yaml index ec78f3b09c96..8f0f7dbb09a8 100644 --- a/processor/attributesprocessor/testdata/config.yaml +++ b/processor/attributesprocessor/testdata/config.yaml @@ -33,7 +33,7 @@ attributes/regex_insert: # http_path: path # http_query_params=queryParam1=value1,queryParam2=value2 # http.url value does NOT change. 
- # Note: Similar to the Span Procesor, if a target key already exists, + # Note: Similar to the Span Processor, if a target key already exists, # it will be updated. - key: "http.url" pattern: ^(?P.*):\/\/(?P.*)\/(?P.*)(\?|\&)(?P.*) diff --git a/processor/cumulativetodeltaprocessor/internal/tracking/tracker_test.go b/processor/cumulativetodeltaprocessor/internal/tracking/tracker_test.go index 8ebcdb188f23..3412efc5849b 100644 --- a/processor/cumulativetodeltaprocessor/internal/tracking/tracker_test.go +++ b/processor/cumulativetodeltaprocessor/internal/tracking/tracker_test.go @@ -40,7 +40,7 @@ func TestMetricTracker_Convert(t *testing.T) { future := time.Now().Add(1 * time.Hour) keepSubsequentTest := subTest{ - name: "keep subsequet value", + name: "keep subsequent value", value: ValuePoint{ ObservedTimestamp: pcommon.NewTimestampFromTime(future.Add(time.Minute)), FloatValue: 225, diff --git a/processor/deltatocumulativeprocessor/internal/data/expo/scale.go b/processor/deltatocumulativeprocessor/internal/data/expo/scale.go index c19830071eaf..9c3ee88652c8 100644 --- a/processor/deltatocumulativeprocessor/internal/data/expo/scale.go +++ b/processor/deltatocumulativeprocessor/internal/data/expo/scale.go @@ -47,7 +47,7 @@ func Downscale(bs Buckets, from, to Scale) { case from < to: // because even distribution within the buckets cannot be assumed, it is // not possible to correctly upscale (split) buckets. - // any attempt to do so would yield erronous data. + // any attempt to do so would yield erroneous data. panic(fmt.Sprintf("cannot upscale without introducing error (%d -> %d)", from, to)) } @@ -107,7 +107,7 @@ func Collapse(bs Buckets) { // zero the excess area. its not needed to represent the observation // anymore, but kept for two reasons: // 1. future observations may need it, no need to re-alloc then if kept - // 2. [pcommon.Uint64Slice] can not, in fact, be sliced, so getting rid + // 2. 
[pcommon.Uint64Slice] cannot, in fact, be sliced, so getting rid // of it would alloc ¯\_(ツ)_/¯ for i := size; i < counts.Len(); i++ { counts.SetAt(i, 0) diff --git a/processor/filterprocessor/metrics.go b/processor/filterprocessor/metrics.go index 63beb811e2d6..655f4ba0b3f7 100644 --- a/processor/filterprocessor/metrics.go +++ b/processor/filterprocessor/metrics.go @@ -157,7 +157,7 @@ func (fmp *filterMetricProcessor) processMetrics(ctx context.Context, md pmetric errors = multierr.Append(errors, fmp.handleHistogramDataPoints(ctx, metric.Histogram().DataPoints(), metric, smetrics.Metrics(), scope, resource)) return metric.Histogram().DataPoints().Len() == 0 case pmetric.MetricTypeExponentialHistogram: - errors = multierr.Append(errors, fmp.handleExponetialHistogramDataPoints(ctx, metric.ExponentialHistogram().DataPoints(), metric, smetrics.Metrics(), scope, resource)) + errors = multierr.Append(errors, fmp.handleExponentialHistogramDataPoints(ctx, metric.ExponentialHistogram().DataPoints(), metric, smetrics.Metrics(), scope, resource)) return metric.ExponentialHistogram().DataPoints().Len() == 0 case pmetric.MetricTypeSummary: errors = multierr.Append(errors, fmp.handleSummaryDataPoints(ctx, metric.Summary().DataPoints(), metric, smetrics.Metrics(), scope, resource)) @@ -283,7 +283,7 @@ func (fmp *filterMetricProcessor) handleHistogramDataPoints(ctx context.Context, return errors } -func (fmp *filterMetricProcessor) handleExponetialHistogramDataPoints(ctx context.Context, dps pmetric.ExponentialHistogramDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource) error { +func (fmp *filterMetricProcessor) handleExponentialHistogramDataPoints(ctx context.Context, dps pmetric.ExponentialHistogramDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource) error { var errors error dps.RemoveIf(func(datapoint pmetric.ExponentialHistogramDataPoint) bool { skip, err := fmp.skipDataPointExpr.Eval(ctx, ottldatapoint.NewTransformContext(datapoint, metric, metrics, is, resource, pmetric.NewScopeMetrics(), pmetric.NewResourceMetrics())) diff --git a/processor/geoipprocessor/internal/provider/maxmindprovider/provider.go b/processor/geoipprocessor/internal/provider/maxmindprovider/provider.go index f56b02fe4de0..9fb128b69bcc 100644 --- a/processor/geoipprocessor/internal/provider/maxmindprovider/provider.go +++ b/processor/geoipprocessor/internal/provider/maxmindprovider/provider.go @@ -58,7 +58,7 @@ func (g *maxMindProvider) Location(_ context.Context, ipAddress net.IP) (attribu } } -// cityAttributes returns a list of key-values containing geographical metadata associated to the provided IP. The key names are populated using the internal geo IP conventions package. If the an invalid or nil IP is provided, an error is returned. +// cityAttributes returns a list of key-values containing geographical metadata associated to the provided IP. The key names are populated using the internal geo IP conventions package. If an invalid or nil IP is provided, an error is returned. 
func (g *maxMindProvider) cityAttributes(ipAddress net.IP) (*[]attribute.KeyValue, error) { attributes := make([]attribute.KeyValue, 0, 11) diff --git a/processor/geoipprocessor/internal/provider/maxmindprovider/testdata/generate_db.go b/processor/geoipprocessor/internal/provider/maxmindprovider/testdata/generate_db.go index 335713f2deec..10fc2f5f3779 100644 --- a/processor/geoipprocessor/internal/provider/maxmindprovider/testdata/generate_db.go +++ b/processor/geoipprocessor/internal/provider/maxmindprovider/testdata/generate_db.go @@ -10,7 +10,7 @@ import ( "github.com/maxmind/MaxMind-DB/pkg/writer" ) -// GenerateLocalDB generates *.mmdb databases files given a source directory data. It uses a the writer functionality provided by MaxMind-Db/pkg/writer +// GenerateLocalDB generates *.mmdb databases files given a source directory data. It uses the writer functionality provided by MaxMind-Db/pkg/writer func GenerateLocalDB(t *testing.T, sourceData string) string { tmpDir, err := os.MkdirTemp("", "") if err != nil { diff --git a/processor/groupbytraceprocessor/README.md b/processor/groupbytraceprocessor/README.md index c4fc5e14ba3a..9aa43e96df53 100644 --- a/processor/groupbytraceprocessor/README.md +++ b/processor/groupbytraceprocessor/README.md @@ -68,7 +68,7 @@ The following metrics are recorded by this processor: A healthy system would have the same value for the metric `otelcol_processor_groupbytrace_spans_released` and for three events under `otelcol_processor_groupbytrace_event_latency_bucket`: `onTraceExpired`, `onTraceRemoved` and `onTraceReleased`. -The metric `otelcol_processor_groupbytrace_event_latency_bucket` is a bucket and shows how long each event took to be processed in miliseconds. In most cases, it should take less than 5ms for an event to be processed, but it might be the case where an event could take 10ms. Higher latencies are possible, but it should never really reach the last item, representing 1s. Events taking more than 1s are killed automatically, and if you have multiple items in this bucket, it might indicate a bug in the software. +The metric `otelcol_processor_groupbytrace_event_latency_bucket` is a bucket and shows how long each event took to be processed in milliseconds. In most cases, it should take less than 5ms for an event to be processed, but it might be the case where an event could take 10ms. Higher latencies are possible, but it should never really reach the last item, representing 1s. Events taking more than 1s are killed automatically, and if you have multiple items in this bucket, it might indicate a bug in the software. 
Most metrics are updated when the events occur, except for the following ones, which are updated periodically: * `otelcol_processor_groupbytrace_num_events_in_queue` diff --git a/processor/intervalprocessor/README.md b/processor/intervalprocessor/README.md index 10debe739740..8659cd76629b 100644 --- a/processor/intervalprocessor/README.md +++ b/processor/intervalprocessor/README.md @@ -51,29 +51,29 @@ intervalprocessor: The following sum metrics come into the processor to be handled -| Timestamp | Metric Name | Aggregation Temporarility | Attributes | Value | -| --------- | ------------ | ------------------------- | ----------------- | ----: | -| 0 | test_metric | Cumulative | labelA: foo | 4.0 | -| 2 | test_metric | Cumulative | labelA: bar | 3.1 | -| 4 | other_metric | Delta | fruitType: orange | 77.4 | -| 6 | test_metric | Cumulative | labelA: foo | 8.2 | -| 8 | test_metric | Cumulative | labelA: foo | 12.8 | -| 10 | test_metric | Cumulative | labelA: bar | 6.4 | +| Timestamp | Metric Name | Aggregation Temporality | Attributes | Value | +| --------- | ------------ | ----------------------- | ----------------- | ----: | +| 0 | test_metric | Cumulative | labelA: foo | 4.0 | +| 2 | test_metric | Cumulative | labelA: bar | 3.1 | +| 4 | other_metric | Delta | fruitType: orange | 77.4 | +| 6 | test_metric | Cumulative | labelA: foo | 8.2 | +| 8 | test_metric | Cumulative | labelA: foo | 12.8 | +| 10 | test_metric | Cumulative | labelA: bar | 6.4 | The processor would immediately pass the following metrics to the next processor in the chain -| Timestamp | Metric Name | Aggregation Temporarility | Attributes | Value | -| --------- | ------------ | ------------------------- | ----------------- | ----: | -| 4 | other_metric | Delta | fruitType: orange | 77.4 | +| Timestamp | Metric Name | Aggregation Temporality | Attributes | Value | +| --------- | ------------ | ----------------------- | ----------------- | ----: | +| 4 | other_metric | Delta | fruitType: orange | 77.4 | Because it's a Delta metric. At the next `interval` (15s by default), the processor would pass the following metrics to the next processor in the chain -| Timestamp | Metric Name | Aggregation Temporarility | Attributes | Value | -| --------- | ----------- | ------------------------- | ----------- | ----: | -| 8 | test_metric | Cumulative | labelA: foo | 12.8 | -| 10 | test_metric | Cumulative | labelA: bar | 6.4 | +| Timestamp | Metric Name | Aggregation Temporality | Attributes | Value | +| --------- | ----------- | ----------------------- | ----------- | ----: | +| 8 | test_metric | Cumulative | labelA: foo | 12.8 | +| 10 | test_metric | Cumulative | labelA: bar | 6.4 | > [!IMPORTANT] > After exporting, any internal state is cleared. So if no new metrics come in, the next interval will export nothing. diff --git a/processor/k8sattributesprocessor/README.md b/processor/k8sattributesprocessor/README.md index ca38d8599ff9..425bef183f4a 100644 --- a/processor/k8sattributesprocessor/README.md +++ b/processor/k8sattributesprocessor/README.md @@ -341,7 +341,7 @@ k8sattributes: filter: namespace: ``` -With the namespace filter set, the processor will only look up pods and replicasets in the selected namespace. Note that with just a role binding, the processor can not query metadata such as labels and annotations from k8s `nodes` and `namespaces` which are cluster-scoped objects. 
This also means that the processor can not set the value for `k8s.cluster.uid` attribute if enabled, since the `k8s.cluster.uid` attribute is set to the uid of the namespace `kube-system` which is not queryable with namespaced rbac. +With the namespace filter set, the processor will only look up pods and replicasets in the selected namespace. Note that with just a role binding, the processor cannot query metadata such as labels and annotations from k8s `nodes` and `namespaces` which are cluster-scoped objects. This also means that the processor cannot set the value for `k8s.cluster.uid` attribute if enabled, since the `k8s.cluster.uid` attribute is set to the uid of the namespace `kube-system` which is not queryable with namespaced rbac. Example `Role` and `RoleBinding` to create in the namespace being watched. ```yaml @@ -389,7 +389,7 @@ When running as an agent, the processor detects IP addresses of pods sending spa and uses this information to extract metadata from pods. When running as an agent, it is important to apply a discovery filter so that the processor only discovers pods from the same host that it is running on. Not using such a filter can result in unnecessary resource usage especially on very large clusters. Once the filter is applied, -each processor will only query the k8s API for pods running on it's own node. +each processor will only query the k8s API for pods running on its own node. Node filter can be applied by setting the `filter.node` config option to the name of a k8s node. While this works as expected, it cannot be used to automatically filter pods by the same node that the processor is running on in @@ -498,7 +498,7 @@ The following config with the feature gate set will lead to validation error: #### Migration -Deprecation of the `extract.annotations.regex` and `extract.labels.regex` fields means that it is recommended to use the `ExtractPatterns` function from the transform processor instead. To convert your current configuration please check the `ExtractPatterns` function [documentation](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/ottlfuncs#extractpatterns). You should use the `pattern` parameter of `ExtractPatterns` instead of using the the `extract.annotations.regex` and `extract.labels.regex` fields. +Deprecation of the `extract.annotations.regex` and `extract.labels.regex` fields means that it is recommended to use the `ExtractPatterns` function from the transform processor instead. To convert your current configuration please check the `ExtractPatterns` function [documentation](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/ottlfuncs#extractpatterns). You should use the `pattern` parameter of `ExtractPatterns` instead of using the `extract.annotations.regex` and `extract.labels.regex` fields. 
##### Example diff --git a/processor/k8sattributesprocessor/e2e_test.go b/processor/k8sattributesprocessor/e2e_test.go index 7de25e62e3b7..147b968ec913 100644 --- a/processor/k8sattributesprocessor/e2e_test.go +++ b/processor/k8sattributesprocessor/e2e_test.go @@ -1099,6 +1099,8 @@ func TestE2E_NamespacedRBACNoPodIP(t *testing.T) { // make docker-otelcontribcol // KUBECONFIG=/tmp/kube-config-otelcol-e2e-testing kind load docker-image otelcontribcol:latest func TestE2E_ClusterRBACCollectorStartAfterTelemetryGen(t *testing.T) { + // TODO: Re-enable this test when the issue being tested here is fully solved: https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/37056 + t.Skip("Skipping test as https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/37056 is not fully solved yet") testDir := filepath.Join("testdata", "e2e", "clusterrbac") k8sClient, err := k8stest.NewK8sClient(testKubeConfig) diff --git a/processor/k8sattributesprocessor/internal/kube/client.go b/processor/k8sattributesprocessor/internal/kube/client.go index a589d5a1c1f5..3e38cafeaaef 100644 --- a/processor/k8sattributesprocessor/internal/kube/client.go +++ b/processor/k8sattributesprocessor/internal/kube/client.go @@ -273,7 +273,7 @@ func (c *WatchClient) Start() error { return nil } -// Stop signals the the k8s watcher/informer to stop watching for new events. +// Stop signals the k8s watcher/informer to stop watching for new events. func (c *WatchClient) Stop() { close(c.stopCh) } diff --git a/processor/k8sattributesprocessor/internal/kube/client_test.go b/processor/k8sattributesprocessor/internal/kube/client_test.go index de701f6fd673..5d28ac5b9508 100644 --- a/processor/k8sattributesprocessor/internal/kube/client_test.go +++ b/processor/k8sattributesprocessor/internal/kube/client_test.go @@ -428,7 +428,7 @@ func TestPodDelete(t *testing.T) { // delete empty IP pod c.handlePodDelete(&api_v1.Pod{}) - // delete non-existent IP + // delete nonexistent IP c.deleteQueue = c.deleteQueue[:0] pod := &api_v1.Pod{} pod.Status.PodIP = "9.9.9.9" @@ -494,14 +494,14 @@ func TestNamespaceDelete(t *testing.T) { // delete empty namespace c.handleNamespaceDelete(&api_v1.Namespace{}) - // delete non-existent namespace + // delete nonexistent namespace namespace := &api_v1.Namespace{} namespace.Name = "namespaceC" c.handleNamespaceDelete(namespace) assert.Len(t, c.Namespaces, 2) got := c.Namespaces["namespaceA"] assert.Equal(t, "namespaceA", got.Name) - // delete non-existent namespace when DeletedFinalStateUnknown + // delete nonexistent namespace when DeletedFinalStateUnknown c.handleNamespaceDelete(cache.DeletedFinalStateUnknown{Obj: namespace}) assert.Len(t, c.Namespaces, 2) got = c.Namespaces["namespaceA"] @@ -529,14 +529,14 @@ func TestNodeDelete(t *testing.T) { // delete empty node c.handleNodeDelete(&api_v1.Node{}) - // delete non-existent node + // delete nonexistent node node := &api_v1.Node{} node.Name = "nodeC" c.handleNodeDelete(node) assert.Len(t, c.Nodes, 2) got := c.Nodes["nodeA"] assert.Equal(t, "nodeA", got.Name) - // delete non-existent namespace when DeletedFinalStateUnknown + // delete nonexistent namespace when DeletedFinalStateUnknown c.handleNodeDelete(cache.DeletedFinalStateUnknown{Obj: node}) assert.Len(t, c.Nodes, 2) got = c.Nodes["nodeA"] diff --git a/processor/k8sattributesprocessor/processor_test.go b/processor/k8sattributesprocessor/processor_test.go index ee51cc82d9f0..6e6458ea4c82 100644 --- a/processor/k8sattributesprocessor/processor_test.go +++ 
b/processor/k8sattributesprocessor/processor_test.go @@ -1296,7 +1296,7 @@ func TestProcessorAddContainerAttributes(t *testing.T) { } } -func TestProcessorPicksUpPassthoughPodIp(t *testing.T) { +func TestProcessorPicksUpPassthroughPodIp(t *testing.T) { m := newMultiTest( t, NewFactory().CreateDefaultConfig(), diff --git a/processor/k8sattributesprocessor/testdata/config.yaml b/processor/k8sattributesprocessor/testdata/config.yaml index 1078ab73b7c5..9ac386ad4763 100644 --- a/processor/k8sattributesprocessor/testdata/config.yaml +++ b/processor/k8sattributesprocessor/testdata/config.yaml @@ -89,7 +89,7 @@ k8sattributes/4: auth_type: "kubeConfig" extract: metadata: - # the following metadata field has been depracated + # the following metadata field has been deprecated - k8s.cluster.name k8sattributes/too_many_sources: diff --git a/processor/logdedupprocessor/config_test.go b/processor/logdedupprocessor/config_test.go index a4e8fb120cd5..23a2b484f542 100644 --- a/processor/logdedupprocessor/config_test.go +++ b/processor/logdedupprocessor/config_test.go @@ -75,7 +75,7 @@ func TestValidateConfig(t *testing.T) { expectedErr: errors.New("an excludefield must start with"), }, { - desc: "invalid duplice exclude field", + desc: "invalid duplicate exclude field", cfg: &Config{ LogCountAttribute: defaultLogCountAttribute, Interval: defaultInterval, diff --git a/processor/logdedupprocessor/field_remover.go b/processor/logdedupprocessor/field_remover.go index bd82a7715214..dd261a14c6f1 100644 --- a/processor/logdedupprocessor/field_remover.go +++ b/processor/logdedupprocessor/field_remover.go @@ -15,7 +15,7 @@ const ( // fieldDelimiter is the delimiter used to split a field key into its parts. fieldDelimiter = "." - // fieldEscapeKeyReplacement is the string used to temporarily replace escaped delimters while splitting a field key. + // fieldEscapeKeyReplacement is the string used to temporarily replace escaped delimiters while splitting a field key. 
fieldEscapeKeyReplacement = "{TEMP_REPLACE}" ) diff --git a/processor/logdedupprocessor/field_remover_test.go b/processor/logdedupprocessor/field_remover_test.go index 173bee6b2e9e..9972acd3ee9d 100644 --- a/processor/logdedupprocessor/field_remover_test.go +++ b/processor/logdedupprocessor/field_remover_test.go @@ -73,7 +73,7 @@ func TestRemoveFields(t *testing.T) { nestedAttrMap := logRecord.Attributes().PutEmptyMap("nested") nestedAttrMap.PutInt("int", 2) - // Expected attribut map + // Expected attribute map expectedAttrsMap := pcommon.NewMap() expectedAttrsMap.PutStr("str", "attr str") expectedAttrHash := pdatautil.MapHash(expectedAttrsMap) diff --git a/processor/metricsgenerationprocessor/config.go b/processor/metricsgenerationprocessor/config.go index d9be98b559f0..54c857c5604b 100644 --- a/processor/metricsgenerationprocessor/config.go +++ b/processor/metricsgenerationprocessor/config.go @@ -61,7 +61,7 @@ type GenerationType string const ( - // Generates a new metric applying an arithmatic operation with two operands + // Generates a new metric applying an arithmetic operation with two operands calculate GenerationType = "calculate" // Generates a new metric scaling the value of s given metric with a provided constant diff --git a/processor/metricstransformprocessor/README.md b/processor/metricstransformprocessor/README.md index 34b3d25b59e5..c4b87e17307a 100644 --- a/processor/metricstransformprocessor/README.md +++ b/processor/metricstransformprocessor/README.md @@ -311,7 +311,7 @@ operations: ```yaml # Group metrics from one single ResourceMetrics and report them as multiple ResourceMetrics. # -# ex: Consider pod and container metrics collected from Kubernetes. Both the metrics are recorded under under one ResourceMetric +# ex: Consider pod and container metrics collected from Kubernetes. Both the metrics are recorded under one ResourceMetric # applying this transformation will result in two separate ResourceMetric packets with corresponding resource labels in the resource headers # # instead of regular $ use double dollar $$. Because $ is treated as a special character. @@ -320,11 +320,11 @@ operations: - include: ^k8s\.pod\.(.*)$$ match_type: regexp action: group - group_resource_labels: {"resouce.type": "k8s.pod", "source": "kubelet"} + group_resource_labels: {"resource.type": "k8s.pod", "source": "kubelet"} - include: ^container\.(.*)$$ match_type: regexp action: group - group_resource_labels: {"resouce.type": "container", "source": "kubelet"} + group_resource_labels: {"resource.type": "container", "source": "kubelet"} ``` ### Metric Transform Processor vs. [Attributes Processor for Metrics](../attributesprocessor) diff --git a/processor/metricstransformprocessor/config.go b/processor/metricstransformprocessor/config.go index ca08ec49c47c..d86493b97a28 100644 --- a/processor/metricstransformprocessor/config.go +++ b/processor/metricstransformprocessor/config.go @@ -18,7 +18,7 @@ const ( // newNameFieldName is the mapstructure field name for NewName field newNameFieldName = "new_name" - // groupResourceLabelsFieldName is the mapstructure field name for GroupResouceLabels field + // groupResourceLabelsFieldName is the mapstructure field name for GroupResourceLabels field groupResourceLabelsFieldName = "group_resource_labels" // aggregationTypeFieldName is the mapstructure field name for aggregationType field @@ -69,7 +69,7 @@ type transform struct { // REQUIRED only if Action is INSERT. 
NewName string `mapstructure:"new_name"` - // GroupResourceLabels specifes resource labels that will be appended to this group's new ResourceMetrics message + // GroupResourceLabels specifies resource labels that will be appended to this group's new ResourceMetrics message // REQUIRED only if Action is GROUP GroupResourceLabels map[string]string `mapstructure:"group_resource_labels"` @@ -152,7 +152,7 @@ const ( // Combine combines multiple metrics into a single metric. Combine ConfigAction = "combine" - // Group groups mutiple metrics matching the predicate into multiple ResourceMetrics messages + // Group groups multiple metrics matching the predicate into multiple ResourceMetrics messages Group ConfigAction = "group" ) @@ -168,7 +168,7 @@ func (ca ConfigAction) isValid() bool { return false } -// operationAction is the enum to capture the thress types of actions to perform for an operation. +// operationAction is the enum to capture the types of actions to perform for an operation. type operationAction string const ( diff --git a/processor/metricstransformprocessor/metrics_transform_processor_otlp.go b/processor/metricstransformprocessor/metrics_transform_processor_otlp.go index 398cd67cd96e..cf6ee289bb86 100644 --- a/processor/metricstransformprocessor/metrics_transform_processor_otlp.go +++ b/processor/metricstransformprocessor/metrics_transform_processor_otlp.go @@ -91,9 +91,9 @@ func (f internalFilterRegexp) submatches(metric pmetric.Metric) []int { return f.include.FindStringSubmatchIndex(metric.Name()) } -func (f internalFilterRegexp) expand(metricTempate, metricName string) string { +func (f internalFilterRegexp) expand(metricTemplate, metricName string) string { if submatches := f.include.FindStringSubmatchIndex(metricName); submatches != nil { - return string(f.include.ExpandString([]byte{}, metricTempate, metricName, submatches)) + return string(f.include.ExpandString([]byte{}, metricTemplate, metricName, submatches)) } return "" } @@ -442,7 +442,7 @@ func combine(transform internalTransform, metrics pmetric.MetricSlice) pmetric.M // groupMetrics groups all the provided timeseries that will be aggregated together based on all the label values. // Returns a map of grouped timeseries and the corresponding selected labels -// canBeCombined must be callled before. +// canBeCombined must be called before. func groupMetrics(metrics pmetric.MetricSlice, aggType aggregateutil.AggregationType, to pmetric.Metric) { ag := aggregateutil.AggGroups{} for i := 0; i < metrics.Len(); i++ { diff --git a/processor/probabilisticsamplerprocessor/README.md b/processor/probabilisticsamplerprocessor/README.md index e0059d9050e3..e4409897b0dd 100644 --- a/processor/probabilisticsamplerprocessor/README.md +++ b/processor/probabilisticsamplerprocessor/README.md @@ -60,7 +60,7 @@ instead of using the parent-based approach (e.g., using the `TraceIDRatioBased` sampler for a non-root span), incompleteness may result, and when spans and log records are independently sampled in a processor, as by this component, the same potential for completeness -arises. The consistency guarantee helps minimimize this issue. +arises. The consistency guarantee helps minimize this issue. Consistent probability samplers can be safely used with a mixture of probabilities and preserve sub-trace completeness, provided that child @@ -158,7 +158,7 @@ implies collecting log records from an expected value of 10 pods. 
OpenTelemetry specifies a consistent sampling mechanism using 56 bits of randomness, which may be obtained from the Trace ID according to the W3C Trace Context Level 2 specification. Randomness can also be -explicly encoding in the OpenTelemetry `tracestate` field, where it is +explicitly encoding in the OpenTelemetry `tracestate` field, where it is known as the R-value. This mode is named because it reduces the number of items transmitted @@ -183,7 +183,7 @@ for every 4 items input. ### Equalizing -This mode uses the same randomness mechanism as the propotional +This mode uses the same randomness mechanism as the proportional sampling mode, in this case considering how much each item was already sampled by preceding samplers. This mode can be used to lower sampling probability to a minimum value across a whole pipeline, @@ -241,7 +241,7 @@ tracestate: ot=th:0;rv:9b8233f7e3a151 This component, using either proportional or equalizing modes, could apply 50% sampling the Span. This span with randomness value `9b8233f7e3a151` is consistently sampled at 50% because the threshold, -when zero padded (i.e., `80000000000000`), is less than the randomess +when zero padded (i.e., `80000000000000`), is less than the randomness value. The resulting span will have the following tracestate: ``` diff --git a/processor/probabilisticsamplerprocessor/logsprocessor.go b/processor/probabilisticsamplerprocessor/logsprocessor.go index fd4fa6b3ff53..970c5321f7b7 100644 --- a/processor/probabilisticsamplerprocessor/logsprocessor.go +++ b/processor/probabilisticsamplerprocessor/logsprocessor.go @@ -166,7 +166,7 @@ func (th *hashingSampler) randomnessFromLogRecord(logRec plog.LogRecord) (random } // randomnessFromLogRecord (hashingSampler) uses OTEP 235 semantic -// conventions basing its deicsion only on the TraceID. +// conventions basing its decision only on the TraceID. func (ctc *consistentTracestateCommon) randomnessFromLogRecord(logRec plog.LogRecord) (randomnessNamer, samplingCarrier, error) { lrc, err := newLogRecordCarrier(logRec) rnd := newMissingRandomnessMethod() diff --git a/processor/probabilisticsamplerprocessor/logsprocessor_test.go b/processor/probabilisticsamplerprocessor/logsprocessor_test.go index 7f675d80a09e..510ff038c92e 100644 --- a/processor/probabilisticsamplerprocessor/logsprocessor_test.go +++ b/processor/probabilisticsamplerprocessor/logsprocessor_test.go @@ -354,7 +354,7 @@ func TestLogsSamplingState(t *testing.T) { tid: mustParseTID("fefefefefefefefefefefefefefefefe"), attrs: map[string]any{ "sampling.threshold": "c", // Corresponds with 25% - "prio": 37, // Lower than 50, higher than 25 + "prio": 37, // Lower than 50, greater than 25 }, sampled: true, adjCount: 4, diff --git a/processor/probabilisticsamplerprocessor/sampler_mode.go b/processor/probabilisticsamplerprocessor/sampler_mode.go index 3fe15612280c..47b74e520b6d 100644 --- a/processor/probabilisticsamplerprocessor/sampler_mode.go +++ b/processor/probabilisticsamplerprocessor/sampler_mode.go @@ -63,7 +63,7 @@ const ( // Proportional uses OpenTelemetry consistent probability // sampling information (OTEP 235), multiplies incoming - // sampling probaiblities. + // sampling probabilities. Proportional SamplerMode = "proportional" // defaultHashSeed is applied when the mode is unset. 
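The threshold comparison described in the probabilistic sampler README hunks above can be illustrated with a short, self-contained Go sketch. This is not the processor's code; the helper name `keep` is invented for illustration and only the arithmetic is shown: the zero-padded rejection threshold and the tracestate R-value are both read as 56-bit hex integers, and the item is kept when its randomness value is not below the threshold.

```go
package main

import (
	"fmt"
	"strconv"
)

// keep reports whether an item with 56-bit randomness rvHex survives the
// rejection threshold thHex, both written as zero-padded 14-digit hex strings.
// Hypothetical helper for illustration only.
func keep(thHex, rvHex string) (bool, error) {
	th, err := strconv.ParseUint(thHex, 16, 64)
	if err != nil {
		return false, err
	}
	rv, err := strconv.ParseUint(rvHex, 16, 64)
	if err != nil {
		return false, err
	}
	return rv >= th, nil
}

func main() {
	// 50% sampling: the tracestate threshold "8" zero-pads to "80000000000000".
	sampled, _ := keep("80000000000000", "9b8233f7e3a151")
	fmt.Println(sampled) // true: the randomness value is not below the threshold
}
```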
diff --git a/processor/resourcedetectionprocessor/README.md b/processor/resourcedetectionprocessor/README.md index cf16a5abc9e8..6186156d57ab 100644 --- a/processor/resourcedetectionprocessor/README.md +++ b/processor/resourcedetectionprocessor/README.md @@ -410,7 +410,7 @@ If accurate parsing cannot be performed, the infrastructure resource group value ### Consul -Queries a [consul agent](https://www.consul.io/docs/agent) and reads its' [configuration endpoint](https://www.consul.io/api-docs/agent#read-configuration) to retrieve related resource attributes: +Queries a [consul agent](https://www.consul.io/docs/agent) and reads its [configuration endpoint](https://www.consul.io/api-docs/agent#read-configuration) to retrieve related resource attributes: The list of the populated resource attributes can be found at [Consul Detector Resource Attributes](./internal/consul/documentation.md). @@ -481,11 +481,11 @@ and add this to your workload: fieldPath: spec.nodeName ``` -### Openshift +### OpenShift Queries the OpenShift and Kubernetes API to retrieve related resource attributes. -The list of the populated resource attributes can be found at [Openshift Detector Resource Attributes](./internal/openshift/documentation.md). +The list of the populated resource attributes can be found at [OpenShift Detector Resource Attributes](./internal/openshift/documentation.md). The following permissions are required: ```yaml @@ -588,5 +588,5 @@ Note that if multiple detectors are inserting the same attribute name, the first * ecs * ec2 -The full list of settings exposed for this extension are documented [here](./config.go) -with detailed sample configurations [here](./testdata/config.yaml). +The full list of settings exposed for this extension are documented in [config.go](./config.go) +with detailed sample configurations in [testdata/config.yaml](./testdata/config.yaml). 
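The EC2 detector documented in the README above is being moved, along with the metadata provider changed earlier in this diff, from aws-sdk-go v1 to the aws-sdk-go-v2 IMDS client. A minimal usage sketch of the v2 calls the new provider wraps follows; it assumes the process runs on an EC2 instance with IMDS reachable and that default configuration loading succeeds, and it is not the detector's own code.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
)

func main() {
	ctx := context.Background()

	// Load the default AWS configuration (region, credentials, etc.).
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatalf("load config: %v", err)
	}

	// Query IMDS for the instance identity document, as the updated
	// metadata provider does after this change.
	client := imds.NewFromConfig(cfg)
	out, err := client.GetInstanceIdentityDocument(ctx, &imds.GetInstanceIdentityDocumentInput{})
	if err != nil {
		log.Fatalf("get identity document: %v", err)
	}

	fmt.Println(out.Region, out.InstanceID, out.AvailabilityZone)
}
```

The contrib provider layers a small `Provider` interface and error wrapping over these calls, as shown in the metadata.go hunk earlier in this diff.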
diff --git a/processor/resourcedetectionprocessor/config.go b/processor/resourcedetectionprocessor/config.go index 78fb07a423ba..ba34649e2c72 100644 --- a/processor/resourcedetectionprocessor/config.go +++ b/processor/resourcedetectionprocessor/config.go @@ -80,7 +80,7 @@ type DetectorConfig struct { // SystemConfig contains user-specified configurations for the System detector SystemConfig system.Config `mapstructure:"system"` - // OpenShift contains user-specified configurations for the Openshift detector + // OpenShift contains user-specified configurations for the OpenShift detector OpenShiftConfig openshift.Config `mapstructure:"openshift"` // K8SNode contains user-specified configurations for the K8SNode detector diff --git a/processor/resourcedetectionprocessor/go.mod b/processor/resourcedetectionprocessor/go.mod index 84a80555bd0a..f0a243f22c46 100644 --- a/processor/resourcedetectionprocessor/go.mod +++ b/processor/resourcedetectionprocessor/go.mod @@ -6,6 +6,10 @@ require ( cloud.google.com/go/compute/metadata v0.6.0 github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 github.com/aws/aws-sdk-go v1.55.5 + github.com/aws/aws-sdk-go-v2 v1.32.7 + github.com/aws/aws-sdk-go-v2/config v1.28.6 + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.22 + github.com/aws/aws-sdk-go-v2/service/ec2 v1.196.0 github.com/google/go-cmp v0.6.0 github.com/hashicorp/consul/api v1.31.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.117.0 @@ -42,6 +46,16 @@ require ( github.com/Microsoft/go-winio v0.6.1 // indirect github.com/Showmax/go-fqdn v1.0.0 // indirect github.com/armon/go-metrics v0.4.1 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.47 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 // indirect + github.com/aws/smithy-go v1.22.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/distribution/reference v0.5.0 // indirect diff --git a/processor/resourcedetectionprocessor/go.sum b/processor/resourcedetectionprocessor/go.sum index bfda0111b757..5768da501f80 100644 --- a/processor/resourcedetectionprocessor/go.sum +++ b/processor/resourcedetectionprocessor/go.sum @@ -57,6 +57,34 @@ github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgI github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go-v2 v1.32.7 h1:ky5o35oENWi0JYWUZkB7WYvVPP+bcRF5/Iq7JWSb5Rw= +github.com/aws/aws-sdk-go-v2 v1.32.7/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= +github.com/aws/aws-sdk-go-v2/config v1.28.6 h1:D89IKtGrs/I3QXOLNTH93NJYtDhm8SYa9Q5CsPShmyo= +github.com/aws/aws-sdk-go-v2/config v1.28.6/go.mod h1:GDzxJ5wyyFSCoLkS+UhGB0dArhb9mI+Co4dHtoTxbko= 
+github.com/aws/aws-sdk-go-v2/credentials v1.17.47 h1:48bA+3/fCdi2yAwVt+3COvmatZ6jUDNkDTIsqDiMUdw= +github.com/aws/aws-sdk-go-v2/credentials v1.17.47/go.mod h1:+KdckOejLW3Ks3b0E3b5rHsr2f9yuORBum0WPnE5o5w= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.22 h1:kqOrpojG71DxJm/KDPO+Z/y1phm1JlC8/iT+5XRmAn8= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.22/go.mod h1:NtSFajXVVL8TA2QNngagVZmUtXciyrHOt7xgz4faS/M= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 h1:s/fF4+yDQDoElYhfIVvSNyeCydfbuTKzhxSXDXCPasU= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25/go.mod h1:IgPfDv5jqFIzQSNbUEMoitNooSMXjRSDkhXv8jiROvU= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 h1:ZntTCl5EsYnhN/IygQEUugpdwbhdkom9uHcbCftiGgA= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25/go.mod h1:DBdPrgeocww+CSl1C8cEV8PN1mHMBhuCDLpXezyvWkE= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.196.0 h1:ZBtoihAqfT+5b1FwGHOubq8k10KwaIyKZd2/CRTucAU= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.196.0/go.mod h1:00zqVNJFK6UASrTnuvjJHJuaqUdkVz5tW8Ip+VhzuNg= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 h1:iXtILhvDxB6kPvEXgsDhGaZCSC6LQET5ZHSdJozeI0Y= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1/go.mod h1:9nu0fVANtYiAePIBh2/pFUSwtJ402hLnp854CNoDOeE= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 h1:50+XsN70RS7dwJ2CkVNXzj7U2L1HKP8nqTd3XWEXBN4= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6/go.mod h1:WqgLmwY7so32kG01zD8CPTJWVWM+TzJoOVHwTg4aPug= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 h1:rLnYAfXQ3YAccocshIH5mzNNwZBkBo+bP6EhIxak6Hw= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.7/go.mod h1:ZHtuQJ6t9A/+YDuxOLnbryAmITtr8UysSny3qcyvJTc= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 h1:JnhTZR3PiYDNKlXy50/pNeix9aGMo6lLpXwJ1mw8MD4= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6/go.mod h1:URronUEGfXZN1VpdktPSD1EkAL9mfrV+2F4sjH38qOY= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 h1:s4074ZO1Hk8qv65GqNXqDjmkf4HSQqJukaLuuW0TpDA= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.2/go.mod h1:mVggCnIWoM09jP71Wh+ea7+5gAp53q+49wDFs1SW5z8= +github.com/aws/smithy-go v1.22.1 h1:/HPHZQ0g7f4eUeK6HKglFz8uwVfZKgoI25rb/J+dnro= +github.com/aws/smithy-go v1.22.1/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= diff --git a/processor/resourcedetectionprocessor/internal/aws/ec2/ec2.go b/processor/resourcedetectionprocessor/internal/aws/ec2/ec2.go index a785030e4f40..21f2183443fc 100644 --- a/processor/resourcedetectionprocessor/internal/aws/ec2/ec2.go +++ b/processor/resourcedetectionprocessor/internal/aws/ec2/ec2.go @@ -9,10 +9,10 @@ import ( "net/http" "regexp" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/aws/aws-sdk-go/service/ec2/ec2iface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/ec2" + "github.com/aws/aws-sdk-go-v2/service/ec2/types" 
"go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/processor" conventions "go.opentelemetry.io/collector/semconv/v1.6.1" @@ -32,21 +32,21 @@ const ( var _ internal.Detector = (*Detector)(nil) type ec2ifaceBuilder interface { - buildClient(region string, client *http.Client) (ec2iface.EC2API, error) + buildClient(ctx context.Context, region string, client *http.Client) (ec2.DescribeTagsAPIClient, error) } type ec2ClientBuilder struct{} -func (e *ec2ClientBuilder) buildClient(region string, client *http.Client) (ec2iface.EC2API, error) { - sess, err := session.NewSession(&aws.Config{ - Region: aws.String(region), - HTTPClient: client, - }, +func (e *ec2ClientBuilder) buildClient(ctx context.Context, region string, client *http.Client) (ec2.DescribeTagsAPIClient, error) { + cfg, err := config.LoadDefaultConfig(ctx, + config.WithRegion(region), + config.WithHTTPClient(client), ) if err != nil { return nil, err } - return ec2.New(sess), nil + + return ec2.NewFromConfig(cfg), nil } type Detector struct { @@ -59,7 +59,7 @@ type Detector struct { func NewDetector(set processor.Settings, dcfg internal.DetectorConfig) (internal.Detector, error) { cfg := dcfg.(Config) - sess, err := session.NewSession() + awsConfig, err := config.LoadDefaultConfig(context.Background()) if err != nil { return nil, err } @@ -69,7 +69,7 @@ func NewDetector(set processor.Settings, dcfg internal.DetectorConfig) (internal } return &Detector{ - metadataProvider: ec2provider.NewProvider(sess), + metadataProvider: ec2provider.NewProvider(awsConfig), tagKeyRegexes: tagKeyRegexes, logger: set.Logger, rb: metadata.NewResourceBuilder(cfg.ResourceAttributes), @@ -106,12 +106,12 @@ func (d *Detector) Detect(ctx context.Context) (resource pcommon.Resource, schem if len(d.tagKeyRegexes) != 0 { httpClient := getClientConfig(ctx, d.logger) - ec2Client, err := d.ec2ClientBuilder.buildClient(meta.Region, httpClient) + ec2Client, err := d.ec2ClientBuilder.buildClient(ctx, meta.Region, httpClient) if err != nil { d.logger.Warn("failed to build ec2 client", zap.Error(err)) return res, conventions.SchemaURL, nil } - tags, err := fetchEC2Tags(ec2Client, meta.InstanceID, d.tagKeyRegexes) + tags, err := fetchEC2Tags(ctx, ec2Client, meta.InstanceID, d.tagKeyRegexes) if err != nil { d.logger.Warn("failed fetching ec2 instance tags", zap.Error(err)) } else { @@ -132,13 +132,11 @@ func getClientConfig(ctx context.Context, logger *zap.Logger) *http.Client { return client } -func fetchEC2Tags(svc ec2iface.EC2API, instanceID string, tagKeyRegexes []*regexp.Regexp) (map[string]string, error) { - ec2Tags, err := svc.DescribeTags(&ec2.DescribeTagsInput{ - Filters: []*ec2.Filter{{ - Name: aws.String("resource-id"), - Values: []*string{ - aws.String(instanceID), - }, +func fetchEC2Tags(ctx context.Context, svc ec2.DescribeTagsAPIClient, instanceID string, tagKeyRegexes []*regexp.Regexp) (map[string]string, error) { + ec2Tags, err := svc.DescribeTags(ctx, &ec2.DescribeTagsInput{ + Filters: []types.Filter{{ + Name: aws.String("resource-id"), + Values: []string{instanceID}, }}, }) if err != nil { diff --git a/processor/resourcedetectionprocessor/internal/aws/ec2/ec2_test.go b/processor/resourcedetectionprocessor/internal/aws/ec2/ec2_test.go index 4925b8a6b9a4..d0d6e314e9c5 100644 --- a/processor/resourcedetectionprocessor/internal/aws/ec2/ec2_test.go +++ b/processor/resourcedetectionprocessor/internal/aws/ec2/ec2_test.go @@ -10,9 +10,10 @@ import ( "regexp" "testing" - "github.com/aws/aws-sdk-go/aws/ec2metadata" - 
"github.com/aws/aws-sdk-go/service/ec2" - "github.com/aws/aws-sdk-go/service/ec2/ec2iface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" + "github.com/aws/aws-sdk-go-v2/service/ec2" + ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/pdata/pcommon" @@ -26,7 +27,7 @@ import ( var errUnavailable = errors.New("ec2metadata unavailable") type mockMetadata struct { - retIDDoc ec2metadata.EC2InstanceIdentityDocument + retIDDoc imds.InstanceIdentityDocument retErrIDDoc error retHostname string @@ -39,13 +40,13 @@ var _ ec2provider.Provider = (*mockMetadata)(nil) type mockClientBuilder struct{} -func (e *mockClientBuilder) buildClient(_ string, _ *http.Client) (ec2iface.EC2API, error) { +func (e *mockClientBuilder) buildClient(_ context.Context, _ string, _ *http.Client) (ec2.DescribeTagsAPIClient, error) { return &mockEC2Client{}, nil } type mockClientBuilderError struct{} -func (e *mockClientBuilderError) buildClient(_ string, _ *http.Client) (ec2iface.EC2API, error) { +func (e *mockClientBuilderError) buildClient(_ context.Context, _ string, _ *http.Client) (ec2.DescribeTagsAPIClient, error) { return &mockEC2ClientError{}, nil } @@ -56,9 +57,9 @@ func (mm mockMetadata) InstanceID(_ context.Context) (string, error) { return "", nil } -func (mm mockMetadata) Get(_ context.Context) (ec2metadata.EC2InstanceIdentityDocument, error) { +func (mm mockMetadata) Get(_ context.Context) (imds.InstanceIdentityDocument, error) { if mm.retErrIDDoc != nil { - return ec2metadata.EC2InstanceIdentityDocument{}, mm.retErrIDDoc + return imds.InstanceIdentityDocument{}, mm.retErrIDDoc } return mm.retIDDoc, nil } @@ -111,36 +112,35 @@ func TestNewDetector(t *testing.T) { } // Define a mock client to mock connecting to an EC2 instance -type mockEC2ClientError struct { - ec2iface.EC2API -} +type mockEC2ClientError struct{} // override the DescribeTags function to mock the output from an actual EC2 instance -func (m *mockEC2ClientError) DescribeTags(_ *ec2.DescribeTagsInput) (*ec2.DescribeTagsOutput, error) { +func (m *mockEC2ClientError) DescribeTags(_ context.Context, _ *ec2.DescribeTagsInput, _ ...func(*ec2.Options)) (*ec2.DescribeTagsOutput, error) { return nil, errors.New("Error fetching tags") } -type mockEC2Client struct { - ec2iface.EC2API -} +type mockEC2Client struct{} // override the DescribeTags function to mock the output from an actual EC2 instance -func (m *mockEC2Client) DescribeTags(input *ec2.DescribeTagsInput) (*ec2.DescribeTagsOutput, error) { - if *input.Filters[0].Values[0] == "error" { +func (m *mockEC2Client) DescribeTags(_ context.Context, input *ec2.DescribeTagsInput, _ ...func(*ec2.Options)) (*ec2.DescribeTagsOutput, error) { + if len(input.Filters) > 0 && len(input.Filters[0].Values) > 0 && input.Filters[0].Values[0] == "error" { return nil, errors.New("error") } - tag1 := "tag1" - tag2 := "tag2" - resource1 := "resource1" - val1 := "val1" - val2 := "val2" - resourceType := "type" - return &ec2.DescribeTagsOutput{ - Tags: []*ec2.TagDescription{ - {Key: &tag1, ResourceId: &resource1, ResourceType: &resourceType, Value: &val1}, - {Key: &tag2, ResourceId: &resource1, ResourceType: &resourceType, Value: &val2}, + Tags: []ec2types.TagDescription{ + { + Key: aws.String("tag1"), + ResourceId: aws.String("resource1"), + ResourceType: "type", + Value: aws.String("val1"), + }, + { + Key: aws.String("tag2"), + ResourceId: aws.String("resource1"), + 
ResourceType: "type", + Value: aws.String("val2"), + }, }, }, nil } @@ -164,7 +164,7 @@ func TestDetector_Detect(t *testing.T) { { name: "success", fields: fields{metadataProvider: &mockMetadata{ - retIDDoc: ec2metadata.EC2InstanceIdentityDocument{ + retIDDoc: imds.InstanceIdentityDocument{ Region: "us-west-2", AccountID: "account1234", AvailabilityZone: "us-west-2a", @@ -194,7 +194,7 @@ func TestDetector_Detect(t *testing.T) { { name: "success with tags", fields: fields{metadataProvider: &mockMetadata{ - retIDDoc: ec2metadata.EC2InstanceIdentityDocument{ + retIDDoc: imds.InstanceIdentityDocument{ Region: "us-west-2", AccountID: "account1234", AvailabilityZone: "us-west-2a", @@ -228,7 +228,7 @@ func TestDetector_Detect(t *testing.T) { { name: "success without tags returned from describeTags", fields: fields{metadataProvider: &mockMetadata{ - retIDDoc: ec2metadata.EC2InstanceIdentityDocument{ + retIDDoc: imds.InstanceIdentityDocument{ Region: "us-west-2", AccountID: "account1234", AvailabilityZone: "us-west-2a", @@ -259,7 +259,7 @@ func TestDetector_Detect(t *testing.T) { { name: "endpoint not available", fields: fields{metadataProvider: &mockMetadata{ - retIDDoc: ec2metadata.EC2InstanceIdentityDocument{}, + retIDDoc: imds.InstanceIdentityDocument{}, retErrIDDoc: errors.New("should not be called"), isAvailable: false, }}, @@ -270,7 +270,7 @@ func TestDetector_Detect(t *testing.T) { { name: "get fails", fields: fields{metadataProvider: &mockMetadata{ - retIDDoc: ec2metadata.EC2InstanceIdentityDocument{}, + retIDDoc: imds.InstanceIdentityDocument{}, retErrIDDoc: errors.New("get failed"), isAvailable: true, }}, @@ -281,7 +281,7 @@ func TestDetector_Detect(t *testing.T) { { name: "hostname fails", fields: fields{metadataProvider: &mockMetadata{ - retIDDoc: ec2metadata.EC2InstanceIdentityDocument{}, + retIDDoc: imds.InstanceIdentityDocument{}, retHostname: "", retErrHostname: errors.New("hostname failed"), isAvailable: true, @@ -354,7 +354,7 @@ func TestEC2Tags(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { m := &mockEC2Client{} - output, err := fetchEC2Tags(m, tt.resourceID, tt.tagKeyRegexes) + output, err := fetchEC2Tags(context.Background(), m, tt.resourceID, tt.tagKeyRegexes) if tt.shouldError { assert.Error(t, err) return diff --git a/processor/resourcedetectionprocessor/internal/heroku/documentation.md b/processor/resourcedetectionprocessor/internal/heroku/documentation.md index 674a47c4bade..ac927afa7c53 100644 --- a/processor/resourcedetectionprocessor/internal/heroku/documentation.md +++ b/processor/resourcedetectionprocessor/internal/heroku/documentation.md @@ -15,4 +15,4 @@ | heroku.release.creation_timestamp | The heroku.release.creation_timestamp | Any Str | true | | service.instance.id | The service.instance.id | Any Str | true | | service.name | Heroku app name recorded as service.name. | Any Str | true | -| service.version | Heroku relese version set as service.version. | Any Str | true | +| service.version | Heroku release version set as service.version. | Any Str | true | diff --git a/processor/resourcedetectionprocessor/internal/heroku/metadata.yaml b/processor/resourcedetectionprocessor/internal/heroku/metadata.yaml index 275932d67fb7..3deaa872d0fa 100644 --- a/processor/resourcedetectionprocessor/internal/heroku/metadata.yaml +++ b/processor/resourcedetectionprocessor/internal/heroku/metadata.yaml @@ -32,6 +32,6 @@ resource_attributes: enabled: true type: string service.version: - description: Heroku relese version set as service.version. 
+ description: Heroku release version set as service.version. enabled: true type: string diff --git a/processor/resourcedetectionprocessor/internal/openshift/config.go b/processor/resourcedetectionprocessor/internal/openshift/config.go index 408bcc760984..bed20f02f493 100644 --- a/processor/resourcedetectionprocessor/internal/openshift/config.go +++ b/processor/resourcedetectionprocessor/internal/openshift/config.go @@ -47,7 +47,7 @@ type Config struct { Token string `mapstructure:"token"` // TLSSettings contains TLS configurations that are specific to client - // connection used to communicate with the Openshift API. + // connection used to communicate with the OpenShift API. TLSSettings configtls.ClientConfig `mapstructure:"tls"` ResourceAttributes metadata.ResourceAttributesConfig `mapstructure:"resource_attributes"` diff --git a/processor/resourcedetectionprocessor/internal/resourcedetection_test.go b/processor/resourcedetectionprocessor/internal/resourcedetection_test.go index 4fcba6437995..f07702adac8b 100644 --- a/processor/resourcedetectionprocessor/internal/resourcedetection_test.go +++ b/processor/resourcedetectionprocessor/internal/resourcedetection_test.go @@ -119,7 +119,7 @@ func TestDetectResource_InvalidDetectorType(t *testing.T) { require.EqualError(t, err, fmt.Sprintf("invalid detector key: %v", mockDetectorKey)) } -func TestDetectResource_DetectoryFactoryError(t *testing.T) { +func TestDetectResource_DetectorFactoryError(t *testing.T) { mockDetectorKey := DetectorType("mock") p := NewProviderFactory(map[DetectorType]DetectorFactory{ mockDetectorKey: func(processor.Settings, DetectorConfig) (Detector, error) { diff --git a/processor/routingprocessor/README.md b/processor/routingprocessor/README.md index 5ae2c563d5c7..3a4dbc65b6a7 100644 --- a/processor/routingprocessor/README.md +++ b/processor/routingprocessor/README.md @@ -175,7 +175,7 @@ It is also possible to mix both the conventional routing configuration and the r - [delete_key](../../pkg/ottl/ottlfuncs/README.md#delete_key) - [delete_matching_keys](../../pkg/ottl/ottlfuncs/README.md#delete_matching_keys) -The full list of settings exposed for this processor are documented [here](./config.go) with detailed sample configuration files: +The full list of settings exposed for this processor are documented in [config.go](./config.go) with detailed sample configuration files: - [logs](./testdata/config_logs.yaml) - [metrics](./testdata/config_metrics.yaml) diff --git a/processor/routingprocessor/extract_test.go b/processor/routingprocessor/extract_test.go index b5a3136ab23b..243f2fa9b7b7 100644 --- a/processor/routingprocessor/extract_test.go +++ b/processor/routingprocessor/extract_test.go @@ -68,7 +68,7 @@ func TestExtractorForTraces_FromContext(t *testing.T) { expectedValue: "acme", }, { - name: "value from existing HTTP attribute: case insensitive", + name: "value from existing HTTP attribute: case-insensitive", ctxFunc: func() context.Context { return client.NewContext(context.Background(), client.Info{Metadata: client.NewMetadata(map[string][]string{ diff --git a/processor/schemaprocessor/README.md b/processor/schemaprocessor/README.md index 93af6889ffed..08e322121d71 100644 --- a/processor/schemaprocessor/README.md +++ b/processor/schemaprocessor/README.md @@ -26,7 +26,7 @@ the translations needed for signals that match the schema URL. 
## Schema Formats -A schema URl is made up in two parts, _Schema Family_ and _Schema Version_, the schema URL is broken down like so: +A [schema URL](https://opentelemetry.io/docs/reference/specification/schemas/overview/#schema-url) is made up in two parts, _Schema Family_ and _Schema Version_, the schema URL is broken down like so: ```text | Schema URL | @@ -35,7 +35,6 @@ A schema URl is made up in two parts, _Schema Family_ and _Schema Version_, the ``` The final path in the schema URL _MUST_ be the schema version and the preceding portion of the URL is the _Schema Family_. -To read about schema formats, please read more [here](https://opentelemetry.io/docs/reference/specification/schemas/overview/#schema-url) ## Targets Schemas diff --git a/processor/schemaprocessor/internal/fixture/parallel.go b/processor/schemaprocessor/internal/fixture/parallel.go index efbcff92de40..c7c0eb5233de 100644 --- a/processor/schemaprocessor/internal/fixture/parallel.go +++ b/processor/schemaprocessor/internal/fixture/parallel.go @@ -14,7 +14,7 @@ import ( ) // ParallelRaceCompute starts `count` number of go routines that calls the provided function `fn` -// at the same to allow the race detector greater oppotunity to capture known race conditions. +// at the same to allow the race detector greater opportunity to capture known race conditions. // This method blocks until each count number of fn has completed, any returned errors is considered // a failing test method. // If the race detector is not enabled, the function then skips with an notice. diff --git a/processor/schemaprocessor/internal/migrate/conditional_test.go b/processor/schemaprocessor/internal/migrate/conditional_test.go index de6e5cad298c..8c2c6a10d3d6 100644 --- a/processor/schemaprocessor/internal/migrate/conditional_test.go +++ b/processor/schemaprocessor/internal/migrate/conditional_test.go @@ -39,7 +39,7 @@ func TestConditionalAttributeSetApply(t *testing.T) { }, "application start", ), - check: "datatbase operation", + check: "database operation", attr: testHelperBuildMap(func(m pcommon.Map) { m.PutStr("service.version", "v0.0.0") }), @@ -48,13 +48,13 @@ func TestConditionalAttributeSetApply(t *testing.T) { }), }, { - name: "No condition set, applys to all", + name: "No condition set, applies to all", cond: NewConditionalAttributeSet[string]( map[string]string{ "service.version": "application.version", }, ), - check: "datatbase operation", + check: "database operation", attr: testHelperBuildMap(func(m pcommon.Map) { m.PutStr("service.version", "v0.0.0") }), @@ -118,7 +118,7 @@ func TestConditionalAttributeSetRollback(t *testing.T) { }, "application start", ), - check: "datatbase operation", + check: "database operation", attr: testHelperBuildMap(func(m pcommon.Map) { m.PutStr("service.version", "v0.0.0") }), @@ -127,13 +127,13 @@ func TestConditionalAttributeSetRollback(t *testing.T) { }), }, { - name: "No condition set, applys to all", + name: "No condition set, applies to all", cond: NewConditionalAttributeSet[string]( map[string]string{ "service.version": "application.version", }, ), - check: "datatbase operation", + check: "database operation", attr: testHelperBuildMap(func(m pcommon.Map) { m.PutStr("application.version", "v0.0.0") }), diff --git a/processor/schemaprocessor/internal/migrate/multi_conditional_test.go b/processor/schemaprocessor/internal/migrate/multi_conditional_test.go index 98c2c204b43a..af1013e432f4 100644 --- a/processor/schemaprocessor/internal/migrate/multi_conditional_test.go +++ 
b/processor/schemaprocessor/internal/migrate/multi_conditional_test.go @@ -40,7 +40,7 @@ func TestMultiConditionalAttributeSetApply(t *testing.T) { }, map[string][]string{"span.name": {"application start"}}, ), - inCondData: map[string]string{"span.name": "datatbase operation"}, + inCondData: map[string]string{"span.name": "database operation"}, inAttr: testHelperBuildMap(func(m pcommon.Map) { m.PutStr("service.version", "v0.0.0") }), @@ -49,14 +49,14 @@ func TestMultiConditionalAttributeSetApply(t *testing.T) { }), }, { - name: "No condition set, applys to all", + name: "No condition set, applies to all", cond: NewMultiConditionalAttributeSet[string]( map[string]string{ "service.version": "application.version", }, map[string][]string{}, ), - inCondData: map[string]string{"span.name": "datatbase operation"}, + inCondData: map[string]string{"span.name": "database operation"}, inAttr: testHelperBuildMap(func(m pcommon.Map) { m.PutStr("service.version", "v0.0.0") }), diff --git a/processor/sumologicprocessor/README.md b/processor/sumologicprocessor/README.md index d49a71d42bf9..4f46afec810a 100644 --- a/processor/sumologicprocessor/README.md +++ b/processor/sumologicprocessor/README.md @@ -101,7 +101,7 @@ processors: ### Adding `cloud.namespace` resource attribute Some of the apps in Sumo Logic require the `cloud.namespace` attribute to be set -to better understand the data coming from AWS EC2, AWS ECS and AWS Elactic Beanstalk. +to better understand the data coming from AWS EC2, AWS ECS and AWS Elastic Beanstalk. This attribute is similar to the standard OpenTelemetry attribute [`cloud.provider`][opentelemetry_cloud_provider_attribute]. In the future, the Sumo Logic apps might switch to the standard `cloud.provider` attribute. Before this happens, the following mapping defines the relationship between `cloud.provider` and `cloud.namespace` values: diff --git a/processor/sumologicprocessor/config.go b/processor/sumologicprocessor/config.go index ec38c01da927..4c7c3ee55608 100644 --- a/processor/sumologicprocessor/config.go +++ b/processor/sumologicprocessor/config.go @@ -26,7 +26,7 @@ const ( defaultAddCloudNamespace = true defaultTranslateAttributes = true defaultTranslateTelegrafAttributes = true - defaultTranlateDockerMetrics = false + defaultTranslateDockerMetrics = false // Nesting processor default config defaultNestingEnabled = false @@ -72,7 +72,7 @@ func createDefaultConfig() component.Config { SpanIDAttribute: &logFieldAttribute{defaultAddSpanIDAttribute, SpanIDAttributeName}, TraceIDAttribute: &logFieldAttribute{defaultAddTraceIDAttribute, TraceIDAttributeName}, }, - TranslateDockerMetrics: defaultTranlateDockerMetrics, + TranslateDockerMetrics: defaultTranslateDockerMetrics, } } diff --git a/processor/sumologicprocessor/processor_test.go b/processor/sumologicprocessor/processor_test.go index 6f9c11a726c0..02f31879a84f 100644 --- a/processor/sumologicprocessor/processor_test.go +++ b/processor/sumologicprocessor/processor_test.go @@ -86,7 +86,7 @@ func TestAddCloudNamespaceForLogs(t *testing.T) { }, }, { - name: "does not add cloud.namespce attribute when disabled", + name: "does not add cloud.namespace attribute when disabled", addCloudNamespace: false, createLogs: func() plog.Logs { inputLogs := plog.NewLogs() @@ -212,7 +212,7 @@ func TestAddCloudNamespaceForMetrics(t *testing.T) { }, }, { - name: "does not add cloud.namespce attribute when disabled", + name: "does not add cloud.namespace attribute when disabled", addCloudNamespace: false, createMetrics: func() pmetric.Metrics { 
inputMetrics := pmetric.NewMetrics() @@ -338,7 +338,7 @@ func TestAddCloudNamespaceForTraces(t *testing.T) { }, }, { - name: "does not add cloud.namespce attribute when disabled", + name: "does not add cloud.namespace attribute when disabled", addCloudNamespace: false, createTraces: func() ptrace.Traces { inputTraces := ptrace.NewTraces() diff --git a/processor/sumologicprocessor/translate_docker_metrics_processor.go b/processor/sumologicprocessor/translate_docker_metrics_processor.go index 9e103bd8bcaa..2e8ee41ee29d 100644 --- a/processor/sumologicprocessor/translate_docker_metrics_processor.go +++ b/processor/sumologicprocessor/translate_docker_metrics_processor.go @@ -68,7 +68,7 @@ var dockerMetricsTranslations = map[string]string{ "container.blockio.sectors_recursive": "sectors_recursive", } -var dockerReasourceAttributeTranslations = map[string]string{ +var dockerResourceAttributeTranslations = map[string]string{ "container.id": "container.FullID", "container.image.name": "container.ImageName", "container.name": "container.Name", @@ -132,7 +132,7 @@ func translateDockerResourceAttributes(attributes pcommon.Map) { result.EnsureCapacity(attributes.Len()) attributes.Range(func(otKey string, value pcommon.Value) bool { - if sumoKey, ok := dockerReasourceAttributeTranslations[otKey]; ok { + if sumoKey, ok := dockerResourceAttributeTranslations[otKey]; ok { // Only insert if it doesn't exist yet to prevent overwriting. // We have to do it this way since the final return value is not // ready yet to rely on .Insert() not overwriting. diff --git a/processor/sumologicprocessor/translate_docker_metrics_processor_test.go b/processor/sumologicprocessor/translate_docker_metrics_processor_test.go index a6dd12f78ae6..5f3c871bdc87 100644 --- a/processor/sumologicprocessor/translate_docker_metrics_processor_test.go +++ b/processor/sumologicprocessor/translate_docker_metrics_processor_test.go @@ -77,7 +77,7 @@ func TestTranslateDockerMetric_NamesAreTranslatedCorrectly(t *testing.T) { } } -func TestTranslateDockerMetric_ResourceAttrbutesAreTranslatedCorrectly(t *testing.T) { +func TestTranslateDockerMetric_ResourceAttributesAreTranslatedCorrectly(t *testing.T) { testcases := []struct { nameIn string nameOut string diff --git a/processor/tailsamplingprocessor/README.md b/processor/tailsamplingprocessor/README.md index 0c6c9978f588..a6df70b6566a 100644 --- a/processor/tailsamplingprocessor/README.md +++ b/processor/tailsamplingprocessor/README.md @@ -49,7 +49,7 @@ The following configuration options can also be modified: - `decision_cache`: Options for configuring caches for sampling decisions. You may want to vary the size of these caches depending on how many "keep" vs "drop" decisions you expect from your policies. For example, you may allocate a larger `non_sampled_cache_size` if you expect most traces to be dropped. - Additionally, if using, configure this as much higher than `num_traces` so decisions for trace IDs are kept + Additionally, if using, configure this as much greater than `num_traces` so decisions for trace IDs are kept longer than the span data for the trace. - `sampled_cache_size` (default = 0): Configures amount of trace IDs to be kept in an LRU cache, persisting the "keep" decisions for traces that may have already been released from memory. 
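As a rough illustration of the sizing guidance above, the sketch below fills in `DecisionCacheConfig` (the struct shown in `config.go` further down in this diff) with caches roughly an order of magnitude larger than `num_traces`. The concrete numbers and the standalone `main` are invented for the example and are not part of this change.

```go
package main

import (
	"fmt"

	"github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor"
)

func main() {
	// Sizing sketch: keep both decision caches roughly 10x num_traces so that
	// keep/drop decisions outlive the span data held in the circular buffer.
	numTraces := uint64(50_000)
	cache := tailsamplingprocessor.DecisionCacheConfig{
		SampledCacheSize:    500_000,   // ~10x num_traces
		NonSampledCacheSize: 1_000_000, // larger still if most traces end up dropped
	}
	fmt.Printf("num_traces=%d sampled_cache_size=%d non_sampled_cache_size=%d\n",
		numTraces, cache.SampledCacheSize, cache.NonSampledCacheSize)
}
```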
@@ -469,7 +469,7 @@ A circular buffer is used to ensure the number of traces in-memory doesn't excee otelcol_processor_tail_sampling_sampling_trace_dropped_too_early ``` -**Pre-emptively Preventing Dropped Traces** +**Preemptively Preventing Dropped Traces** A trace is dropped without sampling if it's removed from the circular buffer before `decision_wait`. diff --git a/processor/tailsamplingprocessor/config.go b/processor/tailsamplingprocessor/config.go index 1b18c039fb00..9bda384d64b2 100644 --- a/processor/tailsamplingprocessor/config.go +++ b/processor/tailsamplingprocessor/config.go @@ -190,7 +190,7 @@ type StringAttributeCfg struct { // RateLimitingCfg holds the configurable settings to create a rate limiting // sampling policy evaluator. type RateLimitingCfg struct { - // SpansPerSecond sets the limit on the maximum nuber of spans that can be processed each second. + // SpansPerSecond sets the limit on the maximum number of spans that can be processed each second. SpansPerSecond int64 `mapstructure:"spans_per_second"` } @@ -227,12 +227,12 @@ type OTTLConditionCfg struct { type DecisionCacheConfig struct { // SampledCacheSize specifies the size of the cache that holds the sampled trace IDs. // This value will be the maximum amount of trace IDs that the cache can hold before overwriting previous IDs. - // For effective use, this value should be at least an order of magnitude higher than Config.NumTraces. + // For effective use, this value should be at least an order of magnitude greater than Config.NumTraces. // If left as default 0, a no-op DecisionCache will be used. SampledCacheSize int `mapstructure:"sampled_cache_size"` // NonSampledCacheSize specifies the size of the cache that holds the non-sampled trace IDs. // This value will be the maximum amount of trace IDs that the cache can hold before overwriting previous IDs. - // For effective use, this value should be at least an order of magnitude higher than Config.NumTraces. + // For effective use, this value should be at least an order of magnitude greater than Config.NumTraces. // If left as default 0, a no-op DecisionCache will be used. NonSampledCacheSize int `mapstructure:"non_sampled_cache_size"` } diff --git a/processor/tailsamplingprocessor/internal/sampling/composite_test.go b/processor/tailsamplingprocessor/internal/sampling/composite_test.go index c323fe849946..c813bdeddabf 100644 --- a/processor/tailsamplingprocessor/internal/sampling/composite_test.go +++ b/processor/tailsamplingprocessor/internal/sampling/composite_test.go @@ -119,7 +119,7 @@ func TestCompositeEvaluator_OverflowAlwaysSampled(t *testing.T) { decision, err = c.Evaluate(context.Background(), traceID, trace) require.NoError(t, err, "Failed to evaluate composite policy: %v", err) - // The first policy fails as the tag value is higher than the range set where as the second policy is AlwaysSample, so the decision should be Sampled. + // The first policy fails as the tag value is greater than the range set whereas the second policy is AlwaysSample, so the decision should be Sampled. 
expected = Sampled assert.Equal(t, expected, decision) } diff --git a/processor/tailsamplingprocessor/internal/sampling/latency.go b/processor/tailsamplingprocessor/internal/sampling/latency.go index be87f47165c9..2b24ba3a2496 100644 --- a/processor/tailsamplingprocessor/internal/sampling/latency.go +++ b/processor/tailsamplingprocessor/internal/sampling/latency.go @@ -20,7 +20,7 @@ type latency struct { var _ PolicyEvaluator = (*latency)(nil) -// NewLatency creates a policy evaluator sampling traces with a duration higher than a configured threshold +// NewLatency creates a policy evaluator sampling traces with a duration greater than a configured threshold func NewLatency(settings component.TelemetrySettings, thresholdMs int64, upperThresholdMs int64) PolicyEvaluator { return &latency{ logger: settings.Logger, diff --git a/processor/tailsamplingprocessor/internal/sampling/string_tag_filter.go b/processor/tailsamplingprocessor/internal/sampling/string_tag_filter.go index dd22d04eaa24..02f965af8bd5 100644 --- a/processor/tailsamplingprocessor/internal/sampling/string_tag_filter.go +++ b/processor/tailsamplingprocessor/internal/sampling/string_tag_filter.go @@ -92,7 +92,7 @@ func NewStringAttributeFilter(settings component.TelemetrySettings, key string, // The SamplingDecision is made by comparing the attribute values with the matching values, // which might be static strings or regular expressions. func (saf *stringAttributeFilter) Evaluate(_ context.Context, _ pcommon.TraceID, trace *TraceData) (Decision, error) { - saf.logger.Debug("Evaluting spans in string-tag filter") + saf.logger.Debug("Evaluating spans in string-tag filter") trace.Lock() defer trace.Unlock() batches := trace.ReceivedBatches @@ -111,8 +111,8 @@ func (saf *stringAttributeFilter) Evaluate(_ context.Context, _ pcommon.TraceID, }, func(span ptrace.Span) bool { if v, ok := span.Attributes().Get(saf.key); ok { - truncableStr := v.Str() - if len(truncableStr) > 0 { + truncatableStr := v.Str() + if len(truncatableStr) > 0 { if ok := saf.matcher(v.Str()); ok { return false } @@ -135,8 +135,8 @@ func (saf *stringAttributeFilter) Evaluate(_ context.Context, _ pcommon.TraceID, }, func(span ptrace.Span) bool { if v, ok := span.Attributes().Get(saf.key); ok { - truncableStr := v.Str() - if len(truncableStr) > 0 { + truncatableStr := v.Str() + if len(truncatableStr) > 0 { if ok := saf.matcher(v.Str()); ok { return true } diff --git a/processor/transformprocessor/README.md b/processor/transformprocessor/README.md index c207dbdf63b8..4651ab6be710 100644 --- a/processor/transformprocessor/README.md +++ b/processor/transformprocessor/README.md @@ -264,7 +264,7 @@ The `extract_count_metric` function creates a new Sum metric from a Histogram, E `is_monotonic` is a boolean representing the monotonicity of the new metric. -The name for the new metric will be `_count`. The fields that are copied are: `timestamp`, `starttimestamp`, `attibutes`, `description`, and `aggregation_temporality`. As metrics of type Summary don't have an `aggregation_temporality` field, this field will be set to `AGGREGATION_TEMPORALITY_CUMULATIVE` for those metrics. +The name for the new metric will be `_count`. The fields that are copied are: `timestamp`, `starttimestamp`, `attributes`, `description`, and `aggregation_temporality`. As metrics of type Summary don't have an `aggregation_temporality` field, this field will be set to `AGGREGATION_TEMPORALITY_CUMULATIVE` for those metrics. 
The new metric that is created will be passed to all subsequent statements in the metrics statements list. @@ -288,7 +288,7 @@ The `extract_sum_metric` function creates a new Sum metric from a Histogram, Exp `is_monotonic` is a boolean representing the monotonicity of the new metric. -The name for the new metric will be `_sum`. The fields that are copied are: `timestamp`, `starttimestamp`, `attibutes`, `description`, and `aggregation_temporality`. As metrics of type Summary don't have an `aggregation_temporality` field, this field will be set to `AGGREGATION_TEMPORALITY_CUMULATIVE` for those metrics. +The name for the new metric will be `_sum`. The fields that are copied are: `timestamp`, `starttimestamp`, `attributes`, `description`, and `aggregation_temporality`. As metrics of type Summary don't have an `aggregation_temporality` field, this field will be set to `AGGREGATION_TEMPORALITY_CUMULATIVE` for those metrics. The new metric that is created will be passed to all subsequent statements in the metrics statements list. @@ -309,7 +309,7 @@ The `convert_summary_count_val_to_sum` function creates a new Sum metric from a `aggregation_temporality` is a string (`"cumulative"` or `"delta"`) representing the desired aggregation temporality of the new metric. `is_monotonic` is a boolean representing the monotonicity of the new metric. -The name for the new metric will be `_count`. The fields that are copied are: `timestamp`, `starttimestamp`, `attibutes`, and `description`. The new metric that is created will be passed to all functions in the metrics statements list. Function conditions will apply. +The name for the new metric will be `_count`. The fields that are copied are: `timestamp`, `starttimestamp`, `attributes`, and `description`. The new metric that is created will be passed to all functions in the metrics statements list. Function conditions will apply. **NOTE:** This function may cause a metric to break semantics for [Sum metrics](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#sums). Use at your own risk. @@ -328,7 +328,7 @@ The `convert_summary_sum_val_to_sum` function creates a new Sum metric from a Su `aggregation_temporality` is a string (`"cumulative"` or `"delta"`) representing the desired aggregation temporality of the new metric. `is_monotonic` is a boolean representing the monotonicity of the new metric. -The name for the new metric will be `_sum`. The fields that are copied are: `timestamp`, `starttimestamp`, `attibutes`, and `description`. The new metric that is created will be passed to all functions in the metrics statements list. Function conditions will apply. +The name for the new metric will be `_sum`. The fields that are copied are: `timestamp`, `starttimestamp`, `attributes`, and `description`. The new metric that is created will be passed to all functions in the metrics statements list. Function conditions will apply. **NOTE:** This function may cause a metric to break semantics for [Sum metrics](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#sums). Use at your own risk. 
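To make the copy behaviour described above concrete, here is a minimal pdata sketch of deriving a Sum metric (with a `_sum` suffix) from a Summary, carrying over timestamps, attributes, and description. The helper `buildSumFromSummary` and the metric names are invented for the example; this is an illustration of the described behaviour, not the processor's own implementation.

```go
package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

// buildSumFromSummary appends a new monotonic, cumulative Sum metric whose
// data points take each summary data point's sum value along with its
// timestamps and attributes, mirroring the fields listed in the README above.
func buildSumFromSummary(src pmetric.Metric, dest pmetric.MetricSlice) {
	if src.Type() != pmetric.MetricTypeSummary {
		return
	}
	out := dest.AppendEmpty()
	out.SetName(src.Name() + "_sum")
	out.SetDescription(src.Description())

	sum := out.SetEmptySum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)

	sdps := src.Summary().DataPoints()
	for i := 0; i < sdps.Len(); i++ {
		sdp := sdps.At(i)
		dp := sum.DataPoints().AppendEmpty()
		dp.SetStartTimestamp(sdp.StartTimestamp())
		dp.SetTimestamp(sdp.Timestamp())
		sdp.Attributes().CopyTo(dp.Attributes())
		dp.SetDoubleValue(sdp.Sum())
	}
}

func main() {
	metrics := pmetric.NewMetricSlice()
	m := metrics.AppendEmpty()
	m.SetName("http.request.duration")
	sdp := m.SetEmptySummary().DataPoints().AppendEmpty()
	sdp.SetTimestamp(pcommon.NewTimestampFromTime(time.Now()))
	sdp.SetSum(42.5)
	sdp.SetCount(10)
	sdp.Attributes().PutStr("http.method", "GET")

	buildSumFromSummary(m, metrics)
	fmt.Println(metrics.Len()) // 2: the original summary plus the derived _sum metric
}
```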
diff --git a/processor/transformprocessor/internal/common/metrics.go b/processor/transformprocessor/internal/common/metrics.go index 3ae07920ca2c..f4cec79cd15e 100644 --- a/processor/transformprocessor/internal/common/metrics.go +++ b/processor/transformprocessor/internal/common/metrics.go @@ -88,7 +88,7 @@ func (d dataPointStatements) ConsumeMetrics(ctx context.Context, md pmetric.Metr case pmetric.MetricTypeHistogram: err = d.handleHistogramDataPoints(ctx, metric.Histogram().DataPoints(), metrics.At(k), metrics, smetrics.Scope(), rmetrics.Resource(), smetrics, rmetrics) case pmetric.MetricTypeExponentialHistogram: - err = d.handleExponetialHistogramDataPoints(ctx, metric.ExponentialHistogram().DataPoints(), metrics.At(k), metrics, smetrics.Scope(), rmetrics.Resource(), smetrics, rmetrics) + err = d.handleExponentialHistogramDataPoints(ctx, metric.ExponentialHistogram().DataPoints(), metrics.At(k), metrics, smetrics.Scope(), rmetrics.Resource(), smetrics, rmetrics) case pmetric.MetricTypeSummary: err = d.handleSummaryDataPoints(ctx, metric.Summary().DataPoints(), metrics.At(k), metrics, smetrics.Scope(), rmetrics.Resource(), smetrics, rmetrics) } @@ -135,7 +135,7 @@ func (d dataPointStatements) handleHistogramDataPoints(ctx context.Context, dps return nil } -func (d dataPointStatements) handleExponetialHistogramDataPoints(ctx context.Context, dps pmetric.ExponentialHistogramDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource, scopeMetrics pmetric.ScopeMetrics, resourceMetrics pmetric.ResourceMetrics) error { +func (d dataPointStatements) handleExponentialHistogramDataPoints(ctx context.Context, dps pmetric.ExponentialHistogramDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource, scopeMetrics pmetric.ScopeMetrics, resourceMetrics pmetric.ResourceMetrics) error { for i := 0; i < dps.Len(); i++ { tCtx := ottldatapoint.NewTransformContext(dps.At(i), metric, metrics, is, resource, scopeMetrics, resourceMetrics) condition, err := d.BoolExpr.Eval(ctx, tCtx) diff --git a/processor/transformprocessor/internal/logs/processor_test.go b/processor/transformprocessor/internal/logs/processor_test.go index d3e06f65ac01..448328138c21 100644 --- a/processor/transformprocessor/internal/logs/processor_test.go +++ b/processor/transformprocessor/internal/logs/processor_test.go @@ -366,13 +366,13 @@ func Test_ProcessLogs_LogContext(t *testing.T) { func Test_ProcessLogs_MixContext(t *testing.T) { tests := []struct { - name string - contextStatments []common.ContextStatements - want func(td plog.Logs) + name string + contextStatements []common.ContextStatements + want func(td plog.Logs) }{ { name: "set resource and then use", - contextStatments: []common.ContextStatements{ + contextStatements: []common.ContextStatements{ { Context: "resource", Statements: []string{ @@ -394,7 +394,7 @@ func Test_ProcessLogs_MixContext(t *testing.T) { }, { name: "set scope and then use", - contextStatments: []common.ContextStatements{ + contextStatements: []common.ContextStatements{ { Context: "scope", Statements: []string{ @@ -416,7 +416,7 @@ func Test_ProcessLogs_MixContext(t *testing.T) { }, { name: "order matters", - contextStatments: []common.ContextStatements{ + contextStatements: []common.ContextStatements{ { Context: "log", Statements: []string{ @@ -436,7 +436,7 @@ func Test_ProcessLogs_MixContext(t *testing.T) { }, { name: "reuse context", - contextStatments: 
[]common.ContextStatements{ + contextStatements: []common.ContextStatements{ { Context: "scope", Statements: []string{ @@ -467,7 +467,7 @@ func Test_ProcessLogs_MixContext(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { td := constructLogs() - processor, err := NewProcessor(tt.contextStatments, ottl.IgnoreError, false, componenttest.NewNopTelemetrySettings()) + processor, err := NewProcessor(tt.contextStatements, ottl.IgnoreError, false, componenttest.NewNopTelemetrySettings()) assert.NoError(t, err) _, err = processor.ProcessLogs(context.Background(), td) diff --git a/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist.go b/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist.go index f23136ac5d66..167d5293461d 100644 --- a/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist.go +++ b/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist.go @@ -199,7 +199,7 @@ var midpointAlgorithm distAlgorithm = func(count uint64, (*bucketCountsDst)[len(boundaries)-1] += count // Overflow bucket } -// uniformAlgorithm distributes counts from a given set of bucket sounrces into a set of linear boundaries using uniform distribution +// uniformAlgorithm distributes counts from a given set of bucket sources into a set of linear boundaries using uniform distribution var uniformAlgorithm distAlgorithm = func(count uint64, upper, lower float64, boundaries []float64, bucketCountsDst *[]uint64, diff --git a/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist_test.go b/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist_test.go index aee2cdf07fea..4aede94561f5 100644 --- a/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist_test.go +++ b/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist_test.go @@ -191,7 +191,7 @@ func TestUpper_convert_exponential_hist_to_explicit_hist(t *testing.T) { // 0 scale exponential histogram will result in an extremely large upper bound // resulting in all the counts being in buckets much larger than the explicit bounds // thus all counts will be in the overflow bucket - name: "0 scale expontential histogram given using upper distribute", + name: "0 scale exponential histogram given using upper distribute", input: func() pmetric.Metric { m := pmetric.NewMetric() defaultTestMetric().CopyTo(m) @@ -221,7 +221,7 @@ func TestUpper_convert_exponential_hist_to_explicit_hist(t *testing.T) { }, }, { - name: "empty expontential histogram given using upper distribute", + name: "empty exponential histogram given using upper distribute", input: func() pmetric.Metric { m := pmetric.NewMetric() m.SetName("empty") @@ -236,7 +236,7 @@ func TestUpper_convert_exponential_hist_to_explicit_hist(t *testing.T) { }, }, { - name: "non-expontential histogram", + name: "non-exponential histogram", arg: []float64{0}, distribution: "upper", input: nonExponentialHist, @@ -403,7 +403,7 @@ func TestMidpoint_convert_exponential_hist_to_explicit_hist(t *testing.T) { }, }, { - name: "empty expontential histogram given", + name: "empty exponential histogram given", input: func() pmetric.Metric { m := pmetric.NewMetric() m.SetName("empty") @@ -418,7 +418,7 @@ func TestMidpoint_convert_exponential_hist_to_explicit_hist(t *testing.T) { }, }, { - name: "non-expontential 
histogram given using upper distribute", + name: "non-exponential histogram given using upper distribute", arg: []float64{0}, distribution: "midpoint", input: nonExponentialHist, @@ -448,7 +448,7 @@ func TestMidpoint_convert_exponential_hist_to_explicit_hist(t *testing.T) { } } -func TestUniforn_convert_exponential_hist_to_explicit_hist(t *testing.T) { +func TestUniform_convert_exponential_hist_to_explicit_hist(t *testing.T) { ts := pcommon.NewTimestampFromTime(time.Now()) defaultTestMetric := func() pmetric.Metric { m := pmetric.NewMetric() diff --git a/processor/transformprocessor/internal/metrics/processor_test.go b/processor/transformprocessor/internal/metrics/processor_test.go index 6087fcd70d74..128a9d00ced0 100644 --- a/processor/transformprocessor/internal/metrics/processor_test.go +++ b/processor/transformprocessor/internal/metrics/processor_test.go @@ -726,13 +726,13 @@ func Test_ProcessMetrics_DataPointContext(t *testing.T) { func Test_ProcessMetrics_MixContext(t *testing.T) { tests := []struct { - name string - contextStatments []common.ContextStatements - want func(td pmetric.Metrics) + name string + contextStatements []common.ContextStatements + want func(td pmetric.Metrics) }{ { name: "set resource and then use", - contextStatments: []common.ContextStatements{ + contextStatements: []common.ContextStatements{ { Context: "resource", Statements: []string{ @@ -761,7 +761,7 @@ func Test_ProcessMetrics_MixContext(t *testing.T) { }, { name: "set scope and then use", - contextStatments: []common.ContextStatements{ + contextStatements: []common.ContextStatements{ { Context: "scope", Statements: []string{ @@ -790,7 +790,7 @@ func Test_ProcessMetrics_MixContext(t *testing.T) { }, { name: "order matters", - contextStatments: []common.ContextStatements{ + contextStatements: []common.ContextStatements{ { Context: "datapoint", Statements: []string{ @@ -810,7 +810,7 @@ func Test_ProcessMetrics_MixContext(t *testing.T) { }, { name: "reuse context ", - contextStatments: []common.ContextStatements{ + contextStatements: []common.ContextStatements{ { Context: "scope", Statements: []string{ @@ -848,7 +848,7 @@ func Test_ProcessMetrics_MixContext(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { td := constructMetrics() - processor, err := NewProcessor(tt.contextStatments, ottl.IgnoreError, componenttest.NewNopTelemetrySettings()) + processor, err := NewProcessor(tt.contextStatements, ottl.IgnoreError, componenttest.NewNopTelemetrySettings()) assert.NoError(t, err) _, err = processor.ProcessMetrics(context.Background(), td) diff --git a/processor/transformprocessor/internal/traces/processor_test.go b/processor/transformprocessor/internal/traces/processor_test.go index e6928ba9fa38..0da86dfeb262 100644 --- a/processor/transformprocessor/internal/traces/processor_test.go +++ b/processor/transformprocessor/internal/traces/processor_test.go @@ -443,13 +443,13 @@ func Test_ProcessTraces_SpanEventContext(t *testing.T) { func Test_ProcessTraces_MixContext(t *testing.T) { tests := []struct { - name string - contextStatments []common.ContextStatements - want func(td ptrace.Traces) + name string + contextStatements []common.ContextStatements + want func(td ptrace.Traces) }{ { name: "set resource and then use", - contextStatments: []common.ContextStatements{ + contextStatements: []common.ContextStatements{ { Context: "resource", Statements: []string{ @@ -471,7 +471,7 @@ func Test_ProcessTraces_MixContext(t *testing.T) { }, { name: "set scope and then use", - contextStatments: 
[]common.ContextStatements{ + contextStatements: []common.ContextStatements{ { Context: "scope", Statements: []string{ @@ -493,7 +493,7 @@ func Test_ProcessTraces_MixContext(t *testing.T) { }, { name: "order matters", - contextStatments: []common.ContextStatements{ + contextStatements: []common.ContextStatements{ { Context: "span", Statements: []string{ @@ -513,7 +513,7 @@ func Test_ProcessTraces_MixContext(t *testing.T) { }, { name: "reuse context", - contextStatments: []common.ContextStatements{ + contextStatements: []common.ContextStatements{ { Context: "scope", Statements: []string{ @@ -544,7 +544,7 @@ func Test_ProcessTraces_MixContext(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { td := constructTraces() - processor, err := NewProcessor(tt.contextStatments, ottl.IgnoreError, componenttest.NewNopTelemetrySettings()) + processor, err := NewProcessor(tt.contextStatements, ottl.IgnoreError, componenttest.NewNopTelemetrySettings()) assert.NoError(t, err) _, err = processor.ProcessTraces(context.Background(), td) diff --git a/receiver/awss3receiver/go.mod b/receiver/awss3receiver/go.mod index fc101b5607cc..91d9e27f21df 100644 --- a/receiver/awss3receiver/go.mod +++ b/receiver/awss3receiver/go.mod @@ -7,7 +7,7 @@ require ( github.com/aws/aws-sdk-go-v2/config v1.28.7 github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.45 github.com/aws/aws-sdk-go-v2/service/s3 v1.72.0 - github.com/open-telemetry/opamp-go v0.17.0 + github.com/open-telemetry/opamp-go v0.18.0 github.com/open-telemetry/opentelemetry-collector-contrib/extension/opampcustommessages v0.117.0 github.com/stretchr/testify v1.10.0 go.opentelemetry.io/collector/component v0.117.0 diff --git a/receiver/awss3receiver/go.sum b/receiver/awss3receiver/go.sum index f27b906fe21f..ff44222ee522 100644 --- a/receiver/awss3receiver/go.sum +++ b/receiver/awss3receiver/go.sum @@ -79,8 +79,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/open-telemetry/opamp-go v0.17.0 h1:3R4+B/6Sy8mknLBbzO3gqloqwTT02rCSRcr4ac2B124= -github.com/open-telemetry/opamp-go v0.17.0/go.mod h1:SGDhUoAx7uGutO4ENNMQla/tiSujxgZmMPJXIOPGBdk= +github.com/open-telemetry/opamp-go v0.18.0 h1:sNHsrBvGU2CMxCB1TRJXncDARrmxDEebx8dsEIawqA4= +github.com/open-telemetry/opamp-go v0.18.0/go.mod h1:9/1G6T5dnJz4cJtoYSr6AX18kHdOxnxxETJPZSHyEUg= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= diff --git a/receiver/hostmetricsreceiver/config.go b/receiver/hostmetricsreceiver/config.go index f4adbf86077a..5e6ba246d1c4 100644 --- a/receiver/hostmetricsreceiver/config.go +++ b/receiver/hostmetricsreceiver/config.go @@ -19,7 +19,7 @@ import ( // Config defines configuration for HostMetrics receiver. 
type Config struct { scraperhelper.ControllerConfig `mapstructure:",squash"` - Scrapers map[component.Type]internal.Config `mapstructure:"-"` + Scrapers map[component.Type]component.Config `mapstructure:"-"` // RootPath is the host's root directory (linux only). RootPath string `mapstructure:"root_path"` @@ -58,7 +58,7 @@ func (cfg *Config) Unmarshal(componentParser *confmap.Conf) error { // dynamically load the individual collector configs based on the key name - cfg.Scrapers = map[component.Type]internal.Config{} + cfg.Scrapers = map[component.Type]component.Config{} scrapersSection, err := componentParser.Sub("scrapers") if err != nil { @@ -84,7 +84,9 @@ func (cfg *Config) Unmarshal(componentParser *confmap.Conf) error { return fmt.Errorf("error reading settings for scraper type %q: %w", key, err) } - scraperCfg.SetRootPath(cfg.RootPath) + if iCfg, ok := scraperCfg.(internal.Config); ok { + iCfg.SetRootPath(cfg.RootPath) + } cfg.Scrapers[key] = scraperCfg } diff --git a/receiver/hostmetricsreceiver/config_test.go b/receiver/hostmetricsreceiver/config_test.go index 48d655eaa0c6..0e2f82727690 100644 --- a/receiver/hostmetricsreceiver/config_test.go +++ b/receiver/hostmetricsreceiver/config_test.go @@ -15,7 +15,6 @@ import ( "go.opentelemetry.io/collector/scraper/scraperhelper" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset" - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/metadata" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/cpuscraper" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/diskscraper" @@ -43,8 +42,8 @@ func TestLoadConfig(t *testing.T) { id: component.NewID(metadata.Type), expected: func() component.Config { cfg := createDefaultConfig().(*Config) - cfg.Scrapers = map[component.Type]internal.Config{ - cpuscraper.Type: func() internal.Config { + cfg.Scrapers = map[component.Type]component.Config{ + cpuscraper.Type: func() component.Config { cfg := (&cpuscraper.Factory{}).CreateDefaultConfig() return cfg }(), @@ -60,29 +59,29 @@ func TestLoadConfig(t *testing.T) { CollectionInterval: 30 * time.Second, InitialDelay: time.Second, }, - Scrapers: map[component.Type]internal.Config{ - cpuscraper.Type: func() internal.Config { + Scrapers: map[component.Type]component.Config{ + cpuscraper.Type: func() component.Config { cfg := (&cpuscraper.Factory{}).CreateDefaultConfig() return cfg }(), - diskscraper.Type: func() internal.Config { + diskscraper.Type: func() component.Config { cfg := (&diskscraper.Factory{}).CreateDefaultConfig() return cfg }(), - loadscraper.Type: (func() internal.Config { + loadscraper.Type: (func() component.Config { cfg := (&loadscraper.Factory{}).CreateDefaultConfig() cfg.(*loadscraper.Config).CPUAverage = true return cfg })(), - filesystemscraper.Type: func() internal.Config { + filesystemscraper.Type: func() component.Config { cfg := (&filesystemscraper.Factory{}).CreateDefaultConfig() return cfg }(), - memoryscraper.Type: func() internal.Config { + memoryscraper.Type: func() component.Config { cfg := (&memoryscraper.Factory{}).CreateDefaultConfig() return cfg }(), - networkscraper.Type: (func() internal.Config { + networkscraper.Type: (func() component.Config { cfg := (&networkscraper.Factory{}).CreateDefaultConfig() cfg.(*networkscraper.Config).Include = 
networkscraper.MatchConfig{ Interfaces: []string{"test1"}, @@ -90,15 +89,15 @@ func TestLoadConfig(t *testing.T) { } return cfg })(), - processesscraper.Type: func() internal.Config { + processesscraper.Type: func() component.Config { cfg := (&processesscraper.Factory{}).CreateDefaultConfig() return cfg }(), - pagingscraper.Type: func() internal.Config { + pagingscraper.Type: func() component.Config { cfg := (&pagingscraper.Factory{}).CreateDefaultConfig() return cfg }(), - processscraper.Type: (func() internal.Config { + processscraper.Type: (func() component.Config { cfg := (&processscraper.Factory{}).CreateDefaultConfig() cfg.(*processscraper.Config).Include = processscraper.MatchConfig{ Names: []string{"test2", "test3"}, @@ -106,7 +105,7 @@ func TestLoadConfig(t *testing.T) { } return cfg })(), - systemscraper.Type: (func() internal.Config { + systemscraper.Type: (func() component.Config { cfg := (&systemscraper.Factory{}).CreateDefaultConfig() return cfg })(), diff --git a/receiver/hostmetricsreceiver/factory.go b/receiver/hostmetricsreceiver/factory.go index 0fac30742796..e1af92a95d48 100644 --- a/receiver/hostmetricsreceiver/factory.go +++ b/receiver/hostmetricsreceiver/factory.go @@ -130,7 +130,7 @@ func createAddScraperOptions( return scraperControllerOptions, nil } -func createHostMetricsScraper(ctx context.Context, set receiver.Settings, key component.Type, cfg internal.Config, factories map[component.Type]internal.ScraperFactory) (s scraper.Metrics, ok bool, err error) { +func createHostMetricsScraper(ctx context.Context, set receiver.Settings, key component.Type, cfg component.Config, factories map[component.Type]internal.ScraperFactory) (s scraper.Metrics, ok bool, err error) { factory := factories[key] if factory == nil { ok = false diff --git a/receiver/hostmetricsreceiver/factory_test.go b/receiver/hostmetricsreceiver/factory_test.go index d9cd23736c4a..7a3ecb047728 100644 --- a/receiver/hostmetricsreceiver/factory_test.go +++ b/receiver/hostmetricsreceiver/factory_test.go @@ -14,8 +14,6 @@ import ( "go.opentelemetry.io/collector/consumer/consumertest" "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/receiver/receivertest" - - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal" ) var creationSet = receivertest.NewNopSettings() @@ -48,7 +46,7 @@ func TestCreateReceiver_ScraperKeyConfigError(t *testing.T) { const errorKey string = "error" factory := NewFactory() - cfg := &Config{Scrapers: map[component.Type]internal.Config{component.MustNewType(errorKey): &mockConfig{}}} + cfg := &Config{Scrapers: map[component.Type]component.Config{component.MustNewType(errorKey): &mockConfig{}}} _, err := factory.CreateMetrics(context.Background(), creationSet, cfg, consumertest.NewNop()) assert.EqualError(t, err, fmt.Sprintf("host metrics scraper factory not found for key: %q", errorKey)) diff --git a/receiver/hostmetricsreceiver/hostmetrics_linux_test.go b/receiver/hostmetricsreceiver/hostmetrics_linux_test.go index 7722e0bd2b12..93288dcf383e 100644 --- a/receiver/hostmetricsreceiver/hostmetrics_linux_test.go +++ b/receiver/hostmetricsreceiver/hostmetrics_linux_test.go @@ -15,7 +15,6 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap/confmaptest" - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/cpuscraper" ) @@ -45,8 +44,7 @@ 
func TestLoadConfigRootPath(t *testing.T) { expectedConfig := factory.CreateDefaultConfig().(*Config) expectedConfig.RootPath = "testdata" cpuScraperCfg := (&cpuscraper.Factory{}).CreateDefaultConfig() - cpuScraperCfg.SetRootPath("testdata") - expectedConfig.Scrapers = map[component.Type]internal.Config{cpuscraper.Type: cpuScraperCfg} + expectedConfig.Scrapers = map[component.Type]component.Config{cpuscraper.Type: cpuScraperCfg} assert.Equal(t, expectedConfig, cfg) expectedEnvMap := common.EnvMap{ common.HostDevEnvKey: "testdata/dev", diff --git a/receiver/hostmetricsreceiver/hostmetrics_receiver_test.go b/receiver/hostmetricsreceiver/hostmetrics_receiver_test.go index 482bd84bc461..071020e58d68 100644 --- a/receiver/hostmetricsreceiver/hostmetrics_receiver_test.go +++ b/receiver/hostmetricsreceiver/hostmetrics_receiver_test.go @@ -78,7 +78,7 @@ func TestGatherMetrics_EndToEnd(t *testing.T) { ControllerConfig: scraperhelper.ControllerConfig{ CollectionInterval: 100 * time.Millisecond, }, - Scrapers: map[component.Type]internal.Config{ + Scrapers: map[component.Type]component.Config{ cpuscraper.Type: scraperFactories[cpuscraper.Type].CreateDefaultConfig(), diskscraper.Type: scraperFactories[diskscraper.Type].CreateDefaultConfig(), filesystemscraper.Type: (&filesystemscraper.Factory{}).CreateDefaultConfig(), @@ -186,12 +186,10 @@ var mockType = component.MustNewType("mock") type mockConfig struct{} -func (m *mockConfig) SetRootPath(_ string) {} - type errFactory struct{} -func (m *errFactory) CreateDefaultConfig() internal.Config { return &mockConfig{} } -func (m *errFactory) CreateMetricsScraper(context.Context, receiver.Settings, internal.Config) (scraper.Metrics, error) { +func (m *errFactory) CreateDefaultConfig() component.Config { return &mockConfig{} } +func (m *errFactory) CreateMetricsScraper(context.Context, receiver.Settings, component.Config) (scraper.Metrics, error) { return nil, errors.New("err1") } @@ -202,7 +200,7 @@ func TestGatherMetrics_ScraperKeyConfigError(t *testing.T) { scraperFactories = tmp }() - cfg := &Config{Scrapers: map[component.Type]internal.Config{component.MustNewType("error"): &mockConfig{}}} + cfg := &Config{Scrapers: map[component.Type]component.Config{component.MustNewType("error"): &mockConfig{}}} _, err := NewFactory().CreateMetrics(context.Background(), creationSet, cfg, consumertest.NewNop()) require.Error(t, err) } @@ -215,7 +213,7 @@ func TestGatherMetrics_CreateMetricsScraperError(t *testing.T) { scraperFactories = tmp }() - cfg := &Config{Scrapers: map[component.Type]internal.Config{mockType: &mockConfig{}}} + cfg := &Config{Scrapers: map[component.Type]component.Config{mockType: &mockConfig{}}} _, err := NewFactory().CreateMetrics(context.Background(), creationSet, cfg, consumertest.NewNop()) require.Error(t, err) } @@ -267,7 +265,7 @@ func benchmarkScrapeMetrics(b *testing.B, cfg *Config) { func Benchmark_ScrapeCpuMetrics(b *testing.B) { cfg := &Config{ ControllerConfig: scraperhelper.NewDefaultControllerConfig(), - Scrapers: map[component.Type]internal.Config{cpuscraper.Type: (&cpuscraper.Factory{}).CreateDefaultConfig()}, + Scrapers: map[component.Type]component.Config{cpuscraper.Type: (&cpuscraper.Factory{}).CreateDefaultConfig()}, } benchmarkScrapeMetrics(b, cfg) @@ -276,7 +274,7 @@ func Benchmark_ScrapeCpuMetrics(b *testing.B) { func Benchmark_ScrapeDiskMetrics(b *testing.B) { cfg := &Config{ ControllerConfig: scraperhelper.NewDefaultControllerConfig(), - Scrapers: map[component.Type]internal.Config{diskscraper.Type: 
(&diskscraper.Factory{}).CreateDefaultConfig()},
+		Scrapers:         map[component.Type]component.Config{diskscraper.Type: (&diskscraper.Factory{}).CreateDefaultConfig()},
 	}
 
 	benchmarkScrapeMetrics(b, cfg)
@@ -285,7 +283,7 @@ func Benchmark_ScrapeDiskMetrics(b *testing.B) {
 func Benchmark_ScrapeFileSystemMetrics(b *testing.B) {
 	cfg := &Config{
 		ControllerConfig: scraperhelper.NewDefaultControllerConfig(),
-		Scrapers:         map[component.Type]internal.Config{filesystemscraper.Type: (&filesystemscraper.Factory{}).CreateDefaultConfig()},
+		Scrapers:         map[component.Type]component.Config{filesystemscraper.Type: (&filesystemscraper.Factory{}).CreateDefaultConfig()},
 	}
 
 	benchmarkScrapeMetrics(b, cfg)
@@ -294,7 +292,7 @@ func Benchmark_ScrapeFileSystemMetrics(b *testing.B) {
 func Benchmark_ScrapeLoadMetrics(b *testing.B) {
 	cfg := &Config{
 		ControllerConfig: scraperhelper.NewDefaultControllerConfig(),
-		Scrapers:         map[component.Type]internal.Config{loadscraper.Type: (&loadscraper.Factory{}).CreateDefaultConfig()},
+		Scrapers:         map[component.Type]component.Config{loadscraper.Type: (&loadscraper.Factory{}).CreateDefaultConfig()},
 	}
 
 	benchmarkScrapeMetrics(b, cfg)
@@ -303,7 +301,7 @@ func Benchmark_ScrapeLoadMetrics(b *testing.B) {
 func Benchmark_ScrapeMemoryMetrics(b *testing.B) {
 	cfg := &Config{
 		ControllerConfig: scraperhelper.NewDefaultControllerConfig(),
-		Scrapers:         map[component.Type]internal.Config{memoryscraper.Type: (&memoryscraper.Factory{}).CreateDefaultConfig()},
+		Scrapers:         map[component.Type]component.Config{memoryscraper.Type: (&memoryscraper.Factory{}).CreateDefaultConfig()},
 	}
 
 	benchmarkScrapeMetrics(b, cfg)
@@ -312,7 +310,7 @@ func Benchmark_ScrapeMemoryMetrics(b *testing.B) {
 func Benchmark_ScrapeNetworkMetrics(b *testing.B) {
 	cfg := &Config{
 		ControllerConfig: scraperhelper.NewDefaultControllerConfig(),
-		Scrapers:         map[component.Type]internal.Config{networkscraper.Type: (&networkscraper.Factory{}).CreateDefaultConfig()},
+		Scrapers:         map[component.Type]component.Config{networkscraper.Type: (&networkscraper.Factory{}).CreateDefaultConfig()},
 	}
 
 	benchmarkScrapeMetrics(b, cfg)
@@ -321,7 +319,7 @@ func Benchmark_ScrapeNetworkMetrics(b *testing.B) {
 func Benchmark_ScrapeProcessesMetrics(b *testing.B) {
 	cfg := &Config{
 		ControllerConfig: scraperhelper.NewDefaultControllerConfig(),
-		Scrapers:         map[component.Type]internal.Config{processesscraper.Type: (&processesscraper.Factory{}).CreateDefaultConfig()},
+		Scrapers:         map[component.Type]component.Config{processesscraper.Type: (&processesscraper.Factory{}).CreateDefaultConfig()},
 	}
 
 	benchmarkScrapeMetrics(b, cfg)
@@ -330,7 +328,7 @@ func Benchmark_ScrapeProcessesMetrics(b *testing.B) {
 func Benchmark_ScrapePagingMetrics(b *testing.B) {
 	cfg := &Config{
 		ControllerConfig: scraperhelper.NewDefaultControllerConfig(),
-		Scrapers:         map[component.Type]internal.Config{pagingscraper.Type: (&pagingscraper.Factory{}).CreateDefaultConfig()},
+		Scrapers:         map[component.Type]component.Config{pagingscraper.Type: (&pagingscraper.Factory{}).CreateDefaultConfig()},
 	}
 
 	benchmarkScrapeMetrics(b, cfg)
@@ -343,7 +341,7 @@ func Benchmark_ScrapeProcessMetrics(b *testing.B) {
 
 	cfg := &Config{
 		ControllerConfig: scraperhelper.NewDefaultControllerConfig(),
-		Scrapers:         map[component.Type]internal.Config{processscraper.Type: (&processscraper.Factory{}).CreateDefaultConfig()},
+		Scrapers:         map[component.Type]component.Config{processscraper.Type: (&processscraper.Factory{}).CreateDefaultConfig()},
 	}
 
 	benchmarkScrapeMetrics(b, cfg)
@@ -356,7 +354,7 @@ func Benchmark_ScrapeUptimeMetrics(b *testing.B) {
 
 	cfg := &Config{
 		ControllerConfig: scraperhelper.NewDefaultControllerConfig(),
-		Scrapers:         map[component.Type]internal.Config{systemscraper.Type: (&systemscraper.Factory{}).CreateDefaultConfig()},
+		Scrapers:         map[component.Type]component.Config{systemscraper.Type: (&systemscraper.Factory{}).CreateDefaultConfig()},
 	}
 
 	benchmarkScrapeMetrics(b, cfg)
@@ -365,7 +363,7 @@ func Benchmark_ScrapeUptimeMetrics(b *testing.B) {
 func Benchmark_ScrapeSystemMetrics(b *testing.B) {
 	cfg := &Config{
 		ControllerConfig: scraperhelper.NewDefaultControllerConfig(),
-		Scrapers: map[component.Type]internal.Config{
+		Scrapers: map[component.Type]component.Config{
 			cpuscraper.Type:        (&cpuscraper.Factory{}).CreateDefaultConfig(),
 			diskscraper.Type:       (&diskscraper.Factory{}).CreateDefaultConfig(),
 			filesystemscraper.Type: (&filesystemscraper.Factory{}).CreateDefaultConfig(),
@@ -387,16 +385,9 @@ func Benchmark_ScrapeSystemAndProcessMetrics(b *testing.B) {
 
 	cfg := &Config{
 		ControllerConfig: scraperhelper.NewDefaultControllerConfig(),
-		Scrapers: map[component.Type]internal.Config{
-			cpuscraper.Type:        &cpuscraper.Config{},
-			diskscraper.Type:       &diskscraper.Config{},
+		Scrapers: map[component.Type]component.Config{
 			filesystemscraper.Type: (&filesystemscraper.Factory{}).CreateDefaultConfig(),
-			loadscraper.Type:       &loadscraper.Config{},
-			memoryscraper.Type:     &memoryscraper.Config{},
-			networkscraper.Type:    &networkscraper.Config{},
 			pagingscraper.Type:     (&pagingscraper.Factory{}).CreateDefaultConfig(),
-			processesscraper.Type:  &processesscraper.Config{},
-			systemscraper.Type:     &systemscraper.Config{},
 		},
 	}
 
diff --git a/receiver/hostmetricsreceiver/integration_test.go b/receiver/hostmetricsreceiver/integration_test.go
index 9e765e3714f7..a45968059f41 100644
--- a/receiver/hostmetricsreceiver/integration_test.go
+++ b/receiver/hostmetricsreceiver/integration_test.go
@@ -17,7 +17,6 @@ import (
 	"github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/scraperinttest"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/pmetrictest"
-	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/processscraper"
 )
 
@@ -41,7 +40,7 @@ func Test_ProcessScrape(t *testing.T) {
 				Config: filterset.Config{MatchType: filterset.Regexp},
 				Names:  []string{"sleep"},
 			}
-			rCfg.Scrapers = map[component.Type]internal.Config{
+			rCfg.Scrapers = map[component.Type]component.Config{
 				processscraper.Type: pCfg,
 			}
 		}),
@@ -71,8 +70,7 @@ func Test_ProcessScrapeWithCustomRootPath(t *testing.T) {
 			rCfg.CollectionInterval = time.Second
 			rCfg.RootPath = rootPath
 			pCfg := (&processscraper.Factory{}).CreateDefaultConfig().(*processscraper.Config)
-			pCfg.SetRootPath(rootPath)
-			rCfg.Scrapers = map[component.Type]internal.Config{
+			rCfg.Scrapers = map[component.Type]component.Config{
 				processscraper.Type: pCfg,
 			}
 		}),
@@ -99,12 +97,11 @@ func Test_ProcessScrapeWithBadRootPathAndEnvVar(t *testing.T) {
 		func(_ *testing.T, cfg component.Config, _ *scraperinttest.ContainerInfo) {
 			rCfg := cfg.(*Config)
 			rCfg.CollectionInterval = time.Second
+			rCfg.RootPath = badRootPath
 			pCfg := (&processscraper.Factory{}).CreateDefaultConfig().(*processscraper.Config)
-			pCfg.SetRootPath(badRootPath)
-			rCfg.Scrapers = map[component.Type]internal.Config{
+			rCfg.Scrapers = map[component.Type]component.Config{
 				processscraper.Type: pCfg,
 			}
-			rCfg.RootPath = badRootPath
 		}),
 		scraperinttest.WithExpectedFile(expectedFile),
 		scraperinttest.WithCompareOptions(
diff --git a/receiver/hostmetricsreceiver/internal/scraper.go b/receiver/hostmetricsreceiver/internal/scraper.go
index 7cbb8872df32..704faf32232c 100644
--- a/receiver/hostmetricsreceiver/internal/scraper.go
+++ b/receiver/hostmetricsreceiver/internal/scraper.go
@@ -16,11 +16,11 @@ import (
 // ScraperFactory can create a MetricScraper.
 type ScraperFactory interface {
 	// CreateDefaultConfig creates the default configuration for the Scraper.
-	CreateDefaultConfig() Config
+	CreateDefaultConfig() component.Config
 
 	// CreateMetricsScraper creates a scraper based on this config.
 	// If the config is not valid, error will be returned instead.
-	CreateMetricsScraper(ctx context.Context, settings receiver.Settings, cfg Config) (scraper.Metrics, error)
+	CreateMetricsScraper(ctx context.Context, settings receiver.Settings, cfg component.Config) (scraper.Metrics, error)
 }
 
 // Config is the configuration of a scraper.
@@ -28,14 +28,6 @@ type Config interface {
 	SetRootPath(rootPath string)
 }
 
-type ScraperConfig struct {
-	RootPath string `mapstructure:"-"`
-}
-
-func (p *ScraperConfig) SetRootPath(rootPath string) {
-	p.RootPath = rootPath
-}
-
 type EnvVarScraper struct {
 	delegate scraper.Metrics
 	envMap   common.EnvMap
diff --git a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/config.go b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/config.go
index 1509f2606a9d..34adc2e4821e 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/config.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/config.go
@@ -4,12 +4,10 @@
 package cpuscraper // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/cpuscraper"
 
 import (
-	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/internal/metadata"
 )
 
 // Config relating to CPU Metric Scraper.
 type Config struct {
 	metadata.MetricsBuilderConfig `mapstructure:",squash"`
-	internal.ScraperConfig
 }
diff --git a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/factory.go b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/factory.go
index d072159c8a98..f0d0e182c427 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/factory.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/factory.go
@@ -10,7 +10,6 @@ import (
 	"go.opentelemetry.io/collector/receiver"
 	"go.opentelemetry.io/collector/scraper"
 
-	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/internal/metadata"
 )
 
@@ -23,7 +22,7 @@ var Type = component.MustNewType("cpu")
 type Factory struct{}
 
 // CreateDefaultConfig creates the default configuration for the Scraper.
-func (f *Factory) CreateDefaultConfig() internal.Config {
+func (f *Factory) CreateDefaultConfig() component.Config {
 	return &Config{
 		MetricsBuilderConfig: metadata.DefaultMetricsBuilderConfig(),
 	}
@@ -33,7 +32,7 @@ func (f *Factory) CreateDefaultConfig() internal.Config {
 func (f *Factory) CreateMetricsScraper(
 	ctx context.Context,
 	settings receiver.Settings,
-	config internal.Config,
+	config component.Config,
 ) (scraper.Metrics, error) {
 	cfg := config.(*Config)
 	s := newCPUScraper(ctx, settings, cfg)
diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/config.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/config.go
index 29048bd3188e..b9121fb423c2 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/config.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/config.go
@@ -5,7 +5,6 @@ package diskscraper // import "github.com/open-telemetry/opentelemetry-collector
 
 import (
 	"github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset"
-	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata"
 )
 
@@ -13,7 +12,6 @@ import (
 type Config struct {
 	// MetricsbuilderConfig allows to customize scraped metrics/attributes representation.
 	metadata.MetricsBuilderConfig `mapstructure:",squash"`
-	internal.ScraperConfig
 
 	// Include specifies a filter on the devices that should be included from the generated metrics.
 	// Exclude specifies a filter on the devices that should be excluded from the generated metrics.
diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/factory.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/factory.go
index f1e30bd378f6..064e2242bae6 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/factory.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/factory.go
@@ -10,7 +10,6 @@ import (
 	"go.opentelemetry.io/collector/receiver"
 	"go.opentelemetry.io/collector/scraper"
 
-	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata"
 )
 
@@ -23,7 +22,7 @@ var Type = component.MustNewType("disk")
 type Factory struct{}
 
 // CreateDefaultConfig creates the default configuration for the Scraper.
-func (f *Factory) CreateDefaultConfig() internal.Config {
+func (f *Factory) CreateDefaultConfig() component.Config {
 	return &Config{
 		MetricsBuilderConfig: metadata.DefaultMetricsBuilderConfig(),
 	}
@@ -33,7 +32,7 @@ func (f *Factory) CreateDefaultConfig() internal.Config {
 func (f *Factory) CreateMetricsScraper(
 	ctx context.Context,
 	settings receiver.Settings,
-	config internal.Config,
+	config component.Config,
 ) (scraper.Metrics, error) {
 	cfg := config.(*Config)
 	s, err := newDiskScraper(ctx, settings, cfg)
diff --git a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/config.go b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/config.go
index f8acc4e30b0a..f38adc5ff806 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/config.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/config.go
@@ -7,7 +7,6 @@ import (
 	"fmt"
 
 	"github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset"
-	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/internal/metadata"
 )
 
@@ -15,7 +14,6 @@ import (
 type Config struct {
 	// MetricsBuilderConfig allows to customize scraped metrics/attributes representation.
 	metadata.MetricsBuilderConfig `mapstructure:",squash"`
-	internal.ScraperConfig
 
 	// IncludeVirtualFS will also capture filesystems such as tmpfs, ramfs
 	// and other filesystem types that do no have an associated physical device.
@@ -37,6 +35,8 @@ type Config struct {
 	// ExcludeMountPoints specifies a filter on the mount points that should be excluded from the generated metrics.
 	// When `root_path` is set, the mount points must be from the host's perspective.
 	ExcludeMountPoints MountPointMatchConfig `mapstructure:"exclude_mount_points"`
+
+	rootPath string `mapstructure:"-"`
 }
 
 type DeviceMatchConfig struct {
@@ -67,6 +67,10 @@ type fsFilter struct {
 	filtersExist bool
 }
 
+func (cfg *Config) SetRootPath(rootPath string) {
+	cfg.rootPath = rootPath
+}
+
 func (cfg *Config) createFilter() (*fsFilter, error) {
 	var err error
 	filter := fsFilter{}
diff --git a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/factory.go b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/factory.go
index 9702d63e1414..250060a9108a 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/factory.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/factory.go
@@ -11,7 +11,6 @@ import (
 	"go.opentelemetry.io/collector/receiver"
 	"go.opentelemetry.io/collector/scraper"
 
-	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/internal/metadata"
 )
 
@@ -24,7 +23,7 @@ var Type = component.MustNewType("filesystem")
 type Factory struct{}
 
 // CreateDefaultConfig creates the default configuration for the Scraper.
-func (f *Factory) CreateDefaultConfig() internal.Config {
+func (f *Factory) CreateDefaultConfig() component.Config {
 	return &Config{
 		MetricsBuilderConfig: metadata.DefaultMetricsBuilderConfig(),
 	}
@@ -34,11 +33,11 @@ func (f *Factory) CreateDefaultConfig() internal.Config {
 func (f *Factory) CreateMetricsScraper(
 	ctx context.Context,
 	settings receiver.Settings,
-	config internal.Config,
+	config component.Config,
 ) (scraper.Metrics, error) {
 	cfg := config.(*Config)
 
-	if cfg.RootPath == "" {
+	if cfg.rootPath == "" {
 		inContainer := os.Getpid() == 1
 		for _, p := range []string{
 			"/.dockerenv", // Mounted by dockerd when starting a container by default
diff --git a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go
index 0304b3ab9289..9c6cd943ba92 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go
@@ -108,7 +108,7 @@ func (s *filesystemsScraper) scrape(ctx context.Context) (pmetric.Metrics, error
 		if !s.fsFilter.includePartition(partition) {
 			continue
 		}
-		translatedMountpoint := translateMountpoint(ctx, s.config.RootPath, partition.Mountpoint)
+		translatedMountpoint := translateMountpoint(ctx, s.config.rootPath, partition.Mountpoint)
 		usage, usageErr := s.usage(ctx, translatedMountpoint)
 		if usageErr != nil {
 			errors.AddPartial(0, fmt.Errorf("failed to read usage at %s: %w", translatedMountpoint, usageErr))
diff --git a/receiver/hostmetricsreceiver/internal/scraper/loadscraper/config.go b/receiver/hostmetricsreceiver/internal/scraper/loadscraper/config.go
index 996952370448..fae25f263485 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/loadscraper/config.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/loadscraper/config.go
@@ -4,7 +4,6 @@
 package loadscraper // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/loadscraper"
 
 import (
-	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/loadscraper/internal/metadata"
 )
 
@@ -14,5 +13,4 @@ type Config struct {
 	CPUAverage bool `mapstructure:"cpu_average"`
 	// MetricsBuilderConfig allows to customize scraped metrics/attributes representation.
 	metadata.MetricsBuilderConfig `mapstructure:",squash"`
-	internal.ScraperConfig
 }
diff --git a/receiver/hostmetricsreceiver/internal/scraper/loadscraper/factory.go b/receiver/hostmetricsreceiver/internal/scraper/loadscraper/factory.go
index 695884ba1097..79dc32a9d6bd 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/loadscraper/factory.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/loadscraper/factory.go
@@ -10,7 +10,6 @@ import (
 	"go.opentelemetry.io/collector/receiver"
 	"go.opentelemetry.io/collector/scraper"
 
-	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/loadscraper/internal/metadata"
 )
 
@@ -23,7 +22,7 @@ var Type = component.MustNewType("load")
 type Factory struct{}
 
 // CreateDefaultConfig creates the default configuration for the Scraper.
-func (f *Factory) CreateDefaultConfig() internal.Config {
+func (f *Factory) CreateDefaultConfig() component.Config {
 	return &Config{
 		MetricsBuilderConfig: metadata.DefaultMetricsBuilderConfig(),
 	}
@@ -33,7 +32,7 @@ func (f *Factory) CreateDefaultConfig() internal.Config {
 func (f *Factory) CreateMetricsScraper(
 	ctx context.Context,
 	settings receiver.Settings,
-	config internal.Config,
+	config component.Config,
 ) (scraper.Metrics, error) {
 	cfg := config.(*Config)
 	s := newLoadScraper(ctx, settings, cfg)
diff --git a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/config.go b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/config.go
index c273c8dd8a85..ba8e5589993a 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/config.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/config.go
@@ -4,12 +4,10 @@
 package memoryscraper // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/memoryscraper"
 
 import (
-	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/internal/metadata"
 )
 
 // Config relating to Memory Metric Scraper.
 type Config struct {
 	metadata.MetricsBuilderConfig `mapstructure:",squash"`
-	internal.ScraperConfig
 }
diff --git a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/factory.go b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/factory.go
index 3a34a4013704..a966d478ec6b 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/factory.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/factory.go
@@ -10,7 +10,6 @@ import (
 	"go.opentelemetry.io/collector/receiver"
 	"go.opentelemetry.io/collector/scraper"
 
-	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/internal/metadata"
 )
 
@@ -23,7 +22,7 @@ var Type = component.MustNewType("memory")
 type Factory struct{}
 
 // CreateDefaultConfig creates the default configuration for the Scraper.
-func (f *Factory) CreateDefaultConfig() internal.Config {
+func (f *Factory) CreateDefaultConfig() component.Config {
 	return &Config{
 		MetricsBuilderConfig: metadata.DefaultMetricsBuilderConfig(),
 	}
@@ -33,7 +32,7 @@ func (f *Factory) CreateDefaultConfig() internal.Config {
 func (f *Factory) CreateMetricsScraper(
 	ctx context.Context,
 	settings receiver.Settings,
-	config internal.Config,
+	config component.Config,
 ) (scraper.Metrics, error) {
 	cfg := config.(*Config)
 	s := newMemoryScraper(ctx, settings, cfg)
diff --git a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/config.go b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/config.go
index b7fa0728c599..7c888564b5eb 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/config.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/config.go
@@ -5,14 +5,12 @@ package networkscraper // import "github.com/open-telemetry/opentelemetry-collec
 
 import (
 	"github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset"
-	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/networkscraper/internal/metadata"
 )
 
 // Config relating to Network Metric Scraper.
 type Config struct {
 	metadata.MetricsBuilderConfig `mapstructure:",squash"`
-	internal.ScraperConfig
 	// Include specifies a filter on the network interfaces that should be included from the generated metrics.
 	Include MatchConfig `mapstructure:"include"`
 	// Exclude specifies a filter on the network interfaces that should be excluded from the generated metrics.
diff --git a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/factory.go b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/factory.go
index 1746fd3e93f1..307b12ba32f9 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/factory.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/factory.go
@@ -10,7 +10,6 @@ import (
 	"go.opentelemetry.io/collector/receiver"
 	"go.opentelemetry.io/collector/scraper"
 
-	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/networkscraper/internal/metadata"
 )
 
@@ -23,7 +22,7 @@ var Type = component.MustNewType("network")
 type Factory struct{}
 
 // CreateDefaultConfig creates the default configuration for the Scraper.
-func (f *Factory) CreateDefaultConfig() internal.Config {
+func (f *Factory) CreateDefaultConfig() component.Config {
 	return &Config{
 		MetricsBuilderConfig: metadata.DefaultMetricsBuilderConfig(),
 	}
@@ -33,7 +32,7 @@ func (f *Factory) CreateDefaultConfig() internal.Config {
 func (f *Factory) CreateMetricsScraper(
 	ctx context.Context,
 	settings receiver.Settings,
-	config internal.Config,
+	config component.Config,
 ) (scraper.Metrics, error) {
 	cfg := config.(*Config)
 	s, err := newNetworkScraper(ctx, settings, cfg)
diff --git a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/config.go b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/config.go
index ac64f83ae034..50bc880f73d6 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/config.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/config.go
@@ -4,7 +4,6 @@
 package pagingscraper // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/pagingscraper"
 
 import (
-	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/internal/metadata"
 )
 
@@ -12,5 +11,4 @@ import (
 type Config struct {
 	// MetricsBuilderConfig allows customizing scraped metrics/attributes representation.
 	metadata.MetricsBuilderConfig `mapstructure:",squash"`
-	internal.ScraperConfig
 }
diff --git a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/factory.go b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/factory.go
index 1ec88b5fadff..b85d0278a75c 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/factory.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/factory.go
@@ -10,7 +10,6 @@ import (
 	"go.opentelemetry.io/collector/receiver"
 	"go.opentelemetry.io/collector/scraper"
 
-	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/internal/metadata"
 )
 
@@ -23,7 +22,7 @@ var Type = component.MustNewType("paging")
 type Factory struct{}
 
 // CreateDefaultConfig creates the default configuration for the Scraper.
-func (f *Factory) CreateDefaultConfig() internal.Config {
+func (f *Factory) CreateDefaultConfig() component.Config {
 	return &Config{
 		MetricsBuilderConfig: metadata.DefaultMetricsBuilderConfig(),
 	}
@@ -33,7 +32,7 @@ func (f *Factory) CreateDefaultConfig() internal.Config {
 func (f *Factory) CreateMetricsScraper(
 	ctx context.Context,
 	settings receiver.Settings,
-	config internal.Config,
+	config component.Config,
 ) (scraper.Metrics, error) {
 	cfg := config.(*Config)
 	s := newPagingScraper(ctx, settings, cfg)
diff --git a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/config.go b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/config.go
index 712b769f52e9..ce5beab04d8e 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/config.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/config.go
@@ -4,7 +4,6 @@
 package processesscraper // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/processesscraper"
 
 import (
-	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/processesscraper/internal/metadata"
 )
 
@@ -12,5 +11,4 @@ import (
 type Config struct {
 	// MetricsBuilderConfig allows customizing scraped metrics/attributes representation.
 	metadata.MetricsBuilderConfig `mapstructure:",squash"`
-	internal.ScraperConfig
 }
diff --git a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/factory.go b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/factory.go
index b5b6201a22b6..21e73d2835d2 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/factory.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/factory.go
@@ -10,7 +10,6 @@ import (
 	"go.opentelemetry.io/collector/receiver"
 	"go.opentelemetry.io/collector/scraper"
 
-	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/processesscraper/internal/metadata"
 )
 
@@ -23,7 +22,7 @@ var Type = component.MustNewType("processes")
 type Factory struct{}
 
 // CreateDefaultConfig creates the default configuration for the Scraper.
-func (f *Factory) CreateDefaultConfig() internal.Config {
+func (f *Factory) CreateDefaultConfig() component.Config {
 	return &Config{
 		MetricsBuilderConfig: metadata.DefaultMetricsBuilderConfig(),
 	}
@@ -33,7 +32,7 @@ func (f *Factory) CreateDefaultConfig() internal.Config {
 func (f *Factory) CreateMetricsScraper(
 	ctx context.Context,
 	settings receiver.Settings,
-	config internal.Config,
+	config component.Config,
 ) (scraper.Metrics, error) {
 	cfg := config.(*Config)
 	s := newProcessesScraper(ctx, settings, cfg)
diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/config.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/config.go
index 60c3ea0b8535..246a025ece1f 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/config.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/config.go
@@ -7,7 +7,6 @@ import (
 	"time"
 
 	"github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset"
-	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/metadata"
 )
 
@@ -15,7 +14,6 @@ import (
 type Config struct {
 	// MetricsBuilderConfig allows to customize scraped metrics/attributes representation.
 	metadata.MetricsBuilderConfig `mapstructure:",squash"`
-	internal.ScraperConfig
 	// Include specifies a filter on the process names that should be included from the generated metrics.
 	// Exclude specifies a filter on the process names that should be excluded from the generated metrics.
 	// If neither `include` or `exclude` are set, process metrics will be generated for all processes.
diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/factory.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/factory.go
index 20971d017849..7b68429cd3dd 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/factory.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/factory.go
@@ -13,7 +13,6 @@ import (
 	"go.opentelemetry.io/collector/receiver"
 	"go.opentelemetry.io/collector/scraper"
 
-	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/metadata"
 )
 
@@ -37,7 +36,7 @@ var (
 type Factory struct{}
 
 // CreateDefaultConfig creates the default configuration for the Scraper.
-func (f *Factory) CreateDefaultConfig() internal.Config {
+func (f *Factory) CreateDefaultConfig() component.Config {
 	return &Config{
 		MetricsBuilderConfig: metadata.DefaultMetricsBuilderConfig(),
 	}
@@ -47,7 +46,7 @@ func (f *Factory) CreateDefaultConfig() internal.Config {
 func (f *Factory) CreateMetricsScraper(
 	_ context.Context,
 	settings receiver.Settings,
-	cfg internal.Config,
+	cfg component.Config,
 ) (scraper.Metrics, error) {
 	if runtime.GOOS != "linux" && runtime.GOOS != "windows" && runtime.GOOS != "darwin" {
 		return nil, errors.New("process scraper only available on Linux, Windows, or MacOS")
diff --git a/receiver/hostmetricsreceiver/internal/scraper/systemscraper/config.go b/receiver/hostmetricsreceiver/internal/scraper/systemscraper/config.go
index 9c101ad03347..8d902bbc6c1d 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/systemscraper/config.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/systemscraper/config.go
@@ -4,7 +4,6 @@
 package systemscraper // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/systemscraper"
 
 import (
-	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/systemscraper/internal/metadata"
 )
 
@@ -12,5 +11,4 @@ import (
 type Config struct {
 	// MetricsBuilderConfig allows to customize scraped metrics/attributes representation.
 	metadata.MetricsBuilderConfig `mapstructure:",squash"`
-	internal.ScraperConfig
 }
diff --git a/receiver/hostmetricsreceiver/internal/scraper/systemscraper/factory.go b/receiver/hostmetricsreceiver/internal/scraper/systemscraper/factory.go
index c163bea26ec4..abd1994a2fd6 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/systemscraper/factory.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/systemscraper/factory.go
@@ -12,7 +12,6 @@ import (
 	"go.opentelemetry.io/collector/receiver"
 	"go.opentelemetry.io/collector/scraper"
 
-	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/systemscraper/internal/metadata"
 )
 
@@ -25,7 +24,7 @@ var Type = component.MustNewType("system")
 type Factory struct{}
 
 // CreateDefaultConfig creates the default configuration for the Scraper.
-func (f *Factory) CreateDefaultConfig() internal.Config {
+func (f *Factory) CreateDefaultConfig() component.Config {
 	return &Config{
 		MetricsBuilderConfig: metadata.DefaultMetricsBuilderConfig(),
 	}
@@ -35,7 +34,7 @@ func (f *Factory) CreateDefaultConfig() internal.Config {
 func (f *Factory) CreateMetricsScraper(
 	ctx context.Context,
 	settings receiver.Settings,
-	cfg internal.Config,
+	cfg component.Config,
 ) (scraper.Metrics, error) {
 	if runtime.GOOS != "linux" && runtime.GOOS != "windows" && runtime.GOOS != "darwin" {
 		return nil, errors.New("uptime scraper only available on Linux, Windows, or MacOS")
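Note: the following is an illustrative sketch, not part of the patch above. It shows the shape a host-metrics scraper factory takes after this change: CreateDefaultConfig returns the generic component.Config, CreateMetricsScraper accepts component.Config and type-asserts it back to the scraper's concrete Config, and scrapers that still need the host root path (such as the filesystem scraper) keep an unexported rootPath field with a SetRootPath method instead of embedding the removed internal.ScraperConfig. The package name "examplescraper" and the type name "example" are hypothetical.

// examplescraper is a hypothetical scraper package used only to illustrate
// the refactored factory contract; it is not part of the repository.
package examplescraper

import (
	"context"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/receiver"
	"go.opentelemetry.io/collector/scraper"
)

// Type mirrors the per-scraper Type variables ("cpu", "disk", "filesystem", ...).
var Type = component.MustNewType("example")

// Config no longer embeds internal.ScraperConfig. Scrapers that need the
// host root path keep a private field plus SetRootPath, as the filesystem
// scraper does in the patch.
type Config struct {
	rootPath string `mapstructure:"-"`
}

func (c *Config) SetRootPath(rootPath string) { c.rootPath = rootPath }

type Factory struct{}

// CreateDefaultConfig returns component.Config instead of the old internal.Config.
func (f *Factory) CreateDefaultConfig() component.Config {
	return &Config{}
}

// CreateMetricsScraper narrows the generic config back to the concrete type
// before building the scraper; construction itself is elided in this sketch.
func (f *Factory) CreateMetricsScraper(
	_ context.Context,
	_ receiver.Settings,
	cfg component.Config,
) (scraper.Metrics, error) {
	_ = cfg.(*Config) // a real factory passes this to its scraper constructor
	return nil, nil
}

A receiver config would then reference such defaults the same way the hunks above do, e.g. Scrapers: map[component.Type]component.Config{Type: (&Factory{}).CreateDefaultConfig()}.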