Skip to content

Commit

Permalink
[receiver/prometheus] Fix static scrape config with Target Allocator (#…
Browse files Browse the repository at this point in the history
…36063)

#### Description

Instead of clearing out the Prometheus scrape configuration, the code now
copies the initial config every time it is synced from the Target
Allocator.

#### Link to tracking issue

Fixes
#36062
and open-telemetry/opentelemetry-operator#3034

<!--Describe what testing was performed and which tests were added.-->

#### Testing

Tested in a kind cluster with a custom image.

<!--Describe the documentation added.-->
#### Documentation

<!--Please delete paragraphs that you did not use before submitting.-->

Co-authored-by: David Ashpole <[email protected]>
  • Loading branch information
povilasv and dashpole authored Nov 18, 2024
1 parent 2c7daec commit b14856e
Show file tree
Hide file tree
Showing 3 changed files with 126 additions and 2 deletions.
27 changes: 27 additions & 0 deletions .chloggen/ta.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
# Use this changelog template to create an entry for release notes.

# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
change_type: bug_fix

# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
component: prometheusreceiver

# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
note: "Fix prometheus receiver to support static scrape config with Target Allocator"

# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
issues: [36062]

# (Optional) One or more lines of additional information to render under the primary note.
# These lines will be padded with 2 spaces and then inserted directly into the document.
# Use pipe (|) for multiline entries.
subtext:

# If your change doesn't affect end users or the exported elements of any package,
# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
# Optional: The change log or logs in which this entry should be included.
# e.g. '[user]' or '[user, api]'
# Include 'user' if the change is relevant to end users.
# Include 'api' if there is a change to a library API.
# Default: '[user]'
change_logs: [user]
9 changes: 7 additions & 2 deletions receiver/prometheusreceiver/targetallocator/manager.go
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@ type Manager struct {
shutdown chan struct{}
cfg *Config
promCfg *promconfig.Config
initialScrapeConfigs []*promconfig.ScrapeConfig
scrapeManager *scrape.Manager
discoveryManager *discovery.Manager
enableNativeHistograms bool
Expand All @@ -43,6 +44,7 @@ func NewManager(set receiver.Settings, cfg *Config, promCfg *promconfig.Config,
settings: set,
cfg: cfg,
promCfg: promCfg,
initialScrapeConfigs: promCfg.ScrapeConfigs,
enableNativeHistograms: enableNativeHistograms,
}
}
Expand Down Expand Up @@ -115,8 +117,11 @@ func (m *Manager) sync(compareHash uint64, httpClient *http.Client) (uint64, err
return hash, nil
}

// Clear out the current configurations
m.promCfg.ScrapeConfigs = []*promconfig.ScrapeConfig{}
// Copy initial scrape configurations
initialConfig := make([]*promconfig.ScrapeConfig, len(m.initialScrapeConfigs))
copy(initialConfig, m.initialScrapeConfigs)

m.promCfg.ScrapeConfigs = initialConfig

for jobName, scrapeConfig := range scrapeConfigsResponse {
var httpSD promHTTP.SDConfig
Expand Down
92 changes: 92 additions & 0 deletions receiver/prometheusreceiver/targetallocator/manager_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -833,6 +833,98 @@ func TestConfigureSDHTTPClientConfigFromTA(t *testing.T) {
assert.NoError(t, err)
}

// TestManagerSyncWithInitialScrapeConfigs verifies that scrape configs present
// in the initial Prometheus configuration survive a sync with the Target
// Allocator: jobs served by the allocator must be appended after the static
// jobs rather than replacing them.
func TestManagerSyncWithInitialScrapeConfigs(t *testing.T) {
	ctx := context.Background()

	// Two static jobs configured on the receiver before any TA sync happens.
	staticJobs := []*promconfig.ScrapeConfig{
		{
			JobName:         "job1",
			HonorTimestamps: true,
			ScrapeInterval:  model.Duration(30 * time.Second),
			ScrapeTimeout:   model.Duration(30 * time.Second),
			MetricsPath:     "/metrics",
			Scheme:          "http",
		},
		{
			JobName:         "job2",
			HonorTimestamps: true,
			ScrapeInterval:  model.Duration(30 * time.Second),
			ScrapeTimeout:   model.Duration(30 * time.Second),
			MetricsPath:     "/metrics",
			Scheme:          "http",
		},
	}

	// The mock Target Allocator serves a single scrape config ("job3",
	// keyed under "job1") and two identical target responses for it.
	taResponses := Responses{
		responses: map[string][]mockTargetAllocatorResponseRaw{
			"/scrape_configs": {
				mockTargetAllocatorResponseRaw{code: 200, data: map[string]map[string]any{
					"job1": {
						"job_name":               "job3",
						"scrape_interval":        "30s",
						"scrape_timeout":         "30s",
						"scrape_protocols":       []string{"OpenMetricsText1.0.0", "OpenMetricsText0.0.1", "PrometheusText0.0.4"},
						"metrics_path":           "/metrics",
						"scheme":                 "http",
						"relabel_configs":        nil,
						"metric_relabel_configs": nil,
					},
				}},
			},
			"/jobs/job1/targets": {
				mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{
					{
						Targets: []string{"localhost:9090", "10.0.10.3:9100", "10.0.10.4:9100", "10.0.10.5:9100"},
						Labels: map[model.LabelName]model.LabelValue{
							"__meta_datacenter":     "london",
							"__meta_prometheus_job": "node",
						},
					},
				}},
				mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{
					{
						Targets: []string{"localhost:9090", "10.0.10.3:9100", "10.0.10.4:9100", "10.0.10.5:9100"},
						Labels: map[model.LabelName]model.LabelValue{
							"__meta_datacenter":     "london",
							"__meta_prometheus_job": "node",
						},
					},
				}},
			},
		},
	}

	taCfg := &Config{
		Interval:    10 * time.Second,
		CollectorID: "collector-1",
		HTTPSDConfig: &PromHTTPSDConfig{
			HTTPClientConfig: commonconfig.HTTPClientConfig{},
			RefreshInterval:  model.Duration(60 * time.Second),
		},
	}

	mockTA, err := setupMockTargetAllocator(taResponses)
	require.NoError(t, err, "Failed to create allocator")

	mockTA.Start()
	defer mockTA.Stop()
	// The endpoint is only known once the test server is listening.
	taCfg.Endpoint = mockTA.srv.URL

	scrapeManager, discoveryManager := initPrometheusManagers(ctx, t)

	promCfg := promconfig.Config{GlobalConfig: promconfig.DefaultGlobalConfig, ScrapeConfigs: staticJobs}
	manager := NewManager(receivertest.NewNopSettings(), taCfg, &promCfg, false)
	require.NoError(t, manager.Start(ctx, componenttest.NewNopHost(), scrapeManager, discoveryManager))

	// Block until the mock allocator has served all configured responses.
	mockTA.wg.Wait()

	providers := discoveryManager.Providers()
	require.NotNil(t, providers)
	require.Len(t, providers, 2)
	require.IsType(t, &promHTTP.Discovery{}, providers[1].Discoverer())

	// The static jobs survive the sync; the allocator's job is appended.
	require.Len(t, manager.promCfg.ScrapeConfigs, 3)
	require.Equal(t, "job1", manager.promCfg.ScrapeConfigs[0].JobName)
	require.Equal(t, "job2", manager.promCfg.ScrapeConfigs[1].JobName)
	require.Equal(t, "job3", manager.promCfg.ScrapeConfigs[2].JobName)
}

func initPrometheusManagers(ctx context.Context, t *testing.T) (*scrape.Manager, *discovery.Manager) {
logger := log.NewNopLogger()
reg := prometheus.NewRegistry()
Expand Down

0 comments on commit b14856e

Please sign in to comment.