Mirror of https://github.com/prometheus/prometheus.git, synced 2025-03-05 20:59:13 -08:00

Merge 354a993f3b into 677efa4678

Commit 997910db21
CHANGELOG.md:

@@ -1,6 +1,7 @@
 # Changelog
 
 ## unreleased
 
+* [ENHANCEMENT] Scraping: add warning if targets relabel to same labels. This is enabled under the feature-flag `warn-if-targets-relabelled-to-same-labels`. #9589
 
 ## 3.2.1 / 2025-02-25
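Note: the diff itself does not show the invocation, but given the feature-flag switch in setFeatureListOptions below, the warning would presumably be enabled at startup like any other feature flag:

    prometheus --enable-feature=warn-if-targets-relabelled-to-same-labels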
cmd/prometheus/main.go:

@@ -264,6 +264,9 @@ func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error {
 			config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
 			config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
 			logger.Info("Experimental created timestamp zero ingestion enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols))
+		case "warn-if-targets-relabelled-to-same-labels":
+			c.scrape.EnableWarnIfTargetsRelabelledToSameLabels = true
+			logger.Info("Enabled warning if targets relabelled to same labels")
 		case "delayed-compaction":
 			c.tsdb.EnableDelayedCompaction = true
 			logger.Info("Experimental delayed compaction is enabled.")
scrape/manager.go:

@@ -90,6 +90,9 @@ type Options struct {
 	// Optional HTTP client options to use when scraping.
 	HTTPClientOptions []config_util.HTTPClientOption
 
+	// Option to warn if targets relabelled to same labels
+	EnableWarnIfTargetsRelabelledToSameLabels bool
+
 	// private option for testability.
 	skipOffsetting bool
 }
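For orientation, a minimal sketch of how a caller outside the scrape package might switch the new option on. The call shape mirrors the test added further down in this PR; the helper name and the logger/registry wiring are assumptions of this sketch, not part of the diff:

func newWarningScrapeManager(logger *slog.Logger) (*scrape.Manager, error) {
	// Sketch only: enable the duplicate-labels warning via the new
	// Options field. The two nil arguments mirror the new test below.
	opts := scrape.Options{EnableWarnIfTargetsRelabelledToSameLabels: true}
	return scrape.NewManager(&opts, logger, nil, nil, prometheus.NewRegistry())
}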
@@ -205,6 +208,40 @@ func (m *Manager) reload() {
 	}
 	m.mtxScrape.Unlock()
 	wg.Wait()
+
+	if m.opts.EnableWarnIfTargetsRelabelledToSameLabels {
+		m.warnIfTargetsRelabelledToSameLabels()
+	}
+}
+
+func (m *Manager) warnIfTargetsRelabelledToSameLabels() {
+	m.mtxScrape.Lock()
+	defer m.mtxScrape.Unlock()
+
+	totalTargets := 0
+	for _, scrapePool := range m.scrapePools {
+		totalTargets += len(scrapePool.activeTargets)
+	}
+
+	activeTargets := make(map[string]*Target, totalTargets)
+	buf := [1024]byte{}
+	builder := labels.NewBuilder(labels.EmptyLabels())
+	for _, scrapePool := range m.scrapePools {
+		for _, target := range scrapePool.activeTargets {
+			lStr := string(target.labels.Bytes(buf[:]))
+			t, ok := activeTargets[lStr]
+			if !ok {
+				activeTargets[lStr] = target
+				continue
+			}
+			m.logger.Warn(
+				"Found targets with same labels after relabelling",
+				"target_one", t.DiscoveredLabels(builder).Get(model.AddressLabel),
+				"target_two", target.DiscoveredLabels(builder).Get(model.AddressLabel),
+				"labels", target.labels.String(),
+			)
+		}
+	}
 }
 
 // setOffsetSeed calculates a global offsetSeed per server relying on extra label set.
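To make the warning concrete: a configuration along these lines (illustrative only, not taken from the PR) relabels two distinct targets onto one identical label set, which reload() would now report when the feature flag is enabled. Both targets end up with the labels {instance="collapsed", job="job"} after relabelling:

scrape_configs:
  - job_name: job
    static_configs:
      - targets: ["foo:9100", "bar:9100"]
    relabel_configs:
      # Force the same instance label onto every discovered target.
      - target_label: instance
        replacement: collapsed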
scrape/manager_test.go:

@@ -622,6 +622,43 @@ func TestManagerTargetsUpdates(t *testing.T) {
 	}
 }
+
+func TestManagerDuplicateAfterRelabellingWarning(t *testing.T) {
+	var buf bytes.Buffer
+	writer := &buf
+	logger := promslog.New(&promslog.Config{Writer: writer})
+
+	opts := Options{EnableWarnIfTargetsRelabelledToSameLabels: true}
+	testRegistry := prometheus.NewRegistry()
+	m, err := NewManager(&opts, logger, nil, nil, testRegistry)
+	require.NoError(t, err)
+
+	m.scrapePools = map[string]*scrapePool{}
+	sp := &scrapePool{
+		activeTargets: map[uint64]*Target{},
+	}
+	targetScrapeCfg := &config.ScrapeConfig{
+		Scheme:         "https",
+		MetricsPath:    "/metrics",
+		JobName:        "job",
+		ScrapeInterval: model.Duration(time.Second),
+		ScrapeTimeout:  model.Duration(time.Second),
+	}
+	sp.activeTargets[uint64(0)] = &Target{
+		scrapeConfig: targetScrapeCfg,
+		tLabels:      map[model.LabelName]model.LabelValue{model.AddressLabel: "foo"},
+	}
+	sp.activeTargets[uint64(1)] = &Target{
+		scrapeConfig: targetScrapeCfg,
+		tLabels:      map[model.LabelName]model.LabelValue{model.AddressLabel: "bar"},
+	}
+	m.scrapePools["default"] = sp
+
+	m.reload()
+	require.Contains(t, buf.String(), "Found targets with same labels after relabelling")
+	require.Contains(t, buf.String(), "foo")
+	require.Contains(t, buf.String(), "bar")
+}
 
 func TestSetOffsetSeed(t *testing.T) {
 	getConfig := func(prometheus string) *config.Config {
 		cfgText := `
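Assuming a standard checkout of the repository, the new test should run in isolation with the usual Go tooling:

    go test ./scrape -run TestManagerDuplicateAfterRelabellingWarning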