mirror of
https://github.com/prometheus/prometheus.git
synced 2024-12-24 21:24:05 -08:00
Keep relabeled scrape interval and timeout on reloads (#10916)
* Preserve relabeled scrape interval and timeout on reloads

Signed-off-by: Xiaonan Shen <s@sxn.dev>
This commit is contained in:
parent
c2b4de3611
commit
0c3abdc26d
|
@ -405,8 +405,10 @@ scrape_configs:
|
|||
return noopLoop()
|
||||
}
|
||||
sp := &scrapePool{
|
||||
appendable: &nopAppendable{},
|
||||
activeTargets: map[uint64]*Target{},
|
||||
appendable: &nopAppendable{},
|
||||
activeTargets: map[uint64]*Target{
|
||||
1: {},
|
||||
},
|
||||
loops: map[uint64]loop{
|
||||
1: noopLoop(),
|
||||
},
|
||||
|
|
|
@ -426,8 +426,9 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
|
|||
cache = newScrapeCache()
|
||||
}
|
||||
|
||||
t := sp.activeTargets[fp]
|
||||
interval, timeout, err := t.intervalAndTimeout(interval, timeout)
|
||||
var (
|
||||
t = sp.activeTargets[fp]
|
||||
s = &targetScraper{Target: t, client: sp.client, timeout: timeout, bodySizeLimit: bodySizeLimit}
|
||||
newLoop = sp.newLoop(scrapeLoopOptions{
|
||||
target: t,
|
||||
|
@ -442,6 +443,9 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
|
|||
timeout: timeout,
|
||||
})
|
||||
)
|
||||
if err != nil {
|
||||
newLoop.setForcedError(err)
|
||||
}
|
||||
wg.Add(1)
|
||||
|
||||
go func(oldLoop, newLoop loop) {
|
||||
|
|
|
@ -325,6 +325,40 @@ func TestScrapePoolReload(t *testing.T) {
|
|||
require.Equal(t, numTargets, len(sp.loops), "Unexpected number of stopped loops after reload")
|
||||
}
|
||||
|
||||
func TestScrapePoolReloadPreserveRelabeledIntervalTimeout(t *testing.T) {
|
||||
reloadCfg := &config.ScrapeConfig{
|
||||
ScrapeInterval: model.Duration(3 * time.Second),
|
||||
ScrapeTimeout: model.Duration(2 * time.Second),
|
||||
}
|
||||
newLoop := func(opts scrapeLoopOptions) loop {
|
||||
l := &testLoop{interval: time.Duration(opts.interval), timeout: time.Duration(opts.timeout)}
|
||||
l.startFunc = func(interval, timeout time.Duration, errc chan<- error) {
|
||||
require.Equal(t, 5*time.Second, interval, "Unexpected scrape interval")
|
||||
require.Equal(t, 3*time.Second, timeout, "Unexpected scrape timeout")
|
||||
}
|
||||
return l
|
||||
}
|
||||
sp := &scrapePool{
|
||||
appendable: &nopAppendable{},
|
||||
activeTargets: map[uint64]*Target{
|
||||
1: {
|
||||
labels: labels.FromStrings(model.ScrapeIntervalLabel, "5s", model.ScrapeTimeoutLabel, "3s"),
|
||||
},
|
||||
},
|
||||
loops: map[uint64]loop{
|
||||
1: noopLoop(),
|
||||
},
|
||||
newLoop: newLoop,
|
||||
logger: nil,
|
||||
client: http.DefaultClient,
|
||||
}
|
||||
|
||||
err := sp.reload(reloadCfg)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to reload configuration: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestScrapePoolTargetLimit(t *testing.T) {
|
||||
var wg sync.WaitGroup
|
||||
// On starting to run, new loops created on reload check whether their preceding
|
||||
|
|
Loading…
Reference in a new issue