Throttle scraping if a scrape took longer than the configured interval.
The simple algorithm applied here increases the actual interval incrementally whenever, and for as long as, the scrape itself takes longer than the configured interval. Once scrapes take less time than the configured interval again, the actual interval iteratively decreases back to it.
This commit is contained in:
parent d2ab49c396
commit 5678a86924
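For illustration, here is a minimal, hypothetical Go sketch (not part of the commit) of how the gap between consecutive ticks, the `took` value in the diff below, evolves under this scheme. It uses a simplified ticker model in which the next tick is handled no earlier than one interval after the previous one; the real time.Ticker fires on a fixed schedule and drops ticks, so exact numbers differ, but the trend is the same. All durations are made-up example values.

// Simplified simulation of the throttling recurrence:
// next gap = max(interval, max(0, previous gap - interval) + scrape duration)
package main

import (
	"fmt"
	"time"
)

func main() {
	const interval = 10 * time.Second

	// Made-up scrape durations: three slow scrapes, then fast ones again.
	scrapes := []time.Duration{
		14 * time.Second, 14 * time.Second, 14 * time.Second,
		2 * time.Second, 2 * time.Second, 2 * time.Second,
	}

	took := interval // gap before the first simulated tick
	for i, d := range scrapes {
		fmt.Printf("iteration %d: took = %v (scrape duration %v)\n", i+1, took, d)
		throttle := took - interval // the time.Sleep in the diff; a no-op if negative
		if throttle < 0 {
			throttle = 0
		}
		took = throttle + d // the loop is busy sleeping and scraping for this long
		if took < interval {
			took = interval // the next tick is not handled earlier than the interval
		}
	}
	// The gap grows 10s -> 14s -> 18s -> 22s while scrapes are slow,
	// then iteratively shrinks back 14s -> 10s once they are fast again.
}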
@@ -265,10 +265,18 @@ func (t *target) RunScraper(ingester extraction.Ingester, interval time.Duration
 		case <-t.scraperStopping:
 			return
 		case <-ticker.C:
-			targetIntervalLength.WithLabelValues(interval.String()).Observe(float64(time.Since(t.lastScrape) / time.Second))
+			took := time.Since(t.lastScrape)
 			t.Lock() // Write t.lastScrape requires locking.
 			t.lastScrape = time.Now()
 			t.Unlock()
+			targetIntervalLength.WithLabelValues(interval.String()).Observe(
+				float64(took) / float64(time.Second), // Sub-second precision.
+			)
+			// Throttle the scrape if it took longer than interval - by
+			// sleeping for the time it took longer. This will make the
+			// actual scrape interval increase as long as a scrape takes
+			// longer than the interval we are aiming for.
+			time.Sleep(took - interval)
 			t.scrape(ingester)
 		}
 	}
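One property the throttle line relies on, documented behaviour of Go's time.Sleep rather than anything introduced by this commit: a negative or zero duration makes Sleep return immediately. That is why `time.Sleep(took - interval)` needs no explicit guard when scrapes finish faster than the interval, and why the actual interval can shrink back on its own. A tiny standalone illustration (the durations are arbitrary examples):

package main

import (
	"fmt"
	"time"
)

func main() {
	took := 5 * time.Second     // previous gap, shorter than the interval
	interval := 8 * time.Second // configured scrape interval

	start := time.Now()
	time.Sleep(took - interval) // negative duration: returns immediately
	fmt.Println("slept for", time.Since(start))
}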