Do not stop scrapes in progress during reload (#7752)

* Do not stop scrapes in progress during reload.

Signed-off-by: Julien Pivotto <roidelapluie@inuits.eu>
This commit is contained in:
Julien Pivotto 2020-08-07 15:58:16 +02:00 committed by GitHub
parent db57f31325
commit 2899773b01
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
2 changed files with 9 additions and 6 deletions

View file

@@ -1022,6 +1022,8 @@ mainLoop:
 // scrapeAndReport performs a scrape and then appends the result to the storage
 // together with reporting metrics, by using as few appenders as possible.
 // In the happy scenario, a single appender is used.
+// This function uses sl.parentCtx instead of sl.ctx on purpose. A scrape should
+// only be cancelled on shutdown, not on reloads.
 func (sl *scrapeLoop) scrapeAndReport(interval, timeout time.Duration, last time.Time, errc chan<- error) time.Time {
 	start := time.Now()
@@ -1039,7 +1041,7 @@ func (sl *scrapeLoop) scrapeAndReport(interval, timeout time.Duration, last time
 	var total, added, seriesAdded int
 	var err, appErr, scrapeErr error

-	app := sl.appender(sl.ctx)
+	app := sl.appender(sl.parentCtx)
 	defer func() {
 		if err != nil {
 			app.Rollback()
@@ -1062,7 +1064,7 @@ func (sl *scrapeLoop) scrapeAndReport(interval, timeout time.Duration, last time
 		// Add stale markers.
 		if _, _, _, err := sl.append(app, []byte{}, "", start); err != nil {
 			app.Rollback()
-			app = sl.appender(sl.ctx)
+			app = sl.appender(sl.parentCtx)
 			level.Warn(sl.l).Log("msg", "Append failed", "err", err)
 		}
 		if errc != nil {
@@ -1073,7 +1075,7 @@ func (sl *scrapeLoop) scrapeAndReport(interval, timeout time.Duration, last time
 	}

 	var contentType string
-	scrapeCtx, cancel := context.WithTimeout(sl.ctx, timeout)
+	scrapeCtx, cancel := context.WithTimeout(sl.parentCtx, timeout)
 	contentType, scrapeErr = sl.scraper.scrape(scrapeCtx, buf)
 	cancel()

@@ -1097,13 +1099,13 @@ func (sl *scrapeLoop) scrapeAndReport(interval, timeout time.Duration, last time
 	total, added, seriesAdded, appErr = sl.append(app, b, contentType, start)
 	if appErr != nil {
 		app.Rollback()
-		app = sl.appender(sl.ctx)
+		app = sl.appender(sl.parentCtx)
 		level.Debug(sl.l).Log("msg", "Append failed", "err", appErr)
 		// The append failed, probably due to a parse error or sample limit.
 		// Call sl.append again with an empty scrape to trigger stale markers.
 		if _, _, _, err := sl.append(app, []byte{}, "", start); err != nil {
 			app.Rollback()
-			app = sl.appender(sl.ctx)
+			app = sl.appender(sl.parentCtx)
 			level.Warn(sl.l).Log("msg", "Append failed", "err", err)
 		}
 	}

View file

@@ -559,9 +559,10 @@ func TestScrapeLoopStop(t *testing.T) {
 		numScrapes++
 		if numScrapes == 2 {
 			go sl.stop()
+			<-sl.ctx.Done()
 		}
 		w.Write([]byte("metric_a 42\n"))
-		return nil
+		return ctx.Err()
 	}

go func() { go func() {