Continue scraping old targets on SD fail.

When we have trouble resolving the targets for a job via service
discovery, we shouldn't just stop scraping the targets we currently
have.
Julius Volz 2013-07-12 22:38:02 +02:00
parent 8f0a3a060c
commit 9a48f57b66

@@ -127,11 +127,11 @@ func (p *TargetPool) runIteration(results chan<- *extraction.Result, interval ti
 	if p.targetProvider != nil {
 		targets, err := p.targetProvider.Targets()
 		if err != nil {
-			log.Printf("Error looking up targets: %s", err)
-			return
-		}
-		p.ReplaceTargets(targets)
+			log.Printf("Error looking up targets, keeping old list: %s", err)
+		} else {
+			p.ReplaceTargets(targets)
+		}
 	}
 
 	p.RLock()
 	defer p.RUnlock()
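
To make the behavior concrete, here is a minimal, self-contained sketch of the same pattern outside the Prometheus codebase: a pool that keeps its previous target list whenever a service-discovery lookup fails. The TargetProvider interface, targetPool struct, and flakyProvider below are illustrative assumptions for this sketch, not the actual Prometheus types.

package main

import (
	"errors"
	"fmt"
	"log"
	"sync"
)

// TargetProvider is a hypothetical stand-in for a service-discovery source.
type TargetProvider interface {
	Targets() ([]string, error)
}

// targetPool holds the current scrape targets behind a RWMutex.
type targetPool struct {
	sync.RWMutex
	targets  []string
	provider TargetProvider
}

// refresh re-resolves targets but keeps the old list if discovery fails,
// mirroring the behavior this commit introduces.
func (p *targetPool) refresh() {
	targets, err := p.provider.Targets()
	if err != nil {
		// Keep scraping the targets we already have.
		log.Printf("Error looking up targets, keeping old list: %s", err)
		return
	}
	p.Lock()
	p.targets = targets
	p.Unlock()
}

// flakyProvider fails on every second call to demonstrate the fallback.
type flakyProvider struct{ calls int }

func (f *flakyProvider) Targets() ([]string, error) {
	f.calls++
	if f.calls%2 == 0 {
		return nil, errors.New("simulated SD failure")
	}
	return []string{"10.0.0.1:9090", "10.0.0.2:9090"}, nil
}

func main() {
	p := &targetPool{provider: &flakyProvider{}}
	p.refresh() // succeeds, populates the target list
	p.refresh() // fails, the previous list is kept
	p.RLock()
	fmt.Println(p.targets) // still the two targets from the first refresh
	p.RUnlock()
}

The else branch in the diff accomplishes the same thing as the early return in this sketch: ReplaceTargets only runs when discovery succeeds, so a transient SD outage leaves the previous scrape set in place instead of dropping all targets.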