Continue scraping old targets on SD fail.

When we have trouble resolving the targets for a job via service
discovery, we shouldn't just stop scraping the targets we currently
have.
This commit is contained in:
Julius Volz 2013-07-12 22:38:02 +02:00
parent 8f0a3a060c
commit 9a48f57b66

View file

@@ -127,10 +127,10 @@ func (p *TargetPool) runIteration(results chan<- *extraction.Result, interval time.Duration) {
 	if p.targetProvider != nil {
 		targets, err := p.targetProvider.Targets()
 		if err != nil {
-			log.Printf("Error looking up targets: %s", err)
-			return
+			log.Printf("Error looking up targets, keeping old list: %s", err)
+		} else {
+			p.ReplaceTargets(targets)
 		}
-		p.ReplaceTargets(targets)
 	}
 	p.RLock()