Mirror of https://github.com/prometheus/prometheus.git
The scrape pool no longer relies on a context: Stop() needs to be blocking to prevent scrape loops from trying to write to a closed TSDB storage.
Commit 9c61f0e8a0 (parent e405e2f1ea)
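In short: ScrapeManager loses its context and gains a graceShut channel; Stop() halts every scrape pool synchronously and only then closes the channel, which lets the Run() loop return. A minimal, self-contained sketch of that shutdown pattern, assuming a single stand-in worker loop (Manager, loop, and quit are illustrative names, not the Prometheus types):

package main

import (
	"fmt"
	"sync"
	"time"
)

// Manager mirrors the shape of the patched ScrapeManager: no context;
// shutdown is signalled over a channel once all workers have stopped.
type Manager struct {
	graceShut chan struct{} // closed by Stop after all workers have exited
	quit      chan struct{} // tells workers to stop
	wg        sync.WaitGroup
}

func NewManager() *Manager {
	m := &Manager{
		graceShut: make(chan struct{}),
		quit:      make(chan struct{}),
	}
	m.wg.Add(1)
	go m.loop() // a stand-in for a scrape loop
	return m
}

func (m *Manager) loop() {
	defer m.wg.Done()
	for {
		select {
		case <-m.quit:
			return
		case <-time.After(time.Millisecond):
			// ... scrape and append samples to storage ...
		}
	}
}

// Run blocks until Stop has completed.
func (m *Manager) Run() error {
	<-m.graceShut
	return nil
}

// Stop blocks until every worker has exited and only then lets Run return,
// so the caller can close the storage right after Stop without racing
// against in-flight appends.
func (m *Manager) Stop() {
	close(m.quit)
	m.wg.Wait()
	close(m.graceShut)
}

func main() {
	m := NewManager()
	go func() { m.Stop() }()
	fmt.Println(m.Run()) // returns only after all workers stopped
}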
@@ -231,16 +231,24 @@ func main() {
 	cfg.queryEngine.Logger = log.With(logger, "component", "query engine")
 	var (
-		notifier = notifier.New(&cfg.notifier, log.With(logger, "component", "notifier"))
-		ctxDiscovery, cancelDiscovery = context.WithCancel(context.Background())
-		discoveryManager = discovery.NewDiscoveryManager(ctxDiscovery, log.With(logger, "component", "discovery manager"))
-		ctxScrape, cancelScrape = context.WithCancel(context.Background())
-		scrapeManager = retrieval.NewScrapeManager(ctxScrape, log.With(logger, "component", "scrape manager"), fanoutStorage)
-		queryEngine = promql.NewEngine(fanoutStorage, &cfg.queryEngine)
 		ctxWeb, cancelWeb = context.WithCancel(context.Background())
-		webHandler = web.New(log.With(logger, "component", "web"), &cfg.web)
+		ctxDiscovery, cancelDiscovery = context.WithCancel(context.Background())
+		ctxRule = context.Background()
+
+		notifier = notifier.New(&cfg.notifier, log.With(logger, "component", "notifier"))
+		discoveryManager = discovery.NewDiscoveryManager(ctxDiscovery, log.With(logger, "component", "discovery manager"))
+		scrapeManager = retrieval.NewScrapeManager(log.With(logger, "component", "scrape manager"), fanoutStorage)
+		queryEngine = promql.NewEngine(fanoutStorage, &cfg.queryEngine)
+		ruleManager = rules.NewManager(&rules.ManagerOptions{Appendable: fanoutStorage,
+			Notifier:    notifier,
+			QueryEngine: queryEngine,
+			Context:     ctxRule,
+			ExternalURL: cfg.web.ExternalURL,
+			Logger:      log.With(logger, "component", "rule manager"),
+		})
 	)
 
+<<<<<<< HEAD
 	ctx := context.Background()
 	ruleManager := rules.NewManager(&rules.ManagerOptions{
 		Appendable: fanoutStorage,
@@ -253,6 +261,9 @@ func main() {
 	})
 
 	cfg.web.Context = ctx
+=======
+	cfg.web.Context = ctxWeb
+>>>>>>> 95b1dec3... scrape pool doesn't rely on context as Stop() needs to be blocking to prevent Scrape loops trying to write to a closed TSDB storage.
 	cfg.web.TSDB = localStorage.Get
 	cfg.web.Storage = fanoutStorage
 	cfg.web.QueryEngine = queryEngine

@@ -274,6 +285,9 @@ func main() {
 		cfg.web.Flags[f.Name] = f.Value.String()
 	}
 
+	// Depend on cfg.web.ScrapeManager so needs to be after cfg.web.ScrapeManager = scrapeManager
+	webHandler := web.New(log.With(logger, "component", "web"), &cfg.web)
+
 	// Monitor outgoing connections on default transport with conntrack.
 	http.DefaultTransport.(*http.Transport).DialContext = conntrack.NewDialContextFunc(
 		conntrack.DialWithTracing(),
@@ -310,17 +324,6 @@ func main() {
 
 	var g group.Group
 	{
-		g.Add(
-			func() error {
-				err := discoveryManager.Run()
-				level.Info(logger).Log("msg", "Discovery manager stopped")
-				return err
-			},
-			func(err error) {
-				level.Info(logger).Log("msg", "Stopping discovery manager...")
-				cancelDiscovery()
-			},
-		)
 		term := make(chan os.Signal)
 		signal.Notify(term, os.Interrupt, syscall.SIGTERM)
 		cancel := make(chan struct{})

@@ -341,6 +344,34 @@ func main() {
 			},
 		)
 	}
+	{
+		g.Add(
+			func() error {
+				err := discoveryManager.Run()
+				level.Info(logger).Log("msg", "Discovery manager stopped")
+				return err
+			},
+			func(err error) {
+				level.Info(logger).Log("msg", "Stopping discovery manager...")
+				cancelDiscovery()
+			},
+		)
+	}
+	{
+		g.Add(
+			func() error {
+				err := scrapeManager.Run(discoveryManager.SyncCh())
+				level.Info(logger).Log("msg", "Scrape manager stopped")
+				return err
+			},
+			func(err error) {
+				// Scrape manager needs to be stopped before closing the local TSDB
+				// so that it doesn't try to write samples to a closed storage.
+				level.Info(logger).Log("msg", "Stopping scrape manager...")
+				scrapeManager.Stop()
+			},
+		)
+	}
 	{
 		// Make sure that sighup handler is registered with a redirect to the channel before the potentially
 		// long and synchronous tsdb init.
@@ -482,19 +513,6 @@ func main() {
 			},
 		)
 	}
-	{
-		g.Add(
-			func() error {
-				err := scrapeManager.Run(discoveryManager.SyncCh())
-				level.Info(logger).Log("msg", "Scrape manager stopped")
-				return err
-			},
-			func(err error) {
-				level.Info(logger).Log("msg", "Stopping scrape manager...")
-				cancelScrape()
-			},
-		)
-	}
 	if err := g.Run(); err != nil {
 		level.Error(logger).Log("err", err)
 	}
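In main(), both managers are registered with the run group as execute/interrupt pairs; when any actor exits, the group invokes every interrupt and waits for all executes to return. A standalone sketch of the same wiring, assuming github.com/oklog/run, which exposes the same Add/Run shape as the group package used in the diff (stubManager is illustrative):

package main

import (
	"fmt"

	"github.com/oklog/run"
)

// stubManager stands in for the scrape manager: Run blocks until Stop,
// and Stop blocks until the work has fully wound down, as in the patch.
type stubManager struct{ graceShut chan struct{} }

func (m *stubManager) Run() error {
	<-m.graceShut
	return nil
}
func (m *stubManager) Stop() { close(m.graceShut) }

func main() {
	scrapeManager := &stubManager{graceShut: make(chan struct{})}

	var g run.Group
	g.Add(
		func() error {
			err := scrapeManager.Run()
			fmt.Println("scrape manager stopped")
			return err
		},
		func(err error) {
			// Called when any other actor exits. Because Stop blocks until
			// all scrape loops have exited, the TSDB actor's interrupt
			// (which closes the storage) cannot race with appends.
			fmt.Println("stopping scrape manager...")
			scrapeManager.Stop()
		},
	)
	// A second actor that exits immediately, triggering shutdown.
	g.Add(func() error { return nil }, func(error) {})

	fmt.Println(g.Run())
}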
@@ -87,7 +87,6 @@ func (m *DiscoveryManager) Run() error {
 			return m.ctx.Err()
 		}
 	}
-
 }
 
 // SyncCh returns a read only channel used by all DiscoveryProviders targetSet updates
@@ -14,7 +14,6 @@
 package retrieval
 
 import (
-	"context"
 	"fmt"
 
 	"github.com/go-kit/kit/log"
@@ -30,27 +29,27 @@ type Appendable interface {
 }
 
 // NewScrapeManager is the ScrapeManager constructor
-func NewScrapeManager(ctx context.Context, logger log.Logger, app Appendable) *ScrapeManager {
+func NewScrapeManager(logger log.Logger, app Appendable) *ScrapeManager {
 
 	return &ScrapeManager{
-		ctx:           ctx,
 		append:        app,
 		logger:        logger,
 		actionCh:      make(chan func()),
 		scrapeConfigs: make(map[string]*config.ScrapeConfig),
 		scrapePools:   make(map[string]*scrapePool),
+		graceShut:     make(chan struct{}),
 	}
 }
 
 // ScrapeManager maintains a set of scrape pools and manages start/stop cicles
 // when receiving new target groups form the discovery manager.
 type ScrapeManager struct {
-	ctx           context.Context
 	logger        log.Logger
 	append        Appendable
 	scrapeConfigs map[string]*config.ScrapeConfig
 	scrapePools   map[string]*scrapePool
 	actionCh      chan func()
+	graceShut     chan struct{}
 }
 
 // Run starts background processing to handle target updates and reload the scraping loops.
@@ -63,12 +62,20 @@ func (m *ScrapeManager) Run(tsets <-chan map[string][]*config.TargetGroup) error
 			f()
 		case ts := <-tsets:
 			m.reload(ts)
-		case <-m.ctx.Done():
-			return m.ctx.Err()
+		case <-m.graceShut:
+			return nil
 		}
 	}
 }
 
+// Stop cancels all running scrape pools and blocks until all have exited.
+func (m *ScrapeManager) Stop() {
+	for _, sp := range m.scrapePools {
+		sp.stop()
+	}
+	close(m.graceShut)
+}
+
 // ApplyConfig resets the manager's target providers and job configurations as defined by the new cfg.
 func (m *ScrapeManager) ApplyConfig(cfg *config.Config) error {
 	done := make(chan struct{})
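The Run loop above also executes closures received over actionCh, which is how ApplyConfig's done channel works: state changes are funneled into the loop goroutine, so no mutex guards scrapeConfigs or scrapePools. A minimal sketch of that chan func() pattern, assuming an illustrative setConfig in place of ApplyConfig:

package main

import "fmt"

type manager struct {
	actionCh  chan func()
	graceShut chan struct{}
	config    string // state owned exclusively by the Run goroutine
}

// Run owns all mutable state; it executes closures sent over actionCh.
func (m *manager) Run() error {
	for {
		select {
		case f := <-m.actionCh:
			f()
		case <-m.graceShut:
			return nil
		}
	}
}

// setConfig mutates state from another goroutine by handing the Run loop
// a closure and waiting for it to be executed.
func (m *manager) setConfig(cfg string) {
	done := make(chan struct{})
	m.actionCh <- func() {
		m.config = cfg
		close(done)
	}
	<-done
}

func main() {
	m := &manager{
		actionCh:  make(chan func()),
		graceShut: make(chan struct{}),
	}
	go m.Run()
	m.setConfig("scrape_interval: 15s")
	fmt.Println("config applied")
	close(m.graceShut)
}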
@@ -123,10 +130,10 @@ func (m *ScrapeManager) reload(t map[string][]*config.TargetGroup) error {
 			return fmt.Errorf("target set '%v' doesn't have valid config", tsetName)
 		}
 
-		// scrape pool doesn't exist so start a new one
+		// Scrape pool doesn't exist so start a new one.
 		existing, ok := m.scrapePools[tsetName]
 		if !ok {
-			sp := newScrapePool(m.ctx, scrapeConfig, m.append, log.With(m.logger, "scrape_pool", tsetName))
+			sp := newScrapePool(scrapeConfig, m.append, log.With(m.logger, "scrape_pool", tsetName))
 			m.scrapePools[tsetName] = sp
 			sp.Sync(tgroup)
 

@@ -134,7 +141,7 @@ func (m *ScrapeManager) reload(t map[string][]*config.TargetGroup) error {
 			existing.Sync(tgroup)
 		}
 
-		// cleanup - check config and cancel the scrape loops if it don't exist in the scrape config
+		// Cleanup - check the config and cancel the scrape loops if it don't exist in the scrape config.
 		jobs := make(map[string]struct{})
 
 		for k := range m.scrapeConfigs {
@@ -117,7 +117,6 @@ func init() {
 type scrapePool struct {
 	appendable Appendable
 	logger     log.Logger
-	ctx        context.Context
 
 	mtx    sync.RWMutex
 	config *config.ScrapeConfig

@@ -136,7 +135,7 @@ const maxAheadTime = 10 * time.Minute
 
 type labelsMutator func(labels.Labels) labels.Labels
 
-func newScrapePool(ctx context.Context, cfg *config.ScrapeConfig, app Appendable, logger log.Logger) *scrapePool {
+func newScrapePool(cfg *config.ScrapeConfig, app Appendable, logger log.Logger) *scrapePool {
 	if logger == nil {
 		logger = log.NewNopLogger()
 	}

@@ -152,14 +151,15 @@ func newScrapePool(ctx context.Context, cfg *config.ScrapeConfig, app Appendable
 	sp := &scrapePool{
 		appendable: app,
 		config:     cfg,
-		ctx:        ctx,
 		client:     client,
 		targets:    map[uint64]*Target{},
 		loops:      map[uint64]loop{},
 		logger:     logger,
 	}
 	sp.newLoop = func(t *Target, s scraper) loop {
-		return newScrapeLoop(sp.ctx, s,
+		return newScrapeLoop(
+			context.Background(),
+			s,
 			log.With(logger, "target", t),
 			buffers,
 			func(l labels.Labels) labels.Labels { return sp.mutateSampleLabels(l, t) },

@@ -189,7 +189,6 @@ func (sp *scrapePool) stop() {
 		delete(sp.loops, fp)
 		delete(sp.targets, fp)
 	}
-
 	wg.Wait()
 }
 
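ScrapeManager.Stop() can only be blocking because scrapePool.stop() is: it cancels every loop and then waits on a WaitGroup, as the hunk above shows. A reduced sketch of that stop-and-wait shape, assuming an illustrative loop type in place of the real scrape loops:

package main

import (
	"fmt"
	"sync"
)

type loop struct{ cancel chan struct{} }

func (l *loop) run(wg *sync.WaitGroup) {
	defer wg.Done()
	<-l.cancel // scrape until cancelled
}

func (l *loop) stop() { close(l.cancel) }

type scrapePool struct {
	mtx   sync.Mutex
	loops map[uint64]*loop
	wg    sync.WaitGroup
}

// stop cancels all loops and blocks until every one has exited.
func (sp *scrapePool) stop() {
	sp.mtx.Lock()
	for fp, l := range sp.loops {
		l.stop()
		delete(sp.loops, fp)
	}
	sp.mtx.Unlock()
	sp.wg.Wait()
}

func main() {
	sp := &scrapePool{loops: map[uint64]*loop{}}
	for i := uint64(0); i < 3; i++ {
		l := &loop{cancel: make(chan struct{})}
		sp.loops[i] = l
		sp.wg.Add(1)
		go l.run(&sp.wg)
	}
	sp.stop()
	fmt.Println("all scrape loops exited")
}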
@@ -582,8 +581,7 @@ func (c *scrapeCache) forEachStale(f func(labels.Labels) bool) {
 	}
 }
 
-func newScrapeLoop(
-	ctx context.Context,
+func newScrapeLoop(ctx context.Context,
 	sc scraper,
 	l log.Logger,
 	buffers *pool.BytesPool,

@@ -605,8 +603,8 @@ func newScrapeLoop(
 		sampleMutator:       sampleMutator,
 		reportSampleMutator: reportSampleMutator,
 		stopped:             make(chan struct{}),
-		ctx:                 ctx,
 		l:                   l,
+		ctx:                 ctx,
 	}
 	sl.scrapeCtx, sl.cancel = context.WithCancel(ctx)
 
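With the pool's ctx field gone, each loop is now rooted at context.Background() and derives scrapeCtx via WithCancel, so the only thing that can stop scraping is the explicit stop path, never a parent context torn down elsewhere. A compact illustration of that two-context split (the sl and scrapeCtx names follow the diff; the rest is illustrative):

package main

import (
	"context"
	"fmt"
)

type scrapeLoop struct {
	ctx       context.Context    // for storage-side operations
	scrapeCtx context.Context    // governs only the scraping itself
	cancel    context.CancelFunc // stops scraping; appends may still finish
}

func newScrapeLoop(ctx context.Context) *scrapeLoop {
	sl := &scrapeLoop{ctx: ctx}
	sl.scrapeCtx, sl.cancel = context.WithCancel(ctx)
	return sl
}

func main() {
	// As in the patched newLoop: the parent is context.Background(),
	// so nothing outside the explicit stop path can cancel the loop.
	sl := newScrapeLoop(context.Background())
	sl.cancel()
	fmt.Println(sl.scrapeCtx.Err()) // context.Canceled
	fmt.Println(sl.ctx.Err())       // <nil>: parent unaffected
}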
@@ -44,7 +44,7 @@ func TestNewScrapePool(t *testing.T) {
 	var (
 		app = &nopAppendable{}
 		cfg = &config.ScrapeConfig{}
-		sp  = newScrapePool(context.Background(), cfg, app, nil)
+		sp  = newScrapePool(cfg, app, nil)
 	)
 
 	if a, ok := sp.appendable.(*nopAppendable); !ok || a != app {

@@ -231,7 +231,7 @@ func TestScrapePoolReload(t *testing.T) {
 func TestScrapePoolAppender(t *testing.T) {
 	cfg := &config.ScrapeConfig{}
 	app := &nopAppendable{}
-	sp := newScrapePool(context.Background(), cfg, app, nil)
+	sp := newScrapePool(cfg, app, nil)
 
 	wrapped := sp.appender()
 