Mirror of https://github.com/prometheus/prometheus.git
Merge pull request #7761 from roidelapluie/reload
Log duration of reloads
Commit db57f31325
@@ -436,56 +436,73 @@ func main() {
         conntrack.DialWithTracing(),
     )
 
-    reloaders := []func(cfg *config.Config) error{
-        remoteStorage.ApplyConfig,
-        webHandler.ApplyConfig,
-        func(cfg *config.Config) error {
-            if cfg.GlobalConfig.QueryLogFile == "" {
-                queryEngine.SetQueryLogger(nil)
-                return nil
-            }
-
-            l, err := logging.NewJSONFileLogger(cfg.GlobalConfig.QueryLogFile)
-            if err != nil {
-                return err
-            }
-            queryEngine.SetQueryLogger(l)
-            return nil
-        },
-        // The Scrape and notifier managers need to reload before the Discovery manager as
-        // they need to read the most updated config when receiving the new targets list.
-        scrapeManager.ApplyConfig,
-        func(cfg *config.Config) error {
-            c := make(map[string]sd_config.ServiceDiscoveryConfig)
-            for _, v := range cfg.ScrapeConfigs {
-                c[v.JobName] = v.ServiceDiscoveryConfig
-            }
-            return discoveryManagerScrape.ApplyConfig(c)
-        },
-        notifierManager.ApplyConfig,
-        func(cfg *config.Config) error {
-            c := make(map[string]sd_config.ServiceDiscoveryConfig)
-            for k, v := range cfg.AlertingConfig.AlertmanagerConfigs.ToMap() {
-                c[k] = v.ServiceDiscoveryConfig
-            }
-            return discoveryManagerNotify.ApplyConfig(c)
-        },
-        func(cfg *config.Config) error {
-            // Get all rule files matching the configuration paths.
-            var files []string
-            for _, pat := range cfg.RuleFiles {
-                fs, err := filepath.Glob(pat)
-                if err != nil {
-                    // The only error can be a bad pattern.
-                    return errors.Wrapf(err, "error retrieving rule files for %s", pat)
-                }
-                files = append(files, fs...)
-            }
-            return ruleManager.Update(
-                time.Duration(cfg.GlobalConfig.EvaluationInterval),
-                files,
-                cfg.GlobalConfig.ExternalLabels,
-            )
-        },
-    }
+    reloaders := []reloader{
+        {
+            name:     "remote_storage",
+            reloader: remoteStorage.ApplyConfig,
+        }, {
+            name:     "web_handler",
+            reloader: webHandler.ApplyConfig,
+        }, {
+            name: "query_engine",
+            reloader: func(cfg *config.Config) error {
+                if cfg.GlobalConfig.QueryLogFile == "" {
+                    queryEngine.SetQueryLogger(nil)
+                    return nil
+                }
+
+                l, err := logging.NewJSONFileLogger(cfg.GlobalConfig.QueryLogFile)
+                if err != nil {
+                    return err
+                }
+                queryEngine.SetQueryLogger(l)
+                return nil
+            },
+        }, {
+            // The Scrape and notifier managers need to reload before the Discovery manager as
+            // they need to read the most updated config when receiving the new targets list.
+            name:     "scrape",
+            reloader: scrapeManager.ApplyConfig,
+        }, {
+            name: "scrape_sd",
+            reloader: func(cfg *config.Config) error {
+                c := make(map[string]sd_config.ServiceDiscoveryConfig)
+                for _, v := range cfg.ScrapeConfigs {
+                    c[v.JobName] = v.ServiceDiscoveryConfig
+                }
+                return discoveryManagerScrape.ApplyConfig(c)
+            },
+        }, {
+            name:     "notify",
+            reloader: notifierManager.ApplyConfig,
+        }, {
+            name: "notify_sd",
+            reloader: func(cfg *config.Config) error {
+                c := make(map[string]sd_config.ServiceDiscoveryConfig)
+                for k, v := range cfg.AlertingConfig.AlertmanagerConfigs.ToMap() {
+                    c[k] = v.ServiceDiscoveryConfig
+                }
+                return discoveryManagerNotify.ApplyConfig(c)
+            },
+        }, {
+            name: "rules",
+            reloader: func(cfg *config.Config) error {
+                // Get all rule files matching the configuration paths.
+                var files []string
+                for _, pat := range cfg.RuleFiles {
+                    fs, err := filepath.Glob(pat)
+                    if err != nil {
+                        // The only error can be a bad pattern.
+                        return errors.Wrapf(err, "error retrieving rule files for %s", pat)
+                    }
+                    files = append(files, fs...)
+                }
+                return ruleManager.Update(
+                    time.Duration(cfg.GlobalConfig.EvaluationInterval),
+                    files,
+                    cfg.GlobalConfig.ExternalLabels,
+                )
+            },
+        },
+    }
 
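Read together with the reloader struct defined in the next hunk, this change replaces a bare slice of ApplyConfig functions with a slice of named reloaders, so each step of a configuration reload can be timed and reported individually. Below is a minimal, self-contained sketch of that pattern; the Config stub, the sample reloaders, and the plain fmt output are simplified placeholders, not the actual Prometheus types or logging.

package main

import (
    "fmt"
    "time"
)

// Config stands in for Prometheus's *config.Config.
type Config struct {
    QueryLogFile string
}

// reloader pairs a short, stable name with the function that applies a new
// configuration, mirroring the struct introduced by this change.
type reloader struct {
    name     string
    reloader func(cfg *Config) error
}

func main() {
    cfg := &Config{QueryLogFile: "/tmp/query.log"}

    reloaders := []reloader{
        {name: "query_engine", reloader: func(cfg *Config) error {
            // Stand-in for queryEngine.SetQueryLogger(...).
            fmt.Println("query log file:", cfg.QueryLogFile)
            return nil
        }},
        {name: "rules", reloader: func(cfg *Config) error {
            // Stand-in for ruleManager.Update(...).
            return nil
        }},
    }

    // Apply each reloader in order and time it individually, so a slow step
    // shows up under its own name instead of hiding inside the total.
    timings := []interface{}{}
    for _, rl := range reloaders {
        start := time.Now()
        if err := rl.reloader(cfg); err != nil {
            fmt.Println("failed to apply configuration:", err)
        }
        timings = append(timings, rl.name, time.Since(start))
    }
    fmt.Println(timings...)
}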
|
@@ -821,7 +838,14 @@ func (i *safePromQLNoStepSubqueryInterval) Get(int64) int64 {
     return i.value.Load()
 }
 
-func reloadConfig(filename string, logger log.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, rls ...func(*config.Config) error) (err error) {
+type reloader struct {
+    name     string
+    reloader func(*config.Config) error
+}
+
+func reloadConfig(filename string, logger log.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, rls ...reloader) (err error) {
+    start := time.Now()
+    timings := []interface{}{}
     level.Info(logger).Log("msg", "Loading configuration file", "filename", filename)
 
     defer func() {
@@ -840,17 +864,20 @@ func reloadConfig(filename string, logger log.Logger, noStepSuqueryInterval *saf
 
     failed := false
     for _, rl := range rls {
-        if err := rl(conf); err != nil {
+        rstart := time.Now()
+        if err := rl.reloader(conf); err != nil {
             level.Error(logger).Log("msg", "Failed to apply configuration", "err", err)
             failed = true
         }
+        timings = append(timings, rl.name, time.Since(rstart))
     }
     if failed {
         return errors.Errorf("one or more errors occurred while applying the new configuration (--config.file=%q)", filename)
     }
 
     noStepSuqueryInterval.Set(conf.GlobalConfig.EvaluationInterval)
-    level.Info(logger).Log("msg", "Completed loading of configuration file", "filename", filename)
+    l := []interface{}{"msg", "Completed loading of configuration file", "filename", filename, "totalDuration", time.Since(start)}
+    level.Info(logger).Log(append(l, timings...)...)
     return nil
 }
 
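The final log call relies on go-kit's variadic key/value convention: the fixed fields and the per-reloader timings are concatenated into one slice and passed as alternating keys and values, so each reloader's duration appears as its own field next to totalDuration. Here is a small sketch of that mechanism using the go-kit logger visible in the diff; the timing values and the sample output line are illustrative only.

package main

import (
    "os"
    "time"

    "github.com/go-kit/kit/log"
    "github.com/go-kit/kit/log/level"
)

func main() {
    logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))

    // Hypothetical timings gathered during one reload: alternating
    // reloader name and the time that reloader took.
    timings := []interface{}{
        "remote_storage", 1200 * time.Microsecond,
        "scrape", 3 * time.Millisecond,
    }

    // Same construction as in reloadConfig: fixed fields first, then the
    // per-reloader timings appended, all passed to one variadic Log call.
    l := []interface{}{
        "msg", "Completed loading of configuration file",
        "filename", "prometheus.yml",
        "totalDuration", 5 * time.Millisecond,
    }
    level.Info(logger).Log(append(l, timings...)...)

    // Roughly:
    // level=info msg="Completed loading of configuration file" filename=prometheus.yml totalDuration=5ms remote_storage=1.2ms scrape=3ms
}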