mirror of
https://github.com/prometheus/prometheus.git
synced 2024-11-09 23:24:05 -08:00
rebased and resolved conflicts with the new Discovery GUI page
Signed-off-by: Krasi Georgiev <krasi.root@gmail.com>
This commit is contained in:
parent
60ef2016d5
commit
587dec9eb9
|
@ -238,11 +238,11 @@ func main() {
|
|||
discoveryManager = discovery.NewManager(log.With(logger, "component", "discovery manager"))
|
||||
scrapeManager = retrieval.NewScrapeManager(log.With(logger, "component", "scrape manager"), fanoutStorage)
|
||||
queryEngine = promql.NewEngine(fanoutStorage, &cfg.queryEngine)
|
||||
ruleManager := rules.NewManager(&rules.ManagerOptions{
|
||||
ruleManager = rules.NewManager(&rules.ManagerOptions{
|
||||
Appendable: fanoutStorage,
|
||||
QueryFunc: rules.EngineQueryFunc(queryEngine),
|
||||
NotifyFunc: sendAlerts(notifier, cfg.web.ExternalURL.String()),
|
||||
Context: ctx,
|
||||
Context: ctxRule,
|
||||
ExternalURL: cfg.web.ExternalURL,
|
||||
Registerer: prometheus.DefaultRegisterer,
|
||||
Logger: log.With(logger, "component", "rule manager"),
|
||||
|
@ -271,7 +271,7 @@ func main() {
|
|||
cfg.web.Flags[f.Name] = f.Value.String()
|
||||
}
|
||||
|
||||
// Depend on cfg.web.ScrapeManager so needs to be after cfg.web.ScrapeManager = scrapeManager
|
||||
// Depends on cfg.web.ScrapeManager so needs to be after cfg.web.ScrapeManager = scrapeManager
|
||||
webHandler := web.New(log.With(logger, "component", "web"), &cfg.web)
|
||||
|
||||
// Monitor outgoing connections on default transport with conntrack.
|
||||
|
@ -281,9 +281,9 @@ func main() {
|
|||
|
||||
reloaders := []func(cfg *config.Config) error{
|
||||
remoteStorage.ApplyConfig,
|
||||
discoveryManager.ApplyConfig,
|
||||
webHandler.ApplyConfig,
|
||||
notifier.ApplyConfig,
|
||||
discoveryManager.ApplyConfig,
|
||||
scrapeManager.ApplyConfig,
|
||||
func(cfg *config.Config) error {
|
||||
// Get all rule files matching the configuration paths.
|
||||
|
|
|
@ -53,7 +53,7 @@ type Discoverer interface {
|
|||
}
|
||||
|
||||
type poolKey struct {
|
||||
set string
|
||||
setName string
|
||||
provider string
|
||||
}
|
||||
|
||||
|
@ -111,7 +111,7 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error {
|
|||
m.cancelDiscoverers()
|
||||
for _, scfg := range cfg.ScrapeConfigs {
|
||||
for provName, prov := range m.providersFromConfig(scfg.ServiceDiscoveryConfig) {
|
||||
m.startProvider(ctx, poolKey{set: scfg.JobName, provider: provName}, prov)
|
||||
m.startProvider(ctx, poolKey{setName: scfg.JobName, provider: provName}, prov)
|
||||
}
|
||||
}
|
||||
close(err)
|
||||
|
@ -184,7 +184,7 @@ func (m *Manager) allGroups(pk poolKey) map[string][]*config.TargetGroup {
|
|||
for _, pk := range pKeys {
|
||||
for _, tg := range m.targets[pk] {
|
||||
if tg.Source != "" { // Don't add empty targets.
|
||||
tSetsAll[pk.set] = append(tSetsAll[pk.set], tg)
|
||||
tSetsAll[pk.setName] = append(tSetsAll[pk.setName], tg)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -590,7 +590,7 @@ func TestDiscoveryManagerSyncCalls(t *testing.T) {
|
|||
var totalUpdatesCount int
|
||||
for tpName, update := range testCase.updates {
|
||||
provider := newMockDiscoveryProvider(update)
|
||||
discoveryManager.startProvider(ctx, poolKey{set: strconv.Itoa(testIndex), provider: tpName}, provider)
|
||||
discoveryManager.startProvider(ctx, poolKey{setName: strconv.Itoa(testIndex), provider: tpName}, provider)
|
||||
|
||||
if len(update) > 0 {
|
||||
totalUpdatesCount = totalUpdatesCount + len(update)
|
||||
|
@ -674,8 +674,8 @@ scrape_configs:
|
|||
discoveryManager.ApplyConfig(cfg)
|
||||
|
||||
_ = <-discoveryManager.SyncCh()
|
||||
verifyPresence(discoveryManager.targets, poolKey{set: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true)
|
||||
verifyPresence(discoveryManager.targets, poolKey{set: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", true)
|
||||
verifyPresence(discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true)
|
||||
verifyPresence(discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", true)
|
||||
|
||||
sTwo := `
|
||||
scrape_configs:
|
||||
|
@ -689,8 +689,8 @@ scrape_configs:
|
|||
discoveryManager.ApplyConfig(cfg)
|
||||
|
||||
_ = <-discoveryManager.SyncCh()
|
||||
verifyPresence(discoveryManager.targets, poolKey{set: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true)
|
||||
verifyPresence(discoveryManager.targets, poolKey{set: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", false)
|
||||
verifyPresence(discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true)
|
||||
verifyPresence(discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", false)
|
||||
}
|
||||
|
||||
type update struct {
|
||||
|
|
|
@ -92,20 +92,21 @@ func (m *ScrapeManager) ApplyConfig(cfg *config.Config) error {
|
|||
}
|
||||
|
||||
// TargetMap returns map of active and dropped targets and their corresponding scrape config job name.
|
||||
func (tm *TargetManager) TargetMap() map[string][]*Target {
|
||||
tm.mtx.RLock()
|
||||
defer tm.mtx.RUnlock()
|
||||
|
||||
targetsMap := make(map[string][]*Target)
|
||||
for jobName, ps := range tm.targetSets {
|
||||
ps.sp.mtx.RLock()
|
||||
for _, t := range ps.sp.targets {
|
||||
targetsMap[jobName] = append(targetsMap[jobName], t)
|
||||
func (m *ScrapeManager) TargetMap() map[string][]*Target {
|
||||
targetsMap := make(chan map[string][]*Target)
|
||||
m.actionCh <- func() {
|
||||
targets := make(map[string][]*Target)
|
||||
for jobName, sp := range m.scrapePools {
|
||||
sp.mtx.RLock()
|
||||
for _, t := range sp.targets {
|
||||
targets[jobName] = append(targets[jobName], t)
|
||||
}
|
||||
targets[jobName] = append(targets[jobName], sp.droppedTargets...)
|
||||
sp.mtx.RUnlock()
|
||||
}
|
||||
targetsMap[jobName] = append(targetsMap[jobName], ps.sp.droppedTargets...)
|
||||
ps.sp.mtx.RUnlock()
|
||||
targetsMap <- targets
|
||||
}
|
||||
return targetsMap
|
||||
return <-targetsMap
|
||||
}
|
||||
|
||||
// Targets returns the targets currently being scraped.
|
||||
|
|
|
@ -587,7 +587,7 @@ func (h *Handler) rules(w http.ResponseWriter, r *http.Request) {
|
|||
|
||||
func (h *Handler) serviceDiscovery(w http.ResponseWriter, r *http.Request) {
|
||||
var index []string
|
||||
targets := h.targetManager.TargetMap()
|
||||
targets := h.scrapeManager.TargetMap()
|
||||
for job := range targets {
|
||||
index = append(index, job)
|
||||
}
|
||||
|
|
Loading…
Reference in a new issue