Mirror of https://github.com/prometheus/prometheus.git
Commit ffa673f7d8 (parent 090e7e0959)
@@ -420,10 +420,7 @@ func main() {
     // it needs to read a valid config for each job.
     // It depends on the config being in sync with the discovery manager so
     // we wait until the config is fully loaded.
-    select {
-    case <-reloadReady.C:
-        break
-    }
+    <-reloadReady.C

     err := scrapeManager.Run(discoveryManagerScrape.SyncCh())
     level.Info(logger).Log("msg", "Scrape manager stopped")
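The change above (and the two identical ones below) relies on the fact that a select with a single case and no default blocks exactly like a bare channel receive. A minimal, self-contained sketch of the pattern, with ready standing in for reloadReady.C:

package main

import (
	"fmt"
	"time"
)

func main() {
	// ready is a stand-in for reloadReady.C: closed once the initial
	// config load has finished.
	ready := make(chan struct{})

	go func() {
		time.Sleep(10 * time.Millisecond)
		close(ready)
	}()

	// Verbose form: a select with a single case and no default simply
	// blocks on that one receive.
	select {
	case <-ready:
	}

	// Equivalent simplified form: a bare receive. Receiving from an
	// already-closed channel returns immediately, so this does not
	// block a second time.
	<-ready

	fmt.Println("config loaded, downstream managers may start")
}
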
@@ -445,10 +442,7 @@ func main() {
     cancel := make(chan struct{})
     g.Add(
         func() error {
-            select {
-            case <-reloadReady.C:
-                break
-            }
+            <-reloadReady.C

             for {
                 select {
@@ -573,10 +567,8 @@ func main() {
     // it needs to read a valid config for each job.
     // It depends on the config being in sync with the discovery manager
     // so we wait until the config is fully loaded.
-    select {
-    case <-reloadReady.C:
-        break
-    }
+    <-reloadReady.C
+
     notifier.Run(discoveryManagerNotify.SyncCh())
     level.Info(logger).Log("msg", "Notifier manager stopped")
     return nil
@@ -65,7 +65,6 @@ func (i *fakeInformer) GetController() cache.Controller {
 }

 func (i *fakeInformer) Run(stopCh <-chan struct{}) {
-    return
 }

 func (i *fakeInformer) HasSynced() bool {
@@ -90,13 +90,11 @@ type Manager struct {

 // Run starts the background processing
 func (m *Manager) Run() error {
-    for {
-        select {
-        case <-m.ctx.Done():
-            m.cancelDiscoverers()
-            return m.ctx.Err()
-        }
-    }
+    for range m.ctx.Done() {
+        m.cancelDiscoverers()
+        return m.ctx.Err()
+    }
+    return nil
 }

 // SyncCh returns a read only channel used by all Discoverers to send target updates.
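For background on the replacement form: for range over a channel runs the body once per received value and exits once the channel is closed and drained. A minimal sketch with a hypothetical updates channel (the names here are not part of the change):

package main

import "fmt"

func main() {
	updates := make(chan string)

	go func() {
		updates <- "targetgroup A"
		updates <- "targetgroup B"
		close(updates) // closing the channel terminates the range loop
	}()

	// The body runs once per received value; the value-less form
	// (for range ch { ... }) behaves the same but discards the values.
	for v := range updates {
		fmt.Println("received", v)
	}
}
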
@@ -750,7 +750,7 @@ scrape_configs:
     }
     discoveryManager.ApplyConfig(c)

-    _ = <-discoveryManager.SyncCh()
+    <-discoveryManager.SyncCh()
     verifyPresence(discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true)
     verifyPresence(discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", true)

@@ -769,7 +769,7 @@ scrape_configs:
     }
     discoveryManager.ApplyConfig(c)

-    _ = <-discoveryManager.SyncCh()
+    <-discoveryManager.SyncCh()
     verifyPresence(discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true)
     verifyPresence(discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", false)
 }
@@ -100,11 +100,7 @@ func (c *Client) Write(samples model.Samples) error {
     }

     _, err = conn.Write(buf.Bytes())
-    if err != nil {
-        return err
-    }
-
-    return nil
+    return err
 }

 // Name identifies the client as a Graphite client.
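This works because the removed branch only ever forwarded err or returned nil, and err is nil exactly in the nil-return case. A minimal sketch of the same rewrite with hypothetical verbose/compact helpers:

package main

import (
	"bytes"
	"fmt"
	"io"
)

// verbose spells out the nil and non-nil error paths separately.
func verbose(w io.Writer, buf *bytes.Buffer) error {
	_, err := w.Write(buf.Bytes())
	if err != nil {
		return err
	}
	return nil
}

// compact returns err directly; it is nil exactly when verbose would
// have returned nil.
func compact(w io.Writer, buf *bytes.Buffer) error {
	_, err := w.Write(buf.Bytes())
	return err
}

func main() {
	var out bytes.Buffer
	fmt.Println(verbose(&out, bytes.NewBufferString("a")))
	fmt.Println(compact(&out, bytes.NewBufferString("b")))
}
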
@@ -348,7 +348,6 @@ func (p *parser) recover(errp *error) {
             *errp = e.(error)
         }
     }
-    return
 }

 // stmt parses any statement.
@@ -286,12 +286,12 @@ func (g *Group) copyState(from *Group) {
     ruleMap := make(map[string][]int, len(from.rules))

     for fi, fromRule := range from.rules {
-        l, _ := ruleMap[fromRule.Name()]
+        l := ruleMap[fromRule.Name()]
         ruleMap[fromRule.Name()] = append(l, fi)
     }

     for i, rule := range g.rules {
-        indexes, _ := ruleMap[rule.Name()]
+        indexes := ruleMap[rule.Name()]
         if len(indexes) == 0 {
             continue
         }
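Dropping the comma-ok is safe because indexing a map with a missing key yields the zero value of the element type, here a nil slice, which append handles. A minimal sketch with a hypothetical ruleIndexes map:

package main

import "fmt"

func main() {
	ruleIndexes := map[string][]int{}

	// Missing key: the comma-ok form and the plain index both yield a
	// nil slice; ok only reports whether the key was present.
	l, ok := ruleIndexes["up"]
	fmt.Println(l == nil, ok) // true false

	// append on a nil slice allocates a new one, so the lookup result
	// can be appended to directly.
	ruleIndexes["up"] = append(ruleIndexes["up"], 0)
	fmt.Println(ruleIndexes["up"]) // [0]
}
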
@@ -139,9 +139,7 @@ func (m *Manager) DroppedTargets() []*Target {
     var droppedTargets []*Target
     for _, p := range m.scrapePools {
         p.mtx.RLock()
-        for _, tt := range p.droppedTargets {
-            droppedTargets = append(droppedTargets, tt)
-        }
+        droppedTargets = append(droppedTargets, p.droppedTargets...)
         p.mtx.RUnlock()
     }
     return droppedTargets
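append accepts a second slice expanded with ..., which copies its elements in one call and replaces the per-element loop above. A minimal sketch with a stripped-down Target type (a stand-in, not the real scrape target):

package main

import "fmt"

type Target struct{ addr string }

func main() {
	pools := [][]*Target{
		{{addr: "foo:9090"}},
		{{addr: "bar:9090"}, {addr: "baz:9090"}},
	}

	var dropped []*Target
	for _, p := range pools {
		// Equivalent to ranging over p and appending element by element.
		dropped = append(dropped, p...)
	}
	fmt.Println(len(dropped)) // 3
}
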
@@ -196,8 +196,7 @@ func NewQueueManager(logger log.Logger, cfg config.QueueConfig, externalLabels m
 // sample on the floor if the queue is full.
 // Always returns nil.
 func (t *QueueManager) Append(s *model.Sample) error {
-    var snew model.Sample
-    snew = *s
+    snew := *s
     snew.Metric = s.Metric.Clone()

     for ln, lv := range t.externalLabels {
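The short declaration form := declares and initializes in one statement, inferring the type from the right-hand side, so the separate var declaration is redundant when the variable is assigned immediately; the remote-storage and QueryStats hunks below apply the same rewrite. A minimal sketch with a stripped-down Sample type standing in for model.Sample:

package main

import "fmt"

type Sample struct {
	Metric string
	Value  float64
}

func clone(s *Sample) Sample {
	// Verbose: var snew Sample; snew = *s
	// Short form: declaration and initialization in one statement,
	// with the type inferred from *s.
	snew := *s
	return snew
}

func main() {
	s := &Sample{Metric: "up", Value: 1}
	fmt.Println(clone(s))
}
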
@@ -99,8 +99,7 @@ func (s *Storage) ApplyConfig(conf *config.Config) error {
         return err
     }

-    var q storage.Queryable
-    q = QueryableClient(c)
+    q := QueryableClient(c)
     q = ExternablLabelsHandler(q, conf.GlobalConfig.ExternalLabels)
     if len(rrConf.RequiredMatchers) > 0 {
         q = RequiredMatchersFilter(q, labelsToEqualityMatchers(rrConf.RequiredMatchers))
@@ -157,7 +157,7 @@ func TestNewClientFromConfig(t *testing.T) {
     },
     handler: func(w http.ResponseWriter, r *http.Request) {
         username, password, ok := r.BasicAuth()
-        if ok == false {
+        if !ok {
             fmt.Fprintf(w, "The Authorization header wasn't set")
         } else if ExpectedUsername != username {
             fmt.Fprintf(w, "The expected username (%s) differs from the obtained username (%s).", ExpectedUsername, username)
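BasicAuth reports via its third return value whether basic credentials were present, and !ok expresses that check without comparing a bool to false. A minimal sketch of a handler built around it (the listen address and messages are placeholders):

package main

import (
	"fmt"
	"log"
	"net/http"
)

func handler(w http.ResponseWriter, r *http.Request) {
	username, password, ok := r.BasicAuth()
	if !ok { // same meaning as ok == false
		fmt.Fprintf(w, "The Authorization header wasn't set")
		return
	}
	fmt.Fprintf(w, "got credentials for %s (password length %d)", username, len(password))
}

func main() {
	http.HandleFunc("/", handler)
	// Placeholder listen address for the sketch.
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}
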
@@ -336,7 +336,7 @@ func TestBasicAuthRoundTripper(t *testing.T) {

     fakeRoundTripper := testutil.NewRoundTripCheckRequest(func(req *http.Request) {
         username, password, ok := req.BasicAuth()
-        if ok == false {
+        if !ok {
             t.Errorf("The Authorization header wasn't set")
         }
         if ExpectedUsername != username {
@@ -45,8 +45,7 @@ func TestQueryStatsWithTimers(t *testing.T) {
     time.Sleep(2 * time.Millisecond)
     timer.Stop()

-    var qs *QueryStats
-    qs = NewQueryStats(tg)
+    qs := NewQueryStats(tg)
     actual, err := json.Marshal(qs)
     if err != nil {
         t.Fatalf("Unexpected error during serialization: %v", err)
@@ -20,11 +20,7 @@ func ErrorEqual(left, right error) bool {
     }

     if left != nil && right != nil {
-        if left.Error() == right.Error() {
-            return true
-        }
-
-        return false
+        return left.Error() == right.Error()
     }

     return false
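An if/else whose branches only return true or false can return the condition itself; the isReady hunk below applies the same rewrite. A minimal sketch with illustrative verboseEqual/compactEqual helpers:

package main

import "fmt"

// verboseEqual spells out both branches.
func verboseEqual(left, right error) bool {
	if left.Error() == right.Error() {
		return true
	}
	return false
}

// compactEqual returns the comparison directly.
func compactEqual(left, right error) bool {
	return left.Error() == right.Error()
}

func main() {
	a, b := fmt.Errorf("boom"), fmt.Errorf("boom")
	fmt.Println(verboseEqual(a, b), compactEqual(a, b)) // true true
}
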
@@ -346,10 +346,7 @@ func (h *Handler) Ready() {
 // Verifies whether the server is ready or not.
 func (h *Handler) isReady() bool {
     ready := atomic.LoadUint32(&h.ready)
-    if ready == 0 {
-        return false
-    }
-    return true
+    return ready > 0
 }

 // Checks if server is ready, calls f if it is, returns 503 if it is not.