notifier: Consider alert relabeling when deciding if alerts are dropped

When alert relabeling intentionally drops every alert in a batch, `sendAll`
returns false and the caller increments the `prometheus_notifications_dropped_total` metric.

This makes it difficult to tell whether an increase in that metric indicates a
connection issue between Prometheus and Alertmanager or alerts that were
dropped intentionally.

Have `sendAll` return `true` when no batches were sent, by tracking which
Alertmanager sets actually had requests attempted.

If no attempts were made, the send is considered successful.

Fixes: #15978

Signed-off-by: Justin Cichra <jcichra@cloudflare.com>
author Justin Cichra 2025-02-26 09:05:40 -05:00
parent cb096a8ea8
commit 78599d0ec6
2 changed files with 74 additions and 18 deletions
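
In outline, the patch replaces the global `numSuccess` counter with per-set bookkeeping. The standalone sketch below condenses that logic; `allSetsCovered` and its input shape are illustrative stand-ins, not code from this commit:

package main

import (
	"fmt"
	"sync"
)

// allSetsCovered condenses the new sendAll bookkeeping: every set that
// has at least one send attempted is seeded with false, and any
// successful attempt flips it to true. A set whose alerts were all
// relabeled away is never seeded, so it cannot force a false result.
func allSetsCovered(attempts map[string][]bool) bool {
	var (
		wg      sync.WaitGroup
		covered sync.Map
	)

	for set, results := range attempts {
		covered.Store(set, false) // attempted, no success recorded yet
		for _, ok := range results {
			wg.Add(1)
			go func(set string, ok bool) {
				defer wg.Done()
				if ok {
					// Only the false -> true transition is meaningful.
					covered.CompareAndSwap(set, false, true)
				}
			}(set, ok)
		}
	}
	wg.Wait()

	all := true
	covered.Range(func(_, value any) bool {
		if !value.(bool) {
			all = false
			return false // stop iterating early
		}
		return true
	})
	return all
}

func main() {
	fmt.Println(allSetsCovered(map[string][]bool{"1": {true}, "2": {false, true}})) // true
	fmt.Println(allSetsCovered(map[string][]bool{"1": {false}, "2": {true}}))       // false
	fmt.Println(allSetsCovered(nil))                                                // true: nothing attempted
}

Using `CompareAndSwap(k, false, true)` rather than an unconditional `Store(k, true)` makes the intent explicit: concurrent failures can never flip a set back to uncovered.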

@@ -36,7 +36,6 @@ import (
 	"github.com/prometheus/common/promslog"
 	"github.com/prometheus/common/version"
 	"github.com/prometheus/sigv4"
-	"go.uber.org/atomic"
 	"gopkg.in/yaml.v2"
 
 	"github.com/prometheus/prometheus/config"
@@ -552,10 +551,10 @@ func (n *Manager) sendAll(alerts ...*Alert) bool {
 	n.mtx.RUnlock()
 
 	var (
-		wg         sync.WaitGroup
-		numSuccess atomic.Uint64
+		wg           sync.WaitGroup
+		amSetCovered sync.Map
 	)
-	for _, ams := range amSets {
+	for k, ams := range amSets {
 		var (
 			payload []byte
 			err     error
@@ -611,24 +610,28 @@ func (n *Manager) sendAll(alerts ...*Alert) bool {
 			cachedPayload = nil
 		}
 
+		// Being here means len(ams.ams) > 0
+		amSetCovered.Store(k, false)
+
 		for _, am := range ams.ams {
 			wg.Add(1)
 
 			ctx, cancel := context.WithTimeout(context.Background(), time.Duration(ams.cfg.Timeout))
 			defer cancel()
 
-			go func(ctx context.Context, client *http.Client, url string, payload []byte, count int) {
-				if err := n.sendOne(ctx, client, url, payload); err != nil {
+			go func(ctx context.Context, k string, client *http.Client, url string, payload []byte, count int) {
+				err := n.sendOne(ctx, client, url, payload)
+				if err != nil {
					n.logger.Error("Error sending alerts", "alertmanager", url, "count", count, "err", err)
 					n.metrics.errors.WithLabelValues(url).Add(float64(count))
 				} else {
-					numSuccess.Inc()
+					amSetCovered.CompareAndSwap(k, false, true)
 				}
 				n.metrics.latency.WithLabelValues(url).Observe(time.Since(begin).Seconds())
 				n.metrics.sent.WithLabelValues(url).Add(float64(count))
 
 				wg.Done()
-			}(ctx, ams.client, am.url().String(), payload, len(amAlerts))
+			}(ctx, k, ams.client, am.url().String(), payload, len(amAlerts))
 		}
 
 		ams.mtx.RUnlock()
@@ -636,7 +639,18 @@ func (n *Manager) sendAll(alerts ...*Alert) bool {
 
 	wg.Wait()
 
-	return numSuccess.Load() > 0
+	// Return false if there are any sets which were attempted (e.g. not filtered
+	// out) but have no successes.
+	allAmSetsCovered := true
+	amSetCovered.Range(func(_, value any) bool {
+		if !value.(bool) {
+			allAmSetsCovered = false
+			return false
+		}
+		return true
+	})
+
+	return allAmSetsCovered
 }
 
 func alertsToOpenAPIAlerts(alerts []*Alert) models.PostableAlerts {
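
What makes the return value matter is the caller's contract: a false result counts the whole batch as dropped. A minimal sketch of that contract, with a hypothetical `dispatch` helper and counter standing in for the notifier's run loop and the real `prometheus_notifications_dropped_total` metric:

package main

import (
	"fmt"
	"sync/atomic"
)

// droppedTotal stands in for prometheus_notifications_dropped_total.
var droppedTotal atomic.Int64

// dispatch mirrors the caller's contract with sendAll: a false return
// counts the entire batch as dropped. With this patch, a batch that was
// fully relabeled away yields true, so it is no longer counted here.
func dispatch(sendAll func() bool, batchSize int) {
	if !sendAll() {
		droppedTotal.Add(int64(batchSize))
	}
}

func main() {
	dispatch(func() bool { return true }, 64)  // empty-after-relabeling batch: not dropped
	dispatch(func() bool { return false }, 64) // an attempted set had no successes: dropped
	fmt.Println("dropped:", droppedTotal.Load()) // dropped: 64
}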

@@ -140,17 +140,20 @@ func newTestHTTPServerBuilder(expected *[]*Alert, errc chan<- error, u, p string
 
 func TestHandlerSendAll(t *testing.T) {
 	var (
-		errc             = make(chan error, 1)
-		expected         = make([]*Alert, 0, maxBatchSize)
-		status1, status2 atomic.Int32
+		errc                      = make(chan error, 1)
+		expected                  = make([]*Alert, 0, maxBatchSize)
+		status1, status2, status3 atomic.Int32
 	)
 	status1.Store(int32(http.StatusOK))
 	status2.Store(int32(http.StatusOK))
+	status3.Store(int32(http.StatusOK))
 
 	server1 := newTestHTTPServerBuilder(&expected, errc, "prometheus", "testing_password", &status1)
 	server2 := newTestHTTPServerBuilder(&expected, errc, "", "", &status2)
+	server3 := newTestHTTPServerBuilder(&expected, errc, "", "", &status3)
 	defer server1.Close()
 	defer server2.Close()
+	defer server3.Close()
 
 	h := NewManager(&Options{}, nil)
 
@@ -170,6 +173,9 @@ func TestHandlerSendAll(t *testing.T) {
 	am2Cfg := config.DefaultAlertmanagerConfig
 	am2Cfg.Timeout = model.Duration(time.Second)
 
+	am3Cfg := config.DefaultAlertmanagerConfig
+	am3Cfg.Timeout = model.Duration(time.Second)
+
 	h.alertmanagers["1"] = &alertmanagerSet{
 		ams: []alertmanager{
 			alertmanagerMock{
@@ -185,10 +191,18 @@ func TestHandlerSendAll(t *testing.T) {
 			alertmanagerMock{
 				urlf: func() string { return server2.URL },
 			},
+			alertmanagerMock{
+				urlf: func() string { return server3.URL },
+			},
 		},
 		cfg: &am2Cfg,
 	}
 
+	h.alertmanagers["3"] = &alertmanagerSet{
+		ams: []alertmanager{}, // empty set
+		cfg: &am3Cfg,
+	}
+
 	for i := range make([]struct{}, maxBatchSize) {
 		h.queue = append(h.queue, &Alert{
 			Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
@@ -207,14 +221,25 @@ func TestHandlerSendAll(t *testing.T) {
 		}
 	}
 
+	// all ams in all sets are up
 	require.True(t, h.sendAll(h.queue...), "all sends failed unexpectedly")
 	checkNoErr()
 
+	// the only am in set 1 is down
 	status1.Store(int32(http.StatusNotFound))
-	require.True(t, h.sendAll(h.queue...), "all sends failed unexpectedly")
+	require.False(t, h.sendAll(h.queue...), "all sends failed unexpectedly")
 	checkNoErr()
 
+	// reset it
+	status1.Store(int32(http.StatusOK))
+
+	// only one of the ams in set 2 is down
 	status2.Store(int32(http.StatusInternalServerError))
+	require.True(t, h.sendAll(h.queue...), "all sends succeeded unexpectedly")
+	checkNoErr()
+
+	// both ams in set 2 are down
+	status3.Store(int32(http.StatusInternalServerError))
 	require.False(t, h.sendAll(h.queue...), "all sends succeeded unexpectedly")
 	checkNoErr()
 }
@@ -226,13 +251,15 @@ func TestHandlerSendAllRemapPerAm(t *testing.T) {
 		expected2 = make([]*Alert, 0, maxBatchSize)
 		expected3 = make([]*Alert, 0)
 
-		statusOK atomic.Int32
+		status1, status2, status3 atomic.Int32
 	)
-	statusOK.Store(int32(http.StatusOK))
+	status1.Store(int32(http.StatusOK))
+	status2.Store(int32(http.StatusOK))
+	status3.Store(int32(http.StatusOK))
 
-	server1 := newTestHTTPServerBuilder(&expected1, errc, "", "", &statusOK)
-	server2 := newTestHTTPServerBuilder(&expected2, errc, "", "", &statusOK)
-	server3 := newTestHTTPServerBuilder(&expected3, errc, "", "", &statusOK)
+	server1 := newTestHTTPServerBuilder(&expected1, errc, "", "", &status1)
+	server2 := newTestHTTPServerBuilder(&expected2, errc, "", "", &status2)
+	server3 := newTestHTTPServerBuilder(&expected3, errc, "", "", &status3)
 	defer server1.Close()
 	defer server2.Close()
 	defer server3.Close()
@@ -331,6 +358,21 @@ func TestHandlerSendAllRemapPerAm(t *testing.T) {
 		}
 	}
 
+	// all ams are up
 	require.True(t, h.sendAll(h.queue...), "all sends failed unexpectedly")
 	checkNoErr()
+
+	// the only am in set 1 goes down
+	status1.Store(int32(http.StatusInternalServerError))
+	require.False(t, h.sendAll(h.queue...), "all sends failed unexpectedly")
+	checkNoErr()
+
+	// reset set 1
+	status1.Store(int32(http.StatusOK))
+
+	// set 3 loses its only am, but all alerts were dropped
+	// so there was nothing to send, keeping sendAll true
+	status3.Store(int32(http.StatusInternalServerError))
+	require.True(t, h.sendAll(h.queue...), "all sends failed unexpectedly")
+	checkNoErr()
 
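
The scenario exercised at the end of this test, where every alert for a set is dropped before sending, comes from alert relabeling. A sketch of how an `alert_relabel_configs` drop rule empties a batch, using the repository's `model/relabel` package; the rule itself is invented for illustration:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/relabel"
)

func main() {
	// Hypothetical rule: drop every alert whose env label is "staging".
	// A batch containing only such alerts ends up empty, and with this
	// patch sendAll still reports success for it.
	drop := &relabel.Config{
		SourceLabels: model.LabelNames{"env"},
		Regex:        relabel.MustNewRegexp("staging"),
		Action:       relabel.Drop,
	}

	alert := labels.FromStrings("alertname", "HighErrorRate", "env", "staging")
	_, keep := relabel.Process(alert, drop)
	fmt.Println("keep:", keep) // keep: false -> nothing left to send
}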