Mirror of https://github.com/prometheus/prometheus.git
notifier: dispatch to multiple Alertmanagers
This commit extends the notifier to dispatch alert batches to multiple Alertmanagers concurrently. It changes the `-alertmanager.url` flag to accept a comma-separated list of URLs and/or to be set multiple times.
Parent: 0b6e5eb7dd
Commit: 9baf120cd5
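With this change, the two invocations below are equivalent ways to configure the same two Alertmanagers (a usage sketch; the hostnames are illustrative):

    prometheus -alertmanager.url=http://am-1:9093,http://am-2:9093
    prometheus -alertmanager.url=http://am-1:9093 -alertmanager.url=http://am-2:9093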
cmd/prometheus/config.go

@@ -19,6 +19,7 @@ import (
 	"net"
 	"net/url"
 	"os"
+	"sort"
 	"strings"
 	"text/template"
 	"time"
@@ -48,9 +49,12 @@ var cfg = struct {
 	web    web.Options
 	remote remote.Options

-	prometheusURL string
-	influxdbURL   string
-}{}
+	alertmanagerURLs stringset
+	prometheusURL    string
+	influxdbURL      string
+}{
+	alertmanagerURLs: stringset{},
+}

 func init() {
 	flag.CommandLine.Init(os.Args[0], flag.ContinueOnError)
@@ -202,9 +206,9 @@ func init() {
 	)

 	// Alertmanager.
-	cfg.fs.StringVar(
-		&cfg.notifier.AlertmanagerURL, "alertmanager.url", "",
-		"The URL of the alert manager to send notifications to.",
+	cfg.fs.Var(
+		&cfg.alertmanagerURLs, "alertmanager.url",
+		"Comma-separated list of Alertmanager URLs to send notifications to.",
 	)
 	cfg.fs.IntVar(
 		&cfg.notifier.QueueCapacity, "alertmanager.notification-queue-capacity", 10000,
@@ -245,8 +249,11 @@ func parse(args []string) error {
 	if err := parseInfluxdbURL(); err != nil {
 		return err
 	}
-	if err := validateAlertmanagerURL(); err != nil {
-		return err
+	for u := range cfg.alertmanagerURLs {
+		if err := validateAlertmanagerURL(u); err != nil {
+			return err
+		}
 	}
+	cfg.notifier.AlertmanagerURLs = cfg.alertmanagerURLs.slice()

 	cfg.remote.InfluxdbPassword = os.Getenv("INFLUXDB_PW")
@@ -303,19 +310,19 @@ func parseInfluxdbURL() error {
 	return nil
 }

-func validateAlertmanagerURL() error {
-	if cfg.notifier.AlertmanagerURL == "" {
+func validateAlertmanagerURL(u string) error {
+	if u == "" {
 		return nil
 	}
-	if ok := govalidator.IsURL(cfg.notifier.AlertmanagerURL); !ok {
-		return fmt.Errorf("invalid Alertmanager URL: %s", cfg.notifier.AlertmanagerURL)
+	if ok := govalidator.IsURL(u); !ok {
+		return fmt.Errorf("invalid Alertmanager URL: %s", u)
 	}
-	url, err := url.Parse(cfg.notifier.AlertmanagerURL)
+	url, err := url.Parse(u)
 	if err != nil {
 		return err
 	}
 	if url.Scheme == "" {
-		return fmt.Errorf("missing scheme in Alertmanager URL: %s", cfg.notifier.AlertmanagerURL)
+		return fmt.Errorf("missing scheme in Alertmanager URL: %s", u)
 	}
 	return nil
 }
@@ -380,3 +387,28 @@ func usage() {
 		panic(fmt.Errorf("error executing usage template: %s", err))
 	}
 }
+
+type stringset map[string]struct{}
+
+func (ss stringset) Set(s string) error {
+	for _, v := range strings.Split(s, ",") {
+		v = strings.TrimSpace(v)
+		if v != "" {
+			ss[v] = struct{}{}
+		}
+	}
+	return nil
+}
+
+func (ss stringset) String() string {
+	return strings.Join(ss.slice(), ",")
+}
+
+func (ss stringset) slice() []string {
+	slice := make([]string, 0, len(ss))
+	for k := range ss {
+		slice = append(slice, k)
+	}
+	sort.Strings(slice)
+	return slice
+}
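The stringset type above implements flag.Value, which is why cfg.fs.Var can register it for -alertmanager.url: repeated flags and comma-separated values accumulate into one deduplicated set, and slice() returns it sorted so the resulting URL list is deterministic. A minimal, self-contained sketch of that behavior (the flag-set name and URLs are illustrative; the type itself is copied from the hunk above):

    package main

    import (
        "flag"
        "fmt"
        "sort"
        "strings"
    )

    // stringset, as in the diff above: a set-valued flag.Value.
    type stringset map[string]struct{}

    func (ss stringset) Set(s string) error {
        for _, v := range strings.Split(s, ",") {
            v = strings.TrimSpace(v)
            if v != "" {
                ss[v] = struct{}{}
            }
        }
        return nil
    }

    func (ss stringset) String() string {
        return strings.Join(ss.slice(), ",")
    }

    func (ss stringset) slice() []string {
        slice := make([]string, 0, len(ss))
        for k := range ss {
            slice = append(slice, k)
        }
        sort.Strings(slice)
        return slice
    }

    func main() {
        urls := stringset{}
        fs := flag.NewFlagSet("example", flag.ExitOnError)
        fs.Var(urls, "alertmanager.url", "Comma-separated list of Alertmanager URLs.")

        // One comma-separated flag plus one repeated flag; the duplicate
        // URL is absorbed by the set.
        fs.Parse([]string{
            "-alertmanager.url=http://am-1:9093,http://am-2:9093",
            "-alertmanager.url=http://am-2:9093",
        })
        fmt.Println(urls.slice()) // [http://am-1:9093 http://am-2:9093]
    }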
notifier/notifier.go

@@ -20,6 +20,7 @@ import (
 	"net/http"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"

 	"github.com/prometheus/client_golang/prometheus"
@@ -38,8 +39,9 @@ const (

 // String constants for instrumentation.
 const (
-	namespace = "prometheus"
-	subsystem = "notifications"
+	namespace         = "prometheus"
+	subsystem         = "notifications"
+	alertmanagerLabel = "alertmanager"
 )

 // Notifier is responsible for dispatching alert notifications to an
@@ -53,20 +55,20 @@ type Notifier struct {
 	ctx    context.Context
 	cancel func()

-	latency       prometheus.Summary
-	errors        prometheus.Counter
+	latency       *prometheus.SummaryVec
+	errors        *prometheus.CounterVec
+	sent          *prometheus.CounterVec
 	dropped       prometheus.Counter
-	sent          prometheus.Counter
 	queueLength   prometheus.Gauge
 	queueCapacity prometheus.Metric
 }

 // Options are the configurable parameters of a Handler.
 type Options struct {
-	AlertmanagerURL string
-	QueueCapacity   int
-	Timeout         time.Duration
-	ExternalLabels  model.LabelSet
+	AlertmanagerURLs []string
+	QueueCapacity    int
+	Timeout          time.Duration
+	ExternalLabels   model.LabelSet
 }

 // New constructs a new Notifier.
@@ -80,24 +82,30 @@ func New(o *Options) *Notifier {
 		more: make(chan struct{}, 1),
 		opts: o,

-		latency: prometheus.NewSummary(prometheus.SummaryOpts{
+		latency: prometheus.NewSummaryVec(prometheus.SummaryOpts{
 			Namespace: namespace,
 			Subsystem: subsystem,
 			Name:      "latency_seconds",
 			Help:      "Latency quantiles for sending alert notifications (not including dropped notifications).",
-		}),
-		errors: prometheus.NewCounter(prometheus.CounterOpts{
+		},
+			[]string{alertmanagerLabel},
+		),
+		errors: prometheus.NewCounterVec(prometheus.CounterOpts{
 			Namespace: namespace,
 			Subsystem: subsystem,
 			Name:      "errors_total",
 			Help:      "Total number of errors sending alert notifications.",
-		}),
-		sent: prometheus.NewCounter(prometheus.CounterOpts{
+		},
+			[]string{alertmanagerLabel},
+		),
+		sent: prometheus.NewCounterVec(prometheus.CounterOpts{
 			Namespace: namespace,
 			Subsystem: subsystem,
 			Name:      "sent_total",
 			Help:      "Total number of alerts successfully sent.",
-		}),
+		},
+			[]string{alertmanagerLabel},
+		),
 		dropped: prometheus.NewCounter(prometheus.CounterOpts{
 			Namespace: namespace,
 			Subsystem: subsystem,
@@ -160,9 +168,11 @@ func (n *Notifier) nextBatch() []*model.Alert {

 // Run dispatches notifications continuously.
 func (n *Notifier) Run() {
+	numAMs := len(n.opts.AlertmanagerURLs)
 	// Just warn once in the beginning to prevent noisy logs.
-	if n.opts.AlertmanagerURL == "" {
-		log.Warnf("No AlertManager configured, not dispatching any alerts")
+	if numAMs == 0 {
+		log.Warnf("No AlertManagers configured, not dispatching any alerts")
 		return
 	}

 	for {
@@ -171,28 +181,21 @@ func (n *Notifier) Run() {
 			return
 		case <-n.more:
 		}
-
 		alerts := n.nextBatch()

-		if len(alerts) == 0 {
-			continue
-		}
-		if n.opts.AlertmanagerURL == "" {
-			n.dropped.Add(float64(len(alerts)))
-			continue
-		}
-
-		begin := time.Now()
-
-		if err := n.send(alerts...); err != nil {
-			log.Errorf("Error sending %d alerts: %s", len(alerts), err)
-			n.errors.Inc()
-			n.dropped.Add(float64(len(alerts)))
-		}
-
-		n.latency.Observe(float64(time.Since(begin)) / float64(time.Second))
-		n.sent.Add(float64(len(alerts)))
+		if numAMs > 0 {
+			if len(alerts) > 0 {
+				numErrors := n.sendAll(alerts...)
+				// Increment the dropped counter if we could not send
+				// successfully to a single AlertManager.
+				if numErrors == numAMs {
+					n.dropped.Add(float64(len(alerts)))
+				}
+			}
+		} else {
+			n.dropped.Add(float64(len(alerts)))
+		}

 		// If the queue still has items left, kick off the next iteration.
 		if n.queueLen() > 0 {
 			n.setMore()
@@ -239,11 +242,15 @@ func (n *Notifier) setMore() {
 	}
 }

-func (n *Notifier) postURL() string {
-	return strings.TrimRight(n.opts.AlertmanagerURL, "/") + alertPushEndpoint
+func postURL(u string) string {
+	return strings.TrimRight(u, "/") + alertPushEndpoint
 }

-func (n *Notifier) send(alerts ...*model.Alert) error {
+// sendAll sends the alerts to all configured Alertmanagers concurrently.
+// It returns the number of sends that have failed.
+func (n *Notifier) sendAll(alerts ...*model.Alert) int {
+	begin := time.Now()
+
 	// Attach external labels before sending alerts.
 	for _, a := range alerts {
 		for ln, lv := range n.opts.ExternalLabels {
@@ -253,36 +260,62 @@ func (n *Notifier) send(alerts ...*model.Alert) error {
 		}
 	}

-	var buf bytes.Buffer
-	if err := json.NewEncoder(&buf).Encode(alerts); err != nil {
-		return err
+	b, err := json.Marshal(alerts)
+	if err != nil {
+		log.Errorf("Encoding alerts failed: %s", err)
+		return len(n.opts.AlertmanagerURLs)
 	}
 	ctx, _ := context.WithTimeout(context.Background(), n.opts.Timeout)

-	resp, err := ctxhttp.Post(ctx, http.DefaultClient, n.postURL(), contentTypeJSON, &buf)
-	if err != nil {
-		return err
-	}
-	defer resp.Body.Close()
-
-	if resp.StatusCode/100 != 2 {
-		return fmt.Errorf("bad response status %v", resp.Status)
-	}
-	return nil
+	send := func(u string) error {
+		resp, err := ctxhttp.Post(ctx, http.DefaultClient, postURL(u), contentTypeJSON, bytes.NewReader(b))
+		if err != nil {
+			return err
+		}
+		defer resp.Body.Close()
+
+		if resp.StatusCode/100 != 2 {
+			return fmt.Errorf("bad response status %v", resp.Status)
+		}
+		return err
+	}
+
+	var (
+		wg        sync.WaitGroup
+		numErrors uint64
+	)
+	for _, u := range n.opts.AlertmanagerURLs {
+		wg.Add(1)
+
+		go func(u string) {
+			if err := send(u); err != nil {
+				log.With("alertmanager", u).With("count", fmt.Sprintf("%d", len(alerts))).Errorf("Error sending alerts: %s", err)
+				n.errors.WithLabelValues(u).Inc()
+				atomic.AddUint64(&numErrors, 1)
+			}
+			n.latency.WithLabelValues(u).Observe(float64(time.Since(begin)) / float64(time.Second))
+			n.sent.WithLabelValues(u).Add(float64(len(alerts)))
+
+			wg.Done()
+		}(u)
+	}
+	wg.Wait()
+
+	return int(numErrors)
 }

 // Stop shuts down the notification handler.
 func (n *Notifier) Stop() {
 	log.Info("Stopping notification handler...")

 	n.cancel()
 }

 // Describe implements prometheus.Collector.
 func (n *Notifier) Describe(ch chan<- *prometheus.Desc) {
-	ch <- n.latency.Desc()
-	ch <- n.errors.Desc()
-	ch <- n.sent.Desc()
+	n.latency.Describe(ch)
+	n.errors.Describe(ch)
+	n.sent.Describe(ch)
+
 	ch <- n.dropped.Desc()
 	ch <- n.queueLength.Desc()
 	ch <- n.queueCapacity.Desc()
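sendAll above fans out one goroutine per Alertmanager, joins them with a sync.WaitGroup, and tallies failures on an atomic counter; the Run loop then counts a batch as dropped only when that failure count equals the number of configured Alertmanagers, i.e. when no target accepted it. A stripped-down sketch of the same pattern (fanOut and the stubbed send are hypothetical names, not identifiers from this commit):

    package main

    import (
        "errors"
        "fmt"
        "sync"
        "sync/atomic"
    )

    // fanOut mirrors the shape of sendAll: one goroutine per target URL,
    // a WaitGroup to join them, and an atomic counter so concurrent
    // failures can be tallied without a lock.
    func fanOut(targets []string, send func(string) error) int {
        var (
            wg        sync.WaitGroup
            numErrors uint64
        )
        for _, u := range targets {
            wg.Add(1)
            go func(u string) {
                defer wg.Done()
                if err := send(u); err != nil {
                    atomic.AddUint64(&numErrors, 1)
                }
            }(u)
        }
        wg.Wait()
        return int(numErrors)
    }

    func main() {
        // Simulate one healthy and one failing Alertmanager.
        send := func(u string) error {
            if u == "http://am-2:9093" {
                return errors.New("connection refused")
            }
            return nil
        }
        failed := fanOut([]string{"http://am-1:9093", "http://am-2:9093"}, send)
        fmt.Println("failed sends:", failed) // failed sends: 1
    }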
@@ -292,9 +325,10 @@ func (n *Notifier) Describe(ch chan<- *prometheus.Desc) {
 func (n *Notifier) Collect(ch chan<- prometheus.Metric) {
 	n.queueLength.Set(float64(n.queueLen()))

-	ch <- n.latency
-	ch <- n.errors
-	ch <- n.sent
+	n.latency.Collect(ch)
+	n.errors.Collect(ch)
+	n.sent.Collect(ch)
+
 	ch <- n.dropped
 	ch <- n.queueLength
 	ch <- n.queueCapacity
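Because latency, errors, and sent are now label-keyed Vec metrics, the notifier exports one series per Alertmanager. Assuming a single target at http://am-1:9093 (a hypothetical URL), the exposed series would look roughly like this; the quantile label comes from the client library's Summary defaults:

    prometheus_notifications_sent_total{alertmanager="http://am-1:9093"}
    prometheus_notifications_errors_total{alertmanager="http://am-1:9093"}
    prometheus_notifications_latency_seconds{alertmanager="http://am-1:9093",quantile="0.99"}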
notifier/notifier_test.go

@@ -25,7 +25,7 @@ import (
 	"github.com/prometheus/common/model"
 )

-func TestHandlerPostURL(t *testing.T) {
+func TestPostURL(t *testing.T) {
 	var cases = []struct {
 		in, out string
 	}{
@@ -50,13 +50,8 @@ func TestHandlerPostURL(t *testing.T) {
 			out: "http://localhost:9093/prefix/api/v1/alerts",
 		},
 	}
-	h := &Notifier{
-		opts: &Options{},
-	}
-
 	for _, c := range cases {
-		h.opts.AlertmanagerURL = c.in
-		if res := h.postURL(); res != c.out {
+		if res := postURL(c.in); res != c.out {
 			t.Errorf("Expected post URL %q for %q but got %q", c.out, c.in, res)
 		}
 	}
@@ -119,13 +114,13 @@ func alertsEqual(a, b model.Alerts) bool {
 	return true
 }

-func TestHandlerSend(t *testing.T) {
+func TestHandlerSendAll(t *testing.T) {
 	var (
-		expected model.Alerts
-		status   int
+		expected         model.Alerts
+		status1, status2 int
 	)

-	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+	f := func(w http.ResponseWriter, r *http.Request) {
 		if r.URL.Path != alertPushEndpoint {
 			t.Fatalf("Bad endpoint %q used, expected %q", r.URL.Path, alertPushEndpoint)
 		}
@@ -140,16 +135,23 @@ func TestHandlerSend(t *testing.T) {
 			t.Errorf("%#v %#v", *alerts[0], *expected[0])
 			t.Fatalf("Unexpected alerts received %v exp %v", alerts, expected)
 		}
-
-		w.WriteHeader(status)
-	}))
+	}
+	server1 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		f(w, r)
+		w.WriteHeader(status1)
+	}))
+	server2 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		f(w, r)
+		w.WriteHeader(status2)
+	}))

-	defer server.Close()
+	defer server1.Close()
+	defer server2.Close()

 	h := New(&Options{
-		AlertmanagerURL: server.URL,
-		Timeout:         time.Minute,
-		ExternalLabels:  model.LabelSet{"a": "b"},
+		AlertmanagerURLs: []string{server1.URL, server2.URL},
+		Timeout:          time.Minute,
+		ExternalLabels:   model.LabelSet{"a": "b"},
 	})

 	for i := range make([]struct{}, maxBatchSize) {
@@ -166,16 +168,20 @@
 		})
 	}

-	status = http.StatusOK
-
-	if err := h.send(h.queue...); err != nil {
-		t.Fatalf("Unexpected error: %s", err)
+	status1 = http.StatusOK
+	status2 = http.StatusOK
+	if ne := h.sendAll(h.queue...); ne != 0 {
+		t.Fatalf("Unexpected number of failed sends: %d", ne)
 	}

-	status = 500
+	status1 = http.StatusNotFound
+	if ne := h.sendAll(h.queue...); ne != 1 {
+		t.Fatalf("Unexpected number of failed sends: %d", ne)
+	}

-	if err := h.send(h.queue...); err == nil {
-		t.Fatalf("Expected error but got none")
+	status2 = http.StatusInternalServerError
+	if ne := h.sendAll(h.queue...); ne != 2 {
+		t.Fatalf("Unexpected number of failed sends: %d", ne)
 	}
 }
@@ -203,9 +209,9 @@ func TestHandlerFull(t *testing.T) {
 	}))

 	h := New(&Options{
-		AlertmanagerURL: server.URL,
-		Timeout:         time.Second,
-		QueueCapacity:   3 * maxBatchSize,
+		AlertmanagerURLs: []string{server.URL},
+		Timeout:          time.Second,
+		QueueCapacity:    3 * maxBatchSize,
 	})

 	var alerts model.Alerts