Mirror of https://github.com/prometheus/prometheus.git
notifier: use dynamic service discovery
commit f210d96497 (parent 183c5749b9)
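This change removes the notifier's fixed list of Alertmanager URLs. Alertmanagers are now grouped into sets built from the new AlertmanagersConfig (scheme, path prefix, timeout, HTTP client settings) and kept up to date through the service discovery machinery; legacy -alertmanager.url flags are translated into equivalent static service discovery configurations, and sendAll reports whether at least one Alertmanager accepted the alerts rather than counting failed sends. Judging by the hunk contexts, the touched files are cmd/prometheus/config.go, cmd/prometheus/main.go, notifier/notifier.go, and notifier/notifier_test.go; the file headers below are reconstructed on that assumption.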
diff --git a/cmd/prometheus/config.go b/cmd/prometheus/config.go
@@ -47,6 +47,7 @@ var cfg = struct {
 	storage            local.MemorySeriesStorageOptions
 	localStorageEngine string
 	notifier           notifier.Options
+	notifierTimeout    time.Duration
 	queryEngine        promql.EngineOptions
 	web                web.Options
 	remote             remote.Options
@@ -228,7 +229,7 @@ func init() {
 		"The capacity of the queue for pending alert manager notifications.",
 	)
 	cfg.fs.DurationVar(
-		&cfg.notifier.Timeout, "alertmanager.timeout", 10*time.Second,
+		&cfg.notifierTimeout, "alertmanager.timeout", 10*time.Second,
 		"Alert manager HTTP API timeout.",
 	)

@@ -283,7 +284,6 @@ func parse(args []string) error {
 		if err := validateAlertmanagerURL(u); err != nil {
 			return err
 		}
-		cfg.notifier.AlertmanagerURLs = cfg.alertmanagerURLs.slice()
 	}

 	cfg.remote.InfluxdbPassword = os.Getenv("INFLUXDB_PW")
diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go
@@ -18,6 +18,7 @@ import (
 	"flag"
 	"fmt"
 	_ "net/http/pprof" // Comment this line to disable pprof endpoint.
+	"net/url"
 	"os"
 	"os/signal"
 	"syscall"
@@ -25,6 +26,7 @@ import (

 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/log"
+	"github.com/prometheus/common/model"
 	"github.com/prometheus/common/version"
 	"golang.org/x/net/context"
@@ -259,6 +261,31 @@ func reloadConfig(filename string, rls ...Reloadable) (err error) {
 		return fmt.Errorf("couldn't load configuration (-config.file=%s): %v", filename, err)
 	}

+	// Add AlertmanagerConfigs for legacy Alertmanager URL flags.
+	for us := range cfg.alertmanagerURLs {
+		u, err := url.Parse(us)
+		if err != nil {
+			return err
+		}
+		acfg := &config.AlertmanagersConfig{
+			Scheme:     u.Scheme,
+			PathPrefix: u.Path,
+			Timeout:    cfg.notifierTimeout,
+			ServiceDiscoveryConfig: config.ServiceDiscoveryConfig{
+				StaticConfigs: []*config.TargetGroup{
+					{
+						Targets: []model.LabelSet{
+							{
+								model.AddressLabel: model.LabelValue(u.Host),
+							},
+						},
+					},
+				},
+			},
+		}
+		conf.AlertingConfig.AlertmanagersConfigs = append(conf.AlertingConfig.AlertmanagersConfigs, acfg)
+	}
+
 	failed := false
 	for _, rl := range rls {
 		if err := rl.ApplyConfig(conf); err != nil {
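To make the legacy-flag translation above concrete, here is a minimal standalone sketch: it parses one -alertmanager.url value the way reloadConfig does and prints the pieces that feed the static service discovery config. The example URL is made up; only the standard library is used.

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Hypothetical value of a legacy -alertmanager.url flag.
	u, err := url.Parse("https://am.example.org:9093/prefix")
	if err != nil {
		panic(err)
	}
	// Scheme and path prefix configure how alerts are sent; the host
	// becomes a single static target, as if returned by service discovery.
	fmt.Println("Scheme:    ", u.Scheme) // https
	fmt.Println("PathPrefix:", u.Path)   // /prefix
	fmt.Println("Target:    ", u.Host)   // am.example.org:9093
}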
diff --git a/notifier/notifier.go b/notifier/notifier.go
@@ -17,7 +17,10 @@ import (
 	"bytes"
 	"encoding/json"
 	"fmt"
+	"net"
 	"net/http"
+	"net/url"
+	"path"
 	"strings"
 	"sync"
 	"sync/atomic"
@@ -30,7 +33,9 @@ import (
 	"golang.org/x/net/context/ctxhttp"

 	"github.com/prometheus/prometheus/config"
+	"github.com/prometheus/prometheus/discovery"
 	"github.com/prometheus/prometheus/relabel"
+	"github.com/prometheus/prometheus/retrieval"
 )

 const (
@@ -62,15 +67,16 @@ type Notifier struct {
 	dropped       prometheus.Counter
 	queueLength   prometheus.Gauge
 	queueCapacity prometheus.Metric

+	alertmanagers   []*alertmanagerSet
+	cancelDiscovery func()
 }

 // Options are the configurable parameters of a Handler.
 type Options struct {
-	AlertmanagerURLs []string
-	QueueCapacity    int
-	Timeout          time.Duration
-	ExternalLabels   model.LabelSet
-	RelabelConfigs   []*config.RelabelConfig
+	QueueCapacity  int
+	ExternalLabels model.LabelSet
+	RelabelConfigs []*config.RelabelConfig
 }

 // New constructs a new Notifier.
@@ -139,6 +145,31 @@ func (n *Notifier) ApplyConfig(conf *config.Config) error {

 	n.opts.ExternalLabels = conf.GlobalConfig.ExternalLabels
 	n.opts.RelabelConfigs = conf.AlertingConfig.AlertRelabelConfigs

+	amSets := []*alertmanagerSet{}
+	ctx, cancel := context.WithCancel(n.ctx)
+
+	for _, cfg := range conf.AlertingConfig.AlertmanagersConfigs {
+		ams, err := newAlertmanagerSet(cfg)
+		if err != nil {
+			return err
+		}
+		amSets = append(amSets, ams)
+	}
+
+	// After all sets were created successfully, start them and cancel the
+	// old ones.
+	for _, ams := range amSets {
+		go ams.ts.Run(ctx)
+		ams.ts.UpdateProviders(discovery.ProvidersFromConfig(ams.cfg.ServiceDiscoveryConfig))
+	}
+	if n.cancelDiscovery != nil {
+		n.cancelDiscovery()
+	}
+
+	n.cancelDiscovery = cancel
+	n.alertmanagers = amSets
+
 	return nil
 }
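ApplyConfig above follows a build-then-swap reload pattern: all new alertmanagerSets are constructed first, and only once every one of them was created successfully does it start the new generation and cancel the previous one via the stored cancel function. A generic sketch of that pattern, with a hypothetical worker type standing in for the discovery target sets:

package main

import (
	"context"
	"fmt"
	"time"
)

// worker is a stand-in for a discovery target set.
type worker struct{ name string }

func (w *worker) run(ctx context.Context) {
	<-ctx.Done()
	fmt.Println(w.name, "stopped")
}

// manager remembers how to cancel the previous generation of workers.
type manager struct {
	cancel func()
}

func (m *manager) apply(next []*worker) {
	ctx, cancel := context.WithCancel(context.Background())
	// Start the new generation first...
	for _, w := range next {
		go w.run(ctx)
	}
	// ...then cancel the old one, so a reload never leaves us without
	// running workers.
	if m.cancel != nil {
		m.cancel()
	}
	m.cancel = cancel
}

func main() {
	m := &manager{}
	m.apply([]*worker{{name: "generation 1"}})
	m.apply([]*worker{{name: "generation 2"}}) // stops generation 1
	time.Sleep(100 * time.Millisecond)
}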
@@ -170,13 +201,6 @@ func (n *Notifier) nextBatch() []*model.Alert {

 // Run dispatches notifications continuously.
 func (n *Notifier) Run() {
-	numAMs := len(n.opts.AlertmanagerURLs)
-	// Just warn once in the beginning to prevent noisy logs.
-	if numAMs == 0 {
-		log.Warnf("No AlertManagers configured, not dispatching any alerts")
-		return
-	}
-
 	for {
 		select {
 		case <-n.ctx.Done():
@@ -185,17 +209,7 @@ func (n *Notifier) Run() {
 		}
 		alerts := n.nextBatch()

-		if numAMs > 0 {
-			if len(alerts) > 0 {
-				numErrors := n.sendAll(alerts...)
-				// Increment the dropped counter if we could not send
-				// successfully to a single AlertManager.
-				if numErrors == numAMs {
-					n.dropped.Add(float64(len(alerts)))
-				}
-			}
-		} else {
+		if !n.sendAll(alerts...) {
 			n.dropped.Add(float64(len(alerts)))
 		}
 		// If the queue still has items left, kick off the next iteration.
@@ -267,58 +281,54 @@ func (n *Notifier) setMore() {
 	}
 }

-func postURL(u string) string {
-	return strings.TrimRight(u, "/") + alertPushEndpoint
-}
-
 // sendAll sends the alerts to all configured Alertmanagers concurrently.
-// It returns the number of sends that have failed.
-func (n *Notifier) sendAll(alerts ...*model.Alert) int {
+// It returns true if the alerts could be sent successfully to at least one Alertmanager.
+func (n *Notifier) sendAll(alerts ...*model.Alert) bool {
 	begin := time.Now()

 	b, err := json.Marshal(alerts)
 	if err != nil {
 		log.Errorf("Encoding alerts failed: %s", err)
-		return len(n.opts.AlertmanagerURLs)
+		return false
 	}
-	ctx, _ := context.WithTimeout(context.Background(), n.opts.Timeout)

-	send := func(u string) error {
-		resp, err := ctxhttp.Post(ctx, http.DefaultClient, postURL(u), contentTypeJSON, bytes.NewReader(b))
-		if err != nil {
-			return err
-		}
-		defer resp.Body.Close()
-
-		// Any HTTP status 2xx is OK.
-		if resp.StatusCode/100 != 2 {
-			return fmt.Errorf("bad response status %v", resp.Status)
-		}
-		return err
-	}
+	n.mtx.RLock()
+	amSets := n.alertmanagers
+	n.mtx.RUnlock()

 	var (
-		wg        sync.WaitGroup
-		numErrors uint64
+		wg         sync.WaitGroup
+		numSuccess uint64
 	)
-	for _, u := range n.opts.AlertmanagerURLs {
-		wg.Add(1)
-
-		go func(u string) {
-			if err := send(u); err != nil {
-				log.With("alertmanager", u).With("count", fmt.Sprintf("%d", len(alerts))).Errorf("Error sending alerts: %s", err)
-				n.errors.WithLabelValues(u).Inc()
-				atomic.AddUint64(&numErrors, 1)
-			}
-			n.latency.WithLabelValues(u).Observe(time.Since(begin).Seconds())
-			n.sent.WithLabelValues(u).Add(float64(len(alerts)))
-
-			wg.Done()
-		}(u)
+	for _, ams := range amSets {
+		ams.mtx.RLock()
+
+		for _, am := range ams.ams {
+			wg.Add(1)
+
+			ctx, cancel := context.WithTimeout(n.ctx, ams.cfg.Timeout)
+			defer cancel()
+
+			go func(am alertmanager) {
+				u := am.url()
+
+				if err := am.send(ctx, ams.client, b); err != nil {
+					log.With("alertmanager", u).With("count", len(alerts)).Errorf("Error sending alerts: %s", err)
+					n.errors.WithLabelValues(u).Inc()
+				} else {
+					atomic.AddUint64(&numSuccess, 1)
+				}
+				n.latency.WithLabelValues(u).Observe(time.Since(begin).Seconds())
+				n.sent.WithLabelValues(u).Add(float64(len(alerts)))
+
+				wg.Done()
+			}(am)
+		}
+		ams.mtx.RUnlock()
 	}
 	wg.Wait()

-	return int(numErrors)
+	return numSuccess > 0
 }

 // Stop shuts down the notification handler.
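The rewritten sendAll fans one JSON payload out to every known Alertmanager concurrently and treats the batch as delivered if at least one receiver accepted it. A condensed, self-contained sketch of that fan-out (the endpoint URL is a placeholder):

package main

import (
	"bytes"
	"fmt"
	"net/http"
	"sync"
	"sync/atomic"
)

// sendAll posts the same payload to all endpoints concurrently and
// returns true if at least one of them accepted it.
func sendAll(urls []string, body []byte) bool {
	var (
		wg         sync.WaitGroup
		numSuccess uint64
	)
	for _, u := range urls {
		wg.Add(1)
		go func(u string) {
			defer wg.Done()
			resp, err := http.Post(u, "application/json", bytes.NewReader(body))
			if err != nil {
				return
			}
			defer resp.Body.Close()
			// Any HTTP status 2xx counts as accepted.
			if resp.StatusCode/100 == 2 {
				atomic.AddUint64(&numSuccess, 1)
			}
		}(u)
	}
	wg.Wait()
	return numSuccess > 0
}

func main() {
	ok := sendAll([]string{"http://localhost:9093/api/v1/alerts"}, []byte(`[]`))
	fmt.Println("at least one send succeeded:", ok)
}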
@@ -350,3 +360,161 @@ func (n *Notifier) Collect(ch chan<- prometheus.Metric) {
 	ch <- n.queueLength
 	ch <- n.queueCapacity
 }
+
+// alertmanager holds all necessary information to send alerts
+// to an Alertmanager endpoint.
+type alertmanager struct {
+	plainURL string // test injection hook
+	labels   model.LabelSet
+}
+
+const pathLabel = "__alerts_path__"
+
+func (a alertmanager) url() string {
+	if a.plainURL != "" {
+		return a.plainURL
+	}
+	u := &url.URL{
+		Scheme: string(a.labels[model.SchemeLabel]),
+		Host:   string(a.labels[model.AddressLabel]),
+		Path:   string(a.labels[pathLabel]),
+	}
+	return u.String()
+}
+
+func (a alertmanager) send(ctx context.Context, c *http.Client, b []byte) error {
+	resp, err := ctxhttp.Post(ctx, c, a.url(), contentTypeJSON, bytes.NewReader(b))
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	// Any HTTP status 2xx is OK.
+	if resp.StatusCode/100 != 2 {
+		return fmt.Errorf("bad response status %v", resp.Status)
+	}
+	return err
+}
+
+// alertmanagerSet contains a set of Alertmanagers discovered via a group of service
+// discovery definitions that have a common configuration on how alerts should be sent.
+type alertmanagerSet struct {
+	ts     *discovery.TargetSet
+	cfg    *config.AlertmanagersConfig
+	client *http.Client
+
+	mtx sync.RWMutex
+	ams []alertmanager
+}
+
+func newAlertmanagerSet(cfg *config.AlertmanagersConfig) (*alertmanagerSet, error) {
+	client, err := retrieval.NewHTTPClient(cfg.HTTPClientConfig)
+	if err != nil {
+		return nil, err
+	}
+	s := &alertmanagerSet{
+		client: client,
+		cfg:    cfg,
+	}
+	s.ts = discovery.NewTargetSet(s)
+
+	return s, nil
+}
+
+// Sync extracts a deduplicated set of Alertmanager endpoints from a list
+// of target group definitions.
+func (s *alertmanagerSet) Sync(tgs []*config.TargetGroup) {
+	all := []alertmanager{}
+
+	for _, tg := range tgs {
+		ams, err := alertmanagerFromGroup(tg, s.cfg)
+		if err != nil {
+			log.With("err", err).Error("generating discovered Alertmanagers failed")
+			continue
+		}
+		all = append(all, ams...)
+	}
+
+	s.mtx.Lock()
+	defer s.mtx.Unlock()
+	// Set new Alertmanagers and deduplicate them along their unique URL.
+	s.ams = []alertmanager{}
+	seen := map[string]struct{}{}
+
+	for _, am := range all {
+		us := am.url()
+		if _, ok := seen[us]; ok {
+			continue
+		}
+
+		seen[us] = struct{}{}
+		s.ams = append(s.ams, am)
+	}
+}
+
+func postPath(pre string) string {
+	return path.Join("/", pre, alertPushEndpoint)
+}
+
+// alertmanagerFromGroup extracts a list of alertmanagers from a target group and an associated
+// AlertmanagerConfig.
+func alertmanagerFromGroup(tg *config.TargetGroup, cfg *config.AlertmanagersConfig) ([]alertmanager, error) {
+	var res []alertmanager
+
+	for _, lset := range tg.Targets {
+		// Set configured scheme as the initial scheme label for overwrite.
+		lset[model.SchemeLabel] = model.LabelValue(cfg.Scheme)
+		lset[pathLabel] = model.LabelValue(postPath(cfg.PathPrefix))
+
+		// Combine target labels with target group labels.
+		for ln, lv := range tg.Labels {
+			if _, ok := lset[ln]; !ok {
+				lset[ln] = lv
+			}
+		}
+		lset := relabel.Process(lset, cfg.RelabelConfigs...)
+		if lset == nil {
+			continue
+		}
+
+		// addPort checks whether we should add a default port to the address.
+		// If the address is not valid, we don't append a port either.
+		addPort := func(s string) bool {
+			// If we can split, a port exists and we don't have to add one.
+			if _, _, err := net.SplitHostPort(s); err == nil {
+				return false
+			}
+			// If adding a port makes it valid, the previous error
+			// was not due to an invalid address and we can append a port.
+			_, _, err := net.SplitHostPort(s + ":1234")
+			return err == nil
+		}
+		// If it's an address with no trailing port, infer it based on the used scheme.
+		if addr := string(lset[model.AddressLabel]); addPort(addr) {
+			// Addresses reaching this point are already wrapped in [] if necessary.
+			switch lset[model.SchemeLabel] {
+			case "http", "":
+				addr = addr + ":80"
+			case "https":
+				addr = addr + ":443"
+			default:
+				return nil, fmt.Errorf("invalid scheme: %q", cfg.Scheme)
+			}
+			lset[model.AddressLabel] = model.LabelValue(addr)
+		}
+		if err := config.CheckTargetAddress(lset[model.AddressLabel]); err != nil {
+			return nil, err
+		}
+
+		// Meta labels are deleted after relabelling. Other internal labels propagate to
+		// the target which decides whether they will be part of their label set.
+		for ln := range lset {
+			if strings.HasPrefix(string(ln), model.MetaLabelPrefix) {
+				delete(lset, ln)
+			}
+		}
+
+		res = append(res, alertmanager{labels: lset})
+	}
+	return res, nil
+}
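The addPort closure in alertmanagerFromGroup is a small heuristic worth isolating: a default port is appended only when the address is otherwise valid and merely missing one, because net.SplitHostPort fails both for missing ports and for genuinely malformed addresses. A runnable sketch with example addresses:

package main

import (
	"fmt"
	"net"
)

// addPort reports whether a default port should be appended to s.
func addPort(s string) bool {
	// If we can split, a port already exists.
	if _, _, err := net.SplitHostPort(s); err == nil {
		return false
	}
	// If appending a port makes the address valid, it was only the
	// port that was missing; otherwise the address itself is broken.
	_, _, err := net.SplitHostPort(s + ":1234")
	return err == nil
}

func main() {
	for _, addr := range []string{"am.example.org", "am.example.org:9093", "[::1]", "::1"} {
		fmt.Printf("%-20s add default port: %v\n", addr, addPort(addr))
	}
}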
diff --git a/notifier/notifier_test.go b/notifier/notifier_test.go
@@ -27,34 +27,34 @@ import (
 	"github.com/prometheus/prometheus/config"
 )

-func TestPostURL(t *testing.T) {
+func TestPostPath(t *testing.T) {
 	var cases = []struct {
 		in, out string
 	}{
 		{
-			in:  "http://localhost:9093",
-			out: "http://localhost:9093/api/v1/alerts",
+			in:  "",
+			out: "/api/v1/alerts",
 		},
 		{
-			in:  "http://localhost:9093/",
-			out: "http://localhost:9093/api/v1/alerts",
+			in:  "/",
+			out: "/api/v1/alerts",
 		},
 		{
-			in:  "http://localhost:9093/prefix",
-			out: "http://localhost:9093/prefix/api/v1/alerts",
+			in:  "/prefix",
+			out: "/prefix/api/v1/alerts",
 		},
 		{
-			in:  "http://localhost:9093/prefix//",
-			out: "http://localhost:9093/prefix/api/v1/alerts",
+			in:  "/prefix//",
+			out: "/prefix/api/v1/alerts",
 		},
 		{
-			in:  "http://localhost:9093/prefix//",
-			out: "http://localhost:9093/prefix/api/v1/alerts",
+			in:  "prefix//",
+			out: "/prefix/api/v1/alerts",
 		},
 	}
 	for _, c := range cases {
-		if res := postURL(c.in); res != c.out {
-			t.Errorf("Expected post URL %q for %q but got %q", c.out, c.in, res)
+		if res := postPath(c.in); res != c.out {
+			t.Errorf("Expected post path %q for %q but got %q", c.out, c.in, res)
 		}
 	}
 }
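All of TestPostPath's cases hold because path.Join cleans its result: duplicate slashes collapse and relative prefixes are rooted. A quick demonstration, assuming the /api/v1/alerts push endpoint constant from the diff:

package main

import (
	"fmt"
	"path"
)

const alertPushEndpoint = "/api/v1/alerts"

func postPath(pre string) string {
	return path.Join("/", pre, alertPushEndpoint)
}

func main() {
	// Mirrors the test table: every prefix yields a clean, rooted path.
	for _, pre := range []string{"", "/", "/prefix", "/prefix//", "prefix//"} {
		fmt.Printf("%-10q -> %s\n", pre, postPath(pre))
	}
}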
@@ -123,9 +123,6 @@ func TestHandlerSendAll(t *testing.T) {
 	)

 	f := func(w http.ResponseWriter, r *http.Request) {
-		if r.URL.Path != alertPushEndpoint {
-			t.Fatalf("Bad endpoint %q used, expected %q", r.URL.Path, alertPushEndpoint)
-		}
 		defer r.Body.Close()

 		var alerts model.Alerts
@@ -150,9 +147,15 @@ func TestHandlerSendAll(t *testing.T) {
 	defer server1.Close()
 	defer server2.Close()

-	h := New(&Options{
-		AlertmanagerURLs: []string{server1.URL, server2.URL},
-		Timeout:          time.Minute,
-	})
+	h := New(&Options{})
+	h.alertmanagers = append(h.alertmanagers, &alertmanagerSet{
+		ams: []alertmanager{
+			{plainURL: server1.URL},
+			{plainURL: server2.URL},
+		},
+		cfg: &config.AlertmanagersConfig{
+			Timeout: time.Second,
+		},
+	})

 	for i := range make([]struct{}, maxBatchSize) {
@@ -170,18 +173,18 @@ func TestHandlerSendAll(t *testing.T) {

 	status1 = http.StatusOK
 	status2 = http.StatusOK
-	if ne := h.sendAll(h.queue...); ne != 0 {
-		t.Fatalf("Unexpected number of failed sends: %d", ne)
+	if !h.sendAll(h.queue...) {
+		t.Fatalf("all sends failed unexpectedly")
 	}

 	status1 = http.StatusNotFound
-	if ne := h.sendAll(h.queue...); ne != 1 {
-		t.Fatalf("Unexpected number of failed sends: %d", ne)
+	if !h.sendAll(h.queue...) {
+		t.Fatalf("all sends failed unexpectedly")
 	}

 	status2 = http.StatusInternalServerError
-	if ne := h.sendAll(h.queue...); ne != 2 {
-		t.Fatalf("Unexpected number of failed sends: %d", ne)
+	if h.sendAll(h.queue...) {
+		t.Fatalf("all sends succeeded unexpectedly")
 	}
 }
@@ -305,9 +308,15 @@ func TestHandlerQueueing(t *testing.T) {
 	}))

 	h := New(&Options{
-		AlertmanagerURLs: []string{server.URL},
-		Timeout:          time.Second,
-		QueueCapacity:    3 * maxBatchSize,
+		QueueCapacity: 3 * maxBatchSize,
+	})
+	h.alertmanagers = append(h.alertmanagers, &alertmanagerSet{
+		ams: []alertmanager{
+			{plainURL: server.URL},
+		},
+		cfg: &config.AlertmanagersConfig{
+			Timeout: time.Second,
+		},
 	})

 	var alerts model.Alerts