Mirror of https://github.com/prometheus/prometheus.git
Merge pull request #13735 from bboreham/fix-notifier-relabel
[BUGFIX] Alerts: don't reuse payload after relabeling.
Commit e8bf2ce4e1
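The change addresses two related problems in `Manager.sendAll`: the marshalled alert payloads (`v1Payload`, `v2Payload`) are cached across Alertmanager sets, so once a set applies alert relabel configs the cache has to be dropped both before building that set's payload and before moving on to the next set; and the send goroutine should receive its payload and alert count as arguments rather than reading loop-scoped variables. A minimal, self-contained sketch of the caching rule enforced by the diff below (hypothetical `Alert`/`target` types and `sendToAll` helper, not the Prometheus code):

```go
package main

import "encoding/json"

// Alert, target, and sendToAll are hypothetical stand-ins, not Prometheus types.
type Alert struct {
	Labels map[string]string `json:"labels"`
}

type target struct {
	relabel func([]Alert) []Alert // nil when the target has no relabel rules
	send    func([]byte)
}

// sendToAll reuses one marshalled payload across targets, but only while the
// alert set is unchanged: any per-target relabeling invalidates the cache
// both before this target's send and before the next target's send.
func sendToAll(targets []target, alerts []Alert) {
	var cached []byte // shared cache, analogous to v1Payload / v2Payload
	for _, t := range targets {
		toSend := alerts
		if t.relabel != nil {
			toSend = t.relabel(alerts)
			if len(toSend) == 0 {
				continue
			}
			cached = nil // can't reuse the previous target's payload
		}
		if cached == nil {
			cached, _ = json.Marshal(toSend)
		}
		t.send(cached)
		if t.relabel != nil {
			cached = nil // the next target must not reuse the relabeled payload
		}
	}
}

func main() {
	alerts := []Alert{{Labels: map[string]string{"alertname": "Up"}}}
	sendToAll([]target{{send: func(b []byte) { println(string(b)) }}}, alerts)
}
```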
notifier/notifier.go
@@ -481,14 +481,11 @@ func (n *Manager) sendAll(alerts ...*Alert) bool {
 
 		if len(ams.cfg.AlertRelabelConfigs) > 0 {
 			amAlerts = relabelAlerts(ams.cfg.AlertRelabelConfigs, labels.Labels{}, alerts)
-			// TODO(nabokihms): figure out the right way to cache marshalled alerts.
-			// Now it works well only for happy cases.
-			v1Payload = nil
-			v2Payload = nil
-
 			if len(amAlerts) == 0 {
 				continue
 			}
+			// We can't use the cached values from previous iteration.
+			v1Payload, v2Payload = nil, nil
 		}
 
 		switch ams.cfg.APIVersion {
@@ -531,15 +528,20 @@ func (n *Manager) sendAll(alerts ...*Alert) bool {
 			}
 		}
 
+		if len(ams.cfg.AlertRelabelConfigs) > 0 {
+			// We can't use the cached values on the next iteration.
+			v1Payload, v2Payload = nil, nil
+		}
+
 		for _, am := range ams.ams {
 			wg.Add(1)
 
 			ctx, cancel := context.WithTimeout(n.ctx, time.Duration(ams.cfg.Timeout))
 			defer cancel()
 
-			go func(client *http.Client, url string) {
+			go func(ctx context.Context, client *http.Client, url string, payload []byte, count int) {
 				if err := n.sendOne(ctx, client, url, payload); err != nil {
-					level.Error(n.logger).Log("alertmanager", url, "count", len(amAlerts), "msg", "Error sending alert", "err", err)
+					level.Error(n.logger).Log("alertmanager", url, "count", count, "msg", "Error sending alert", "err", err)
 					n.metrics.errors.WithLabelValues(url).Inc()
 				} else {
 					numSuccess.Inc()
@@ -548,7 +550,7 @@ func (n *Manager) sendAll(alerts ...*Alert) bool {
 				n.metrics.sent.WithLabelValues(url).Add(float64(len(amAlerts)))
 
 				wg.Done()
-			}(ams.client, am.url().String())
+			}(ctx, ams.client, am.url().String(), payload, len(amAlerts))
 		}
 
 		ams.mtx.RUnlock()
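The other half of the fix passes `ctx`, the payload, and the alert count to the send goroutine as arguments instead of letting the closure read variables that the surrounding loop reassigns on later iterations. A small illustration of that pattern (not the Prometheus code, standard library only):

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	payloads := [][]byte{[]byte(`{"set":"a"}`), []byte(`{"set":"b"}`)}

	var (
		wg      sync.WaitGroup
		payload []byte // reassigned every iteration, like payload in sendAll
	)
	for i, p := range payloads {
		payload = p
		wg.Add(1)
		// Passing payload and count as arguments pins the values the goroutine
		// sees at spawn time; reading the outer variables instead could let a
		// later iteration's reassignment leak into an in-flight send.
		go func(payload []byte, count int) {
			defer wg.Done()
			fmt.Printf("sending %d alerts: %s\n", count, payload)
		}(payload, i+1)
	}
	wg.Wait()
}
```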