allow relabeling of alerts

In the case of dropping, don't even enqueue them
This commit is contained in:
Frederic Branczyk 2016-08-09 10:08:15 +02:00
parent 32fad9fbb4
commit 679d225c8d
3 changed files with 71 additions and 5 deletions

View file

@ -176,9 +176,10 @@ func (u URL) MarshalYAML() (interface{}, error) {
// Config is the top-level configuration for Prometheus's config files. // Config is the top-level configuration for Prometheus's config files.
type Config struct { type Config struct {
GlobalConfig GlobalConfig `yaml:"global"` GlobalConfig GlobalConfig `yaml:"global"`
RuleFiles []string `yaml:"rule_files,omitempty"` AlertRelabelConfigs []*RelabelConfig `yaml:"alert_relabel_configs,omitempty"`
ScrapeConfigs []*ScrapeConfig `yaml:"scrape_configs,omitempty"` RuleFiles []string `yaml:"rule_files,omitempty"`
ScrapeConfigs []*ScrapeConfig `yaml:"scrape_configs,omitempty"`
// Catches all undefined fields and must be empty after parsing. // Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"` XXX map[string]interface{} `yaml:",inline"`

View file

@ -30,6 +30,7 @@ import (
"golang.org/x/net/context/ctxhttp" "golang.org/x/net/context/ctxhttp"
"github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/retrieval"
) )
const ( const (
@ -69,9 +70,10 @@ type Options struct {
QueueCapacity int QueueCapacity int
Timeout time.Duration Timeout time.Duration
ExternalLabels model.LabelSet ExternalLabels model.LabelSet
RelabelConfigs []*config.RelabelConfig
} }
// New constructs a neww Notifier. // New constructs a new Notifier.
func New(o *Options) *Notifier { func New(o *Options) *Notifier {
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
@ -136,6 +138,7 @@ func (n *Notifier) ApplyConfig(conf *config.Config) error {
defer n.mtx.Unlock() defer n.mtx.Unlock()
n.opts.ExternalLabels = conf.GlobalConfig.ExternalLabels n.opts.ExternalLabels = conf.GlobalConfig.ExternalLabels
n.opts.RelabelConfigs = conf.AlertRelabelConfigs
return nil return nil
} }
@ -208,6 +211,8 @@ func (n *Notifier) Send(alerts ...*model.Alert) {
n.mtx.Lock() n.mtx.Lock()
defer n.mtx.Unlock() defer n.mtx.Unlock()
alerts = n.relabelAlerts(alerts)
// Queue capacity should be significantly larger than a single alert // Queue capacity should be significantly larger than a single alert
// batch could be. // batch could be.
if d := len(alerts) - n.opts.QueueCapacity; d > 0 { if d := len(alerts) - n.opts.QueueCapacity; d > 0 {
@ -231,6 +236,18 @@ func (n *Notifier) Send(alerts ...*model.Alert) {
n.setMore() n.setMore()
} }
// relabelAlerts applies the configured alert relabeling rules to each
// alert's label set. Alerts whose label set is dropped by relabeling
// (nil result) are filtered out entirely, so they are never enqueued.
func (n *Notifier) relabelAlerts(alerts []*model.Alert) []*model.Alert {
	var kept []*model.Alert
	for _, a := range alerts {
		// NOTE(review): the error from Relabel is discarded — presumably it
		// cannot occur for configs that already passed validation; confirm.
		ls, _ := retrieval.Relabel(a.Labels, n.opts.RelabelConfigs...)
		if ls == nil {
			continue
		}
		a.Labels = ls
		kept = append(kept, a)
	}
	return kept
}
// setMore signals that the alert queue has items. // setMore signals that the alert queue has items.
func (n *Notifier) setMore() { func (n *Notifier) setMore() {
// If we cannot send on the channel, it means the signal already exists // If we cannot send on the channel, it means the signal already exists

View file

@ -23,6 +23,8 @@ import (
"time" "time"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/prometheus/config"
) )
func TestPostURL(t *testing.T) { func TestPostURL(t *testing.T) {
@ -185,7 +187,53 @@ func TestHandlerSendAll(t *testing.T) {
} }
} }
func TestHandlerFull(t *testing.T) { func TestHandlerRelabel(t *testing.T) {
h := New(&Options{
QueueCapacity: 3 * maxBatchSize,
RelabelConfigs: []*config.RelabelConfig{
&config.RelabelConfig{
SourceLabels: model.LabelNames{"alertname"},
Action: "drop",
Regex: config.MustNewRegexp("drop"),
},
&config.RelabelConfig{
SourceLabels: model.LabelNames{"alertname"},
TargetLabel: "alertname",
Action: "replace",
Regex: config.MustNewRegexp("rename"),
Replacement: "renamed",
},
},
})
// This alert should be dropped due to the configuration
h.Send(&model.Alert{
Labels: model.LabelSet{
"alertname": "drop",
},
})
// This alert should be replaced due to the configuration
h.Send(&model.Alert{
Labels: model.LabelSet{
"alertname": "rename",
},
})
expected := []*model.Alert{
&model.Alert{
Labels: model.LabelSet{
"alertname": "renamed",
},
},
}
if !alertsEqual(expected, h.queue) {
t.Errorf("Expected alerts %v, got %v", expected, h.queue)
}
}
func TestHandlerQueueing(t *testing.T) {
var ( var (
unblock = make(chan struct{}) unblock = make(chan struct{})
called = make(chan struct{}) called = make(chan struct{})