mirror of https://github.com/prometheus/prometheus.git
synced 2024-12-24 13:14:05 -08:00

Refactor rules/ package

parent e4fabe135a
commit 52e5224f5a
@@ -155,7 +155,7 @@ func Main() int {
     prometheus.MustRegister(configSuccess)
     prometheus.MustRegister(configSuccessTime)
 
-    go ruleManager.Run()
+    // go ruleManager.Run()
     defer ruleManager.Stop()
 
     go notificationHandler.Run()
@@ -168,6 +168,9 @@ func (n *Handler) Run() {
 
         alerts := n.nextBatch()
 
         if len(alerts) == 0 {
             continue
         }
+        if n.opts.AlertmanagerURL == "" {
+            log.Warn("No AlertManager configured, not dispatching %d alerts", len(alerts))
+            n.dropped.Add(float64(len(alerts)))
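The guard added here only logs and counts the batch when no Alertmanager is configured. A minimal standalone sketch of that accounting, using an illustrative counter name and the standard library logger rather than the handler's own fields:

package main

import (
    "log"

    "github.com/prometheus/client_golang/prometheus"
)

// droppedAlerts stands in for the handler's dropped-alerts counter; the
// metric name here is made up for the example.
var droppedAlerts = prometheus.NewCounter(prometheus.CounterOpts{
    Name: "example_notifications_dropped_total",
    Help: "Alerts not dispatched because no Alertmanager URL was configured.",
})

func dispatch(alertmanagerURL string, alerts []string) {
    if len(alerts) == 0 {
        return
    }
    if alertmanagerURL == "" {
        log.Printf("No AlertManager configured, not dispatching %d alerts", len(alerts))
        droppedAlerts.Add(float64(len(alerts)))
        return
    }
    // ... send the batch to the Alertmanager ...
}

func main() {
    prometheus.MustRegister(droppedAlerts)
    dispatch("", []string{"HighErrorRate"})
}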
@@ -46,52 +46,21 @@ func (s AlertState) String() string {
         return "pending"
     case StateFiring:
         return "firing"
-    default:
-        panic("undefined")
     }
+    panic(fmt.Errorf("unknown alert state: %v", s))
 }
 
-const (
-    // StateInactive is the state of an alert that is either firing nor pending.
-    StateInactive AlertState = iota
-    // StatePending is the state of an alert that has been active for less than
-    // the configured threshold duration.
-    StatePending
-    // StateFiring is the state of an alert that has been active for longer than
-    // the configured threshold duration.
-    StateFiring
-)
-
-// Alert is used to track active (pending/firing) alerts over time.
-type Alert struct {
-    // The name of the alert.
-    Name string
-    // The vector element labelset triggering this alert.
-    Labels model.LabelSet
-    // The state of the alert (Pending or Firing).
-    State AlertState
-    // The time when the alert first transitioned into Pending state.
-    ActiveSince model.Time
-    // The value of the alert expression for this vector element.
-    Value model.SampleValue
-}
-
-// sample returns a Sample suitable for recording the alert.
-func (a Alert) sample(timestamp model.Time, value model.SampleValue) *model.Sample {
-    recordedMetric := make(model.Metric, len(a.Labels)+3)
-    for label, value := range a.Labels {
-        recordedMetric[label] = value
-    }
-
-    recordedMetric[model.MetricNameLabel] = alertMetricName
-    recordedMetric[alertNameLabel] = model.LabelValue(a.Name)
-    recordedMetric[alertStateLabel] = model.LabelValue(a.State.String())
-
-    return &model.Sample{
-        Metric:    recordedMetric,
-        Value:     value,
-        Timestamp: timestamp,
-    }
+type alertInstance struct {
+    metric      model.Metric
+    value       model.SampleValue
+    state       AlertState
+    activeSince model.Time
 }
 
 // An AlertingRule generates alerts from its vector expression.
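For reference, a self-contained sketch of the alert state type and the exhaustive String() method shown above. The ordering of the constants is what allows a rule's overall state to be reported as a simple maximum (firing > pending > inactive), which the refactored AlertingRule.State() further down relies on:

package main

import "fmt"

// AlertState mirrors the constants from rules/alerting.go.
type AlertState int

const (
    StateInactive AlertState = iota
    StatePending
    StateFiring
)

func (s AlertState) String() string {
    switch s {
    case StateInactive:
        return "inactive"
    case StatePending:
        return "pending"
    case StateFiring:
        return "firing"
    }
    panic(fmt.Errorf("unknown alert state: %v", s))
}

func main() {
    // Because the constants are ordered, the "worst" state wins.
    states := []AlertState{StatePending, StateInactive, StateFiring}
    max := StateInactive
    for _, s := range states {
        if s > max {
            max = s
        }
    }
    fmt.Println(max) // firing
}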
@@ -109,10 +78,10 @@ type AlertingRule struct {
     annotations model.LabelSet
 
     // Protects the below.
-    mutex sync.Mutex
+    mtx sync.Mutex
     // A map of alerts which are currently active (Pending or Firing), keyed by
     // the fingerprint of the labelset they correspond to.
-    activeAlerts map[model.Fingerprint]*Alert
+    active map[model.Fingerprint]*alertInstance
 }
 
 // NewAlertingRule constructs a new AlertingRule.
@@ -123,8 +92,7 @@ func NewAlertingRule(name string, vec promql.Expr, hold time.Duration, lbls, ann
         holdDuration: hold,
         labels:       lbls,
         annotations:  anns,
-
-        activeAlerts: map[model.Fingerprint]*Alert{},
+        active:       map[model.Fingerprint]*alertInstance{},
     }
 }
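The constructor now seeds the active map keyed by label-set fingerprint. A hedged sketch of how such a rule could be assembled from a parsed expression, assuming the five arguments visible at the call sites later in this commit (name, expression, hold duration, labels, annotations); the expression, threshold, and label values are made up:

package rules

import (
    "time"

    "github.com/prometheus/common/model"
    "github.com/prometheus/prometheus/promql"
)

// newExampleRule shows how an AlertingRule might be assembled from a parsed
// PromQL expression. Everything concrete here is illustrative.
func newExampleRule() (*AlertingRule, error) {
    expr, err := promql.ParseExpr(`job:request_errors:rate5m > 0.1`)
    if err != nil {
        return nil, err
    }
    return NewAlertingRule(
        "HighErrorRate",
        expr,
        5*time.Minute, // hold duration: how long the alert must stay pending before it fires
        model.LabelSet{"severity": "page"},
        model.LabelSet{"summary": "High request error rate"},
    ), nil
}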
@@ -133,65 +101,127 @@ func (rule *AlertingRule) Name() string {
     return rule.name
 }
 
+func (r *AlertingRule) sample(ai *alertInstance, ts model.Time, set bool) *model.Sample {
+    // Build alert labels in order they can be overwritten.
+    metric := model.Metric(r.labels.Clone())
+
+    for ln, lv := range ai.metric {
+        metric[ln] = lv
+    }
+
+    metric[model.MetricNameLabel] = alertMetricName
+    metric[model.AlertNameLabel] = model.LabelValue(r.name)
+    metric[alertStateLabel] = model.LabelValue(ai.state.String())
+
+    s := &model.Sample{
+        Metric:    metric,
+        Timestamp: ts,
+        Value:     0,
+    }
+    if set {
+        s.Value = 1
+    }
+    return s
+}
+
 // eval evaluates the rule expression and then creates pending alerts and fires
 // or removes previously pending alerts accordingly.
-func (rule *AlertingRule) eval(timestamp model.Time, engine *promql.Engine) (model.Vector, error) {
-    query, err := engine.NewInstantQuery(rule.vector.String(), timestamp)
+func (r *AlertingRule) eval(ts model.Time, engine *promql.Engine) (model.Vector, error) {
+    query, err := engine.NewInstantQuery(r.vector.String(), ts)
     if err != nil {
         return nil, err
     }
-    exprResult, err := query.Exec().Vector()
+    res, err := query.Exec().Vector()
    if err != nil {
        return nil, err
    }
 
-    rule.mutex.Lock()
-    defer rule.mutex.Unlock()
+    r.mtx.Lock()
+    defer r.mtx.Unlock()
 
     // Create pending alerts for any new vector elements in the alert expression
     // or update the expression value for existing elements.
     resultFPs := map[model.Fingerprint]struct{}{}
-    for _, sample := range exprResult {
-        fp := sample.Metric.Fingerprint()
+
+    for _, smpl := range res {
+        fp := smpl.Metric.Fingerprint()
         resultFPs[fp] = struct{}{}
 
-        if alert, ok := rule.activeAlerts[fp]; !ok {
-            labels := model.LabelSet(sample.Metric.Clone())
-            labels = labels.Merge(rule.labels)
-            if _, ok := labels[model.MetricNameLabel]; ok {
-                delete(labels, model.MetricNameLabel)
-            }
-            rule.activeAlerts[fp] = &Alert{
-                Name:        rule.name,
-                Labels:      labels,
-                State:       StatePending,
-                ActiveSince: timestamp,
-                Value:       sample.Value,
-            }
-        } else {
-            alert.Value = sample.Value
-        }
-    }
-
-    var vector model.Vector
-
-    // Check if any pending alerts should be removed or fire now. Write out alert timeseries.
-    for fp, activeAlert := range rule.activeAlerts {
-        if _, ok := resultFPs[fp]; !ok {
-            vector = append(vector, activeAlert.sample(timestamp, 0))
-            delete(rule.activeAlerts, fp)
+        if ai, ok := r.active[fp]; ok {
+            ai.value = smpl.Value
             continue
         }
-
-        if activeAlert.State == StatePending && timestamp.Sub(activeAlert.ActiveSince) >= rule.holdDuration {
-            vector = append(vector, activeAlert.sample(timestamp, 0))
-            activeAlert.State = StateFiring
-        }
+        delete(smpl.Metric, model.MetricNameLabel)
 
-        vector = append(vector, activeAlert.sample(timestamp, 1))
+        r.active[fp] = &alertInstance{
+            metric:      smpl.Metric,
+            activeSince: ts,
+            state:       StatePending,
+            value:       smpl.Value,
+        }
     }
 
-    return vector, nil
+    var vec model.Vector
+    // Check if any pending alerts should be removed or fire now. Write out alert timeseries.
+    for fp, ai := range r.active {
+        if _, ok := resultFPs[fp]; !ok {
+            delete(r.active, fp)
+            vec = append(vec, r.sample(ai, ts, false))
+            continue
+        }
+
+        if ai.state != StateFiring && ts.Sub(ai.activeSince) >= r.holdDuration {
+            vec = append(vec, r.sample(ai, ts, false))
+            ai.state = StateFiring
+        }
+
+        vec = append(vec, r.sample(ai, ts, true))
+    }
+
+    return vec, nil
 }
 
+// Alert is the user-level representation of a single instance of an alerting rule.
+type Alert struct {
+    State       AlertState
+    Labels      model.LabelSet
+    ActiveSince model.Time
+    Value       model.SampleValue
+}
+
+func (r *AlertingRule) State() AlertState {
+    r.mtx.Lock()
+    defer r.mtx.Unlock()
+
+    maxState := StateInactive
+    for _, ai := range r.active {
+        if ai.state > maxState {
+            maxState = ai.state
+        }
+    }
+    return maxState
+}
+
+// ActiveAlerts returns a slice of active alerts.
+func (r *AlertingRule) ActiveAlerts() []*Alert {
+    r.mtx.Lock()
+    defer r.mtx.Unlock()
+
+    alerts := make([]*Alert, 0, len(r.active))
+    for _, ai := range r.active {
+        labels := r.labels.Clone()
+        for ln, lv := range ai.metric {
+            labels[ln] = lv
+        }
+        alerts = append(alerts, &Alert{
+            State:       ai.state,
+            Labels:      labels,
+            ActiveSince: ai.activeSince,
+            Value:       ai.value,
+        })
+    }
+    return alerts
+}
+
 func (rule *AlertingRule) String() string {
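The rewritten eval keeps a small state machine per label set: an element seen for the first time becomes pending, an element still present once the hold duration has passed is promoted to firing, and an element missing from the query result is dropped. A standalone sketch of just that transition logic, with the PromQL query and sample output stubbed out:

package main

import (
    "fmt"
    "time"
)

type state int

const (
    inactive state = iota
    pending
    firing
)

type instance struct {
    state       state
    activeSince time.Time
}

// transition applies one evaluation round: seen holds the label-set keys the
// query returned this round, active is the rule's book-keeping between rounds.
func transition(active map[string]*instance, seen map[string]bool, now time.Time, hold time.Duration) {
    for key := range seen {
        if _, ok := active[key]; !ok {
            active[key] = &instance{state: pending, activeSince: now}
        }
    }
    for key, inst := range active {
        if !seen[key] {
            delete(active, key) // resolved: element vanished from the result
            continue
        }
        if inst.state != firing && now.Sub(inst.activeSince) >= hold {
            inst.state = firing
        }
    }
}

func main() {
    active := map[string]*instance{}
    now := time.Now()
    hold := 5 * time.Minute

    transition(active, map[string]bool{`{instance="a"}`: true}, now, hold)
    fmt.Println(active[`{instance="a"}`].state == pending) // true

    transition(active, map[string]bool{`{instance="a"}`: true}, now.Add(hold), hold)
    fmt.Println(active[`{instance="a"}`].state == firing) // true

    transition(active, map[string]bool{}, now.Add(2*hold), hold)
    fmt.Println(len(active)) // 0: the alert resolved
}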
@@ -230,29 +260,3 @@ func (rule *AlertingRule) HTMLSnippet(pathPrefix string) template.HTML {
     }
     return template.HTML(s)
 }
-
-// State returns the "maximum" state: firing > pending > inactive.
-func (rule *AlertingRule) State() AlertState {
-    rule.mutex.Lock()
-    defer rule.mutex.Unlock()
-
-    maxState := StateInactive
-    for _, activeAlert := range rule.activeAlerts {
-        if activeAlert.State > maxState {
-            maxState = activeAlert.State
-        }
-    }
-    return maxState
-}
-
-// ActiveAlerts returns a slice of active alerts.
-func (rule *AlertingRule) ActiveAlerts() []Alert {
-    rule.mutex.Lock()
-    defer rule.mutex.Unlock()
-
-    alerts := make([]Alert, 0, len(rule.activeAlerts))
-    for _, alert := range rule.activeAlerts {
-        alerts = append(alerts, *alert)
-    }
-    return alerts
-}
rules/manager.go (451 changed lines)
@@ -39,9 +39,7 @@ import (
 const (
     namespace = "prometheus"
 
-    ruleTypeLabel     = "rule_type"
-    ruleTypeAlerting  = "alerting"
-    ruleTypeRecording = "recording"
+    ruleTypeLabel = "rule_type"
 )
 
 var (
@@ -74,12 +72,18 @@ func init() {
     prometheus.MustRegister(evalDuration)
 }
 
+type ruleType string
+
+const (
+    ruleTypeAlert     = "alerting"
+    ruleTypeRecording = "recording"
+)
+
 // A Rule encapsulates a vector expression which is evaluated at a specified
 // interval and acted upon (currently either recorded or used for alerting).
 type Rule interface {
     // Name returns the name of the rule.
     Name() string
-    // Eval evaluates the rule, including any associated recording or alerting actions.
+    // eval evaluates the rule, including any associated recording or alerting actions.
     eval(model.Time, *promql.Engine) (model.Vector, error)
     // String returns a human-readable string representation of the rule.
     String() string
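With the new ruleType constants, the evaluation duration is observed once after the type switch via a rule_type label instead of two separate metric calls. A minimal sketch of that metric shape, assuming a SummaryVec analogous to the package's evalDuration:

package main

import (
    "time"

    "github.com/prometheus/client_golang/prometheus"
)

// evalDuration is a stand-in for the rules package metric of the same name;
// the metric name below is illustrative.
var evalDuration = prometheus.NewSummaryVec(
    prometheus.SummaryOpts{
        Namespace: "prometheus",
        Name:      "example_rule_evaluation_duration_milliseconds",
        Help:      "The duration for a rule to execute.",
    },
    []string{"rule_type"},
)

func main() {
    prometheus.MustRegister(evalDuration)

    start := time.Now()
    // ... evaluate an alerting rule ...
    evalDuration.WithLabelValues("alerting").Observe(
        float64(time.Since(start) / time.Millisecond),
    )
}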
@@ -88,306 +92,365 @@ type Rule interface {
     HTMLSnippet(pathPrefix string) html_template.HTML
 }
 
-// The Manager manages recording and alerting rules.
-type Manager struct {
-    // Protects the rules list.
-    sync.Mutex
-    rules []Rule
+type Group struct {
+    name     string
+    interval time.Duration
+    rules    []Rule
+    opts     *ManagerOptions
 
-    done chan bool
-
-    interval    time.Duration
-    queryEngine *promql.Engine
-
-    sampleAppender      storage.SampleAppender
-    notificationHandler *notification.Handler
-
-    externalURL *url.URL
+    done       chan struct{}
+    terminated chan struct{}
 }
 
-// ManagerOptions bundles options for the Manager.
-type ManagerOptions struct {
-    EvaluationInterval time.Duration
-    QueryEngine        *promql.Engine
-
-    NotificationHandler *notification.Handler
-    SampleAppender      storage.SampleAppender
-
-    ExternalURL *url.URL
-}
-
-// NewManager returns an implementation of Manager, ready to be started
-// by calling the Run method.
-func NewManager(o *ManagerOptions) *Manager {
-    manager := &Manager{
-        rules: []Rule{},
-        done:  make(chan bool),
-
-        interval:            o.EvaluationInterval,
-        sampleAppender:      o.SampleAppender,
-        queryEngine:         o.QueryEngine,
-        notificationHandler: o.NotificationHandler,
-        externalURL:         o.ExternalURL,
+func newGroup(name string, opts *ManagerOptions) *Group {
+    return &Group{
+        name:       name,
+        opts:       opts,
+        done:       make(chan struct{}),
+        terminated: make(chan struct{}),
     }
-    return manager
 }
 
-// Run the rule manager's periodic rule evaluation.
-func (m *Manager) Run() {
-    defer log.Info("Rule manager stopped.")
+func (g *Group) run() {
+    defer close(g.terminated)
 
-    m.Lock()
-    lastInterval := m.interval
-    m.Unlock()
+    // Wait an initial amount to have consistently slotted intervals.
+    time.Sleep(g.offset())
 
-    ticker := time.NewTicker(lastInterval)
-    defer ticker.Stop()
+    iter := func() {
+        start := time.Now()
+        g.eval()
+
+        iterationDuration.Observe(float64(time.Since(start) / time.Millisecond))
+    }
+    iter()
+
+    tick := time.NewTicker(g.interval)
+    defer tick.Stop()
 
     for {
-        // The outer select clause makes sure that m.done is looked at
-        // first. Otherwise, if m.runIteration takes longer than
-        // m.interval, there is only a 50% chance that m.done will be
-        // looked at before the next m.runIteration call happens.
         select {
-        case <-m.done:
+        case <-g.done:
             return
         default:
             select {
-            case <-ticker.C:
-                start := time.Now()
-                m.runIteration()
-                iterationDuration.Observe(float64(time.Since(start) / time.Millisecond))
-
-                m.Lock()
-                if lastInterval != m.interval {
-                    ticker.Stop()
-                    ticker = time.NewTicker(m.interval)
-                    lastInterval = m.interval
-                }
-                m.Unlock()
-            case <-m.done:
+            case <-g.done:
                 return
+            case <-tick.C:
+                iter()
             }
         }
     }
 }
 
-// Stop the rule manager's rule evaluation cycles.
-func (m *Manager) Stop() {
-    log.Info("Stopping rule manager...")
-    m.done <- true
+func (g *Group) stop() {
+    close(g.done)
+    <-g.terminated
 }
 
-func (m *Manager) sendAlertNotifications(rule *AlertingRule, timestamp model.Time) {
-    activeAlerts := rule.ActiveAlerts()
-    if len(activeAlerts) == 0 {
-        return
+func (g *Group) fingerprint() model.Fingerprint {
+    l := model.LabelSet{"name": model.LabelValue(g.name)}
+    return l.Fingerprint()
 }
 
+func (g *Group) offset() time.Duration {
+    now := time.Now().UnixNano()
+
+    var (
+        base   = now - (now % int64(g.interval))
+        offset = uint64(g.fingerprint()) % uint64(g.interval)
+        next   = base + int64(offset)
+    )
+
+    if next < now {
+        next += int64(g.interval)
+    }
+    return time.Duration(next - now)
+}
 
-    alerts := make(model.Alerts, 0, len(activeAlerts))
-
-    for _, aa := range activeAlerts {
-        if aa.State != StateFiring {
-            // BUG: In the future, make AlertManager support pending alerts?
+func (g *Group) copyState(from *Group) {
+    for _, fromRule := range from.rules {
+        far, ok := fromRule.(*AlertingRule)
+        if !ok {
             continue
         }
 
-        // Provide the alert information to the template.
-        l := map[string]string{}
-        for k, v := range aa.Labels {
-            l[string(k)] = string(v)
-        }
-        tmplData := struct {
-            Labels map[string]string
-            Value  float64
-        }{
-            Labels: l,
-            Value:  float64(aa.Value),
-        }
-        // Inject some convenience variables that are easier to remember for users
-        // who are not used to Go's templating system.
-        defs := "{{$labels := .Labels}}{{$value := .Value}}"
-
-        expand := func(text model.LabelValue) model.LabelValue {
-            tmpl := template.NewTemplateExpander(defs+string(text), "__alert_"+rule.Name(), tmplData, timestamp, m.queryEngine, m.externalURL.Path)
-            result, err := tmpl.Expand()
-            if err != nil {
-                result = err.Error()
-                log.Warnf("Error expanding alert template %v with data '%v': %v", rule.Name(), tmplData, err)
+        for _, rule := range g.rules {
+            ar, ok := rule.(*AlertingRule)
+            if !ok {
+                continue
+            }
+            if far.Name() == ar.Name() {
+                ar.active = far.active
+            }
-            return model.LabelValue(result)
         }
 
-        labels := aa.Labels.Clone()
-        labels[model.AlertNameLabel] = model.LabelValue(rule.Name())
-
-        annotations := rule.annotations.Clone()
-        for an, av := range rule.annotations {
-            annotations[an] = expand(av)
-        }
-
-        alerts = append(alerts, &model.Alert{
-            StartsAt:     aa.ActiveSince.Time().Add(rule.holdDuration),
-            Labels:       labels,
-            Annotations:  annotations,
-            GeneratorURL: m.externalURL.String() + strutil.GraphLinkForExpression(rule.vector.String()),
-        })
-    }
-    m.notificationHandler.Send(alerts...)
 }
 
-func (m *Manager) runIteration() {
-    now := model.Now()
-    wg := sync.WaitGroup{}
+func (g *Group) eval() {
+    var (
+        now = model.Now()
+        wg  sync.WaitGroup
+    )
 
-    m.Lock()
-    rulesSnapshot := make([]Rule, len(m.rules))
-    copy(rulesSnapshot, m.rules)
-    m.Unlock()
-
-    for _, rule := range rulesSnapshot {
+    for _, rule := range g.rules {
         wg.Add(1)
         // BUG(julius): Look at fixing thundering herd.
         go func(rule Rule) {
             defer wg.Done()
 
             start := time.Now()
-            vector, err := rule.eval(now, m.queryEngine)
-            duration := time.Since(start)
-
+            vector, err := rule.eval(now, g.opts.QueryEngine)
             if err != nil {
                 evalFailures.Inc()
                 log.Warnf("Error while evaluating rule %q: %s", rule, err)
                 return
             }
+            var rtyp ruleType
+
             switch r := rule.(type) {
             case *AlertingRule:
-                m.sendAlertNotifications(r, now)
-                evalDuration.WithLabelValues(ruleTypeAlerting).Observe(
-                    float64(duration / time.Millisecond),
-                )
+                rtyp = ruleTypeRecording
+                g.sendAlerts(r, now)
+
             case *RecordingRule:
-                evalDuration.WithLabelValues(ruleTypeRecording).Observe(
-                    float64(duration / time.Millisecond),
-                )
+                rtyp = ruleTypeAlert
+
+            default:
+                panic(fmt.Errorf("unknown rule type: %T", rule))
             }
+
+            evalDuration.WithLabelValues(string(rtyp)).Observe(
+                float64(time.Since(start) / time.Millisecond),
+            )
 
             for _, s := range vector {
-                m.sampleAppender.Append(s)
+                g.opts.SampleAppender.Append(s)
             }
         }(rule)
     }
     wg.Wait()
 }
 
-// transferAlertState makes a copy of the state of alerting rules and returns a function
-// that restores them in the current state.
-func (m *Manager) transferAlertState() func() {
-
-    alertingRules := map[string]*AlertingRule{}
-    for _, r := range m.rules {
-        if ar, ok := r.(*AlertingRule); ok {
-            alertingRules[ar.name] = ar
+func (g *Group) sendAlerts(rule *AlertingRule, timestamp model.Time) error {
+    var alerts model.Alerts
+    for _, alert := range rule.ActiveAlerts() {
+        // Only send actually firing alerts.
+        if alert.State != StateFiring {
+            continue
         }
 
+        // Provide the alert information to the template.
+        l := make(map[string]string, len(alert.Labels))
+        for k, v := range alert.Labels {
+            l[string(k)] = string(v)
+        }
+
+        tmplData := struct {
+            Labels map[string]string
+            Value  float64
+        }{
+            Labels: l,
+            Value:  float64(alert.Value),
+        }
+        // Inject some convenience variables that are easier to remember for users
+        // who are not used to Go's templating system.
+        defs := "{{$labels := .Labels}}{{$value := .Value}}"
+
+        expand := func(text model.LabelValue) model.LabelValue {
+            tmpl := template.NewTemplateExpander(
+                defs+string(text),
+                "__alert_"+rule.Name(),
+                tmplData,
+                timestamp,
+                g.opts.QueryEngine,
+                g.opts.ExternalURL.Path,
+            )
+            result, err := tmpl.Expand()
+            if err != nil {
+                result = fmt.Sprintf("<error expanding template: %s>", err)
+                log.Warnf("Error expanding alert template %v with data '%v': %s", rule.Name(), tmplData, err)
+            }
+            return model.LabelValue(result)
+        }
+
+        labels := make(model.LabelSet, len(alert.Labels)+1)
+        for ln, lv := range alert.Labels {
+            labels[ln] = expand(lv)
+        }
+        labels[model.AlertNameLabel] = model.LabelValue(rule.Name())
+
+        annotations := make(model.LabelSet, len(rule.annotations))
+        for an, av := range rule.annotations {
+            annotations[an] = expand(av)
+        }
+
+        alerts = append(alerts, &model.Alert{
+            StartsAt:     alert.ActiveSince.Time().Add(rule.holdDuration),
+            Labels:       labels,
+            Annotations:  annotations,
+            GeneratorURL: g.opts.ExternalURL.String() + strutil.GraphLinkForExpression(rule.vector.String()),
+        })
+    }
 
-    return func() {
-        // Restore alerting rule state.
-        for _, r := range m.rules {
-            ar, ok := r.(*AlertingRule)
-            if !ok {
-                continue
-            }
-            if old, ok := alertingRules[ar.name]; ok {
-                ar.activeAlerts = old.activeAlerts
-            }
-        }
+    if len(alerts) > 0 {
+        g.opts.NotificationHandler.Send(alerts...)
     }
+
+    return nil
 }
 
+// The Manager manages recording and alerting rules.
+type Manager struct {
+    opts   *ManagerOptions
+    groups map[string]*Group
+    mtx    sync.RWMutex
+}
+
+// ManagerOptions bundles options for the Manager.
+type ManagerOptions struct {
+    ExternalURL         *url.URL
+    QueryEngine         *promql.Engine
+    NotificationHandler *notification.Handler
+    SampleAppender      storage.SampleAppender
+}
+
+// NewManager returns an implementation of Manager, ready to be started
+// by calling the Run method.
+func NewManager(o *ManagerOptions) *Manager {
+    manager := &Manager{
+        groups: map[string]*Group{},
+        opts:   o,
+    }
+    return manager
+}
+
+// Stop the rule manager's rule evaluation cycles.
+func (m *Manager) Stop() {
+    log.Info("Stopping rule manager...")
+
+    for _, eg := range m.groups {
+        eg.stop()
+    }
+
+    log.Info("Rule manager stopped.")
+}
+
 // ApplyConfig updates the rule manager's state as the config requires. If
 // loading the new rules failed the old rule set is restored. Returns true on success.
 func (m *Manager) ApplyConfig(conf *config.Config) bool {
-    m.Lock()
-    defer m.Unlock()
-
-    defer m.transferAlertState()()
-
-    success := true
-    m.interval = time.Duration(conf.GlobalConfig.EvaluationInterval)
-
-    rulesSnapshot := make([]Rule, len(m.rules))
-    copy(rulesSnapshot, m.rules)
-    m.rules = m.rules[:0]
+    m.mtx.Lock()
+    defer m.mtx.Unlock()
 
+    // Get all rule files and load the groups they define.
     var files []string
     for _, pat := range conf.RuleFiles {
         fs, err := filepath.Glob(pat)
         if err != nil {
             // The only error can be a bad pattern.
             log.Errorf("Error retrieving rule files for %s: %s", pat, err)
-            success = false
+            return false
         }
         files = append(files, fs...)
     }
-    if err := m.loadRuleFiles(files...); err != nil {
-        // If loading the new rules failed, restore the old rule set.
-        m.rules = rulesSnapshot
+
+    groups, err := m.loadGroups(files...)
+    if err != nil {
         log.Errorf("Error loading rules, previous rule set restored: %s", err)
-        success = false
+        return false
     }
 
-    return success
+    var wg sync.WaitGroup
+
+    for _, newg := range groups {
+        // To be replaced with a configurable per-group interval.
+        newg.interval = time.Duration(conf.GlobalConfig.EvaluationInterval)
+
+        wg.Add(1)
+
+        // If there is an old group with the same identifier, stop it and wait for
+        // it to finish the current iteration. Then copy its into the new group.
+        oldg, ok := m.groups[newg.name]
+        delete(m.groups, newg.name)
+
+        go func(newg *Group) {
+            if ok {
+                oldg.stop()
+                newg.copyState(oldg)
+            }
+            go newg.run()
+            wg.Done()
+        }(newg)
+    }
+
+    // Stop remaining old groups.
+    for _, oldg := range m.groups {
+        oldg.stop()
+    }
+
+    wg.Wait()
+    m.groups = groups
+
+    return true
 }
 
-// loadRuleFiles loads alerting and recording rules from the given files.
-func (m *Manager) loadRuleFiles(filenames ...string) error {
+func (m *Manager) loadGroups(filenames ...string) (map[string]*Group, error) {
+    groups := map[string]*Group{}
+
+    // Currently there is no group syntax implemented. Thus all rules
+    // are read into a single default group.
+    g := newGroup("default", m.opts)
+    groups[g.name] = g
+
     for _, fn := range filenames {
         content, err := ioutil.ReadFile(fn)
         if err != nil {
-            return err
+            return nil, err
         }
         stmts, err := promql.ParseStmts(string(content))
         if err != nil {
-            return fmt.Errorf("error parsing %s: %s", fn, err)
+            return nil, fmt.Errorf("error parsing %s: %s", fn, err)
        }
 
        for _, stmt := range stmts {
+            var rule Rule
+
            switch r := stmt.(type) {
            case *promql.AlertStmt:
-                rule := NewAlertingRule(r.Name, r.Expr, r.Duration, r.Labels, r.Annotations)
-                m.rules = append(m.rules, rule)
+                rule = NewAlertingRule(r.Name, r.Expr, r.Duration, r.Labels, r.Annotations)
+
            case *promql.RecordStmt:
-                rule := NewRecordingRule(r.Name, r.Expr, r.Labels)
-                m.rules = append(m.rules, rule)
+                rule = NewRecordingRule(r.Name, r.Expr, r.Labels)
+
            default:
                panic("retrieval.Manager.LoadRuleFiles: unknown statement type")
            }
+            g.rules = append(g.rules, rule)
        }
    }
-    return nil
+
+    return groups, nil
 }
 
 // Rules returns the list of the manager's rules.
 func (m *Manager) Rules() []Rule {
-    m.Lock()
-    defer m.Unlock()
+    m.mtx.RLock()
+    defer m.mtx.RUnlock()
 
+    var rules []Rule
+    for _, g := range m.groups {
+        rules = append(rules, g.rules...)
+    }
+
-    rules := make([]Rule, len(m.rules))
-    copy(rules, m.rules)
     return rules
 }
 
 // AlertingRules returns the list of the manager's alerting rules.
 func (m *Manager) AlertingRules() []*AlertingRule {
-    m.Lock()
-    defer m.Unlock()
+    m.mtx.RLock()
+    defer m.mtx.RUnlock()
 
     alerts := []*AlertingRule{}
-    for _, rule := range m.rules {
+    for _, rule := range m.Rules() {
         if alertingRule, ok := rule.(*AlertingRule); ok {
             alerts = append(alerts, alertingRule)
         }
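Group.offset staggers groups across the evaluation interval by hashing the group name, so a group always evaluates at the same slot within the interval and a reload does not shift its schedule. A self-contained sketch of that calculation, using a plain FNV hash in place of the label-set fingerprint:

package main

import (
    "fmt"
    "hash/fnv"
    "time"
)

// offset returns how long to sleep before the first evaluation so that a
// group named name always runs at the same slot within interval.
func offset(name string, interval time.Duration, now time.Time) time.Duration {
    h := fnv.New64a()
    h.Write([]byte(name))

    n := now.UnixNano()
    var (
        base = n - (n % int64(interval))
        off  = h.Sum64() % uint64(interval)
        next = base + int64(off)
    )
    if next < n {
        next += int64(interval)
    }
    return time.Duration(next - n)
}

func main() {
    now := time.Now()
    // Two groups get different, but stable, slots within the same interval.
    fmt.Println(offset("default", 15*time.Second, now))
    fmt.Println(offset("node-alerts", 15*time.Second, now))
}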
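On reload, each new group is started only after any old group of the same name has been stopped and its alert state copied over; old groups with no successor are stopped afterwards. A compressed sketch of that hand-off with stub types (not the package's own Group):

package main

import (
    "fmt"
    "sync"
)

type group struct {
    name   string
    active map[string]string // stand-in for per-rule alert state
}

func (g *group) stop()                  { fmt.Println("stopped", g.name) }
func (g *group) run()                   { fmt.Println("running", g.name) }
func (g *group) copyState(from *group)  { g.active = from.active }

// applyGroups swaps the currently running groups for a freshly loaded set,
// mirroring the structure of Manager.ApplyConfig above.
func applyGroups(current, loaded map[string]*group) map[string]*group {
    var wg sync.WaitGroup

    for _, newg := range loaded {
        oldg, ok := current[newg.name]
        delete(current, newg.name)

        wg.Add(1)
        go func(newg *group) {
            defer wg.Done()
            if ok {
                oldg.stop()
                newg.copyState(oldg)
            }
            go newg.run()
        }(newg)
    }
    // Groups that no longer exist in the new configuration are shut down.
    for _, oldg := range current {
        oldg.stop()
    }
    wg.Wait()
    return loaded
}

func main() {
    current := map[string]*group{"default": {name: "default", active: map[string]string{"HighErrorRate": "firing"}}}
    loaded := map[string]*group{"default": {name: "default"}}
    current = applyGroups(current, loaded)
    fmt.Println(current["default"].active["HighErrorRate"]) // firing: state survived the reload
}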
@@ -15,7 +15,7 @@ package rules
 
 import (
     "fmt"
-    "reflect"
+    // "reflect"
     "strings"
     "testing"
     "time"
@@ -138,46 +138,3 @@ func annotateWithTime(lines []string, timestamp model.Time) []string {
     }
     return annotatedLines
 }
-
-func TestTransferAlertState(t *testing.T) {
-    m := NewManager(&ManagerOptions{})
-
-    alert := &Alert{
-        Name:  "testalert",
-        State: StateFiring,
-    }
-
-    arule := AlertingRule{
-        name:         "test",
-        activeAlerts: map[model.Fingerprint]*Alert{},
-    }
-    aruleCopy := arule
-
-    m.rules = append(m.rules, &arule)
-
-    // Set an alert.
-    arule.activeAlerts[0] = alert
-
-    // Save state and get the restore function.
-    restore := m.transferAlertState()
-
-    // Remove arule from the rule list and add an unrelated rule and the
-    // stateless copy of arule.
-    m.rules = []Rule{
-        &AlertingRule{
-            name:         "test_other",
-            activeAlerts: map[model.Fingerprint]*Alert{},
-        },
-        &aruleCopy,
-    }
-
-    // Apply the restore function.
-    restore()
-
-    if ar := m.rules[0].(*AlertingRule); len(ar.activeAlerts) != 0 {
-        t.Fatalf("unexpected alert for unrelated alerting rule")
-    }
-    if ar := m.rules[1].(*AlertingRule); !reflect.DeepEqual(ar.activeAlerts[0], alert) {
-        t.Fatalf("alert state was not restored")
-    }
-}
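The deleted TestTransferAlertState covered the old Manager.transferAlertState; the closest equivalent for the new code exercises Group.copyState directly. A hedged sketch of such a test, written as if it lived in the rules package so the unexported fields are reachable:

package rules

import "testing"

func TestCopyAlertState(t *testing.T) {
    // An old group whose alerting rule has one active (firing) instance.
    oldRule := NewAlertingRule("test", nil, 0, nil, nil)
    oldRule.active[0] = &alertInstance{state: StateFiring}
    oldGroup := &Group{name: "default", rules: []Rule{oldRule}}

    // A freshly loaded group with a rule of the same name but empty state.
    newRule := NewAlertingRule("test", nil, 0, nil, nil)
    newGroup := &Group{name: "default", rules: []Rule{newRule}}

    newGroup.copyState(oldGroup)

    if ai := newRule.active[0]; ai == nil || ai.state != StateFiring {
        t.Fatalf("alert state was not carried over to the new rule")
    }
}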
@@ -40,7 +40,9 @@ func NewRecordingRule(name string, vector promql.Expr, labels model.LabelSet) *R
 }
 
 // Name returns the rule name.
-func (rule RecordingRule) Name() string { return rule.name }
+func (rule RecordingRule) Name() string {
+    return rule.name
+}
 
 // eval evaluates the rule and then overrides the metric names and labels accordingly.
 func (rule RecordingRule) eval(timestamp model.Time, engine *promql.Engine) (model.Vector, error) {
@@ -134,7 +134,7 @@ func webUiTemplatesAlertsHtml() (*asset, error) {
         return nil, err
     }
 
-    info := bindataFileInfo{name: "web/ui/templates/alerts.html", size: 1707, mode: os.FileMode(420), modTime: time.Unix(1450269200, 0)}
+    info := bindataFileInfo{name: "web/ui/templates/alerts.html", size: 1707, mode: os.FileMode(420), modTime: time.Unix(1450348618, 0)}
     a := &asset{bytes: bytes, info: info}
     return a, nil
 }