// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package rules

import (
	"fmt"
	"io/ioutil"
	"net/url"
	"path/filepath"
	"sync"
	"time"

	"golang.org/x/net/context"

	html_template "html/template"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/log"
	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/notifier"
	"github.com/prometheus/prometheus/promql"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/storage/local"
	"github.com/prometheus/prometheus/util/strutil"
)

// Constants for instrumentation.
const namespace = "prometheus"

var (
	evalDuration = prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Namespace: namespace,
			Name:      "rule_evaluation_duration_seconds",
			Help:      "The duration for a rule to execute.",
		},
		[]string{"rule_type"},
	)
	evalFailures = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: namespace,
			Name:      "rule_evaluation_failures_total",
			Help:      "The total number of rule evaluation failures.",
		},
		[]string{"rule_type"},
	)
	evalTotal = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: namespace,
			Name:      "rule_evaluations_total",
			Help:      "The total number of rule evaluations.",
		},
		[]string{"rule_type"},
	)
	iterationDuration = prometheus.NewSummary(prometheus.SummaryOpts{
		Namespace:  namespace,
		Name:       "evaluator_duration_seconds",
		Help:       "The duration of rule group evaluations.",
		Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001},
	})
	iterationsSkipped = prometheus.NewCounter(prometheus.CounterOpts{
		Namespace: namespace,
		Name:      "evaluator_iterations_skipped_total",
		Help:      "The total number of rule group evaluations skipped due to throttled metric storage.",
	})
	iterationsScheduled = prometheus.NewCounter(prometheus.CounterOpts{
		Namespace: namespace,
		Name:      "evaluator_iterations_total",
		Help:      "The total number of scheduled rule group evaluations, whether skipped or executed.",
	})
)
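
// With the namespace above, the fully qualified metric names take the form
// prometheus_<name>, e.g. prometheus_rule_evaluation_duration_seconds.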

func init() {
	evalTotal.WithLabelValues(string(ruleTypeAlert))
	evalTotal.WithLabelValues(string(ruleTypeRecording))
	evalFailures.WithLabelValues(string(ruleTypeAlert))
	evalFailures.WithLabelValues(string(ruleTypeRecording))

	prometheus.MustRegister(iterationDuration)
	prometheus.MustRegister(iterationsSkipped)
	prometheus.MustRegister(evalFailures)
	prometheus.MustRegister(evalDuration)
}

type ruleType string

const (
	ruleTypeAlert     = "alerting"
	ruleTypeRecording = "recording"
)

// A Rule encapsulates a vector expression which is evaluated at a specified
// interval and acted upon (currently either recorded or used for alerting).
type Rule interface {
	Name() string
	// eval evaluates the rule, including any associated recording or alerting actions.
	eval(model.Time, *promql.Engine, context.Context, string) (model.Vector, error)
	// String returns a human-readable string representation of the rule.
	String() string
	// HTMLSnippet returns a human-readable string representation of the rule,
	// decorated with HTML elements for use in the web frontend.
	HTMLSnippet(pathPrefix string) html_template.HTML
}
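
// AlertingRule and RecordingRule are the Rule implementations constructed by
// loadGroups below; both are defined elsewhere in this package.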

// Group is a set of rules that have a logical relation.
type Group struct {
	name     string
	interval time.Duration
	rules    []Rule
	opts     *ManagerOptions

	done       chan struct{}
	terminated chan struct{}
}

// newGroup returns a group with the given name and options; its rules and
// evaluation interval are filled in by the caller.
func newGroup(name string, opts *ManagerOptions) *Group {
	return &Group{
		name:       name,
		opts:       opts,
		done:       make(chan struct{}),
		terminated: make(chan struct{}),
	}
}

// run evaluates the group's rules on every interval, aligned to the group's
// offset, until stop is called.
func (g *Group) run() {
	defer close(g.terminated)

	// Wait an initial amount of time to have consistently slotted intervals.
	select {
	case <-time.After(g.offset()):
	case <-g.done:
		return
	}

	iter := func() {
		iterationsScheduled.Inc()
		if g.opts.SampleAppender.NeedsThrottling() {
			iterationsSkipped.Inc()
			return
		}
		start := time.Now()
		g.eval()

		iterationDuration.Observe(time.Since(start).Seconds())
	}
	iter()

	tick := time.NewTicker(g.interval)
	defer tick.Stop()

	for {
		select {
		case <-g.done:
			return
		default:
			// The nested select gives precedence to termination over starting
			// another evaluation when both channels are ready.
			select {
			case <-g.done:
				return
			case <-tick.C:
				iter()
			}
		}
	}
}

// stop signals the group to terminate and blocks until the current evaluation
// iteration has finished.
func (g *Group) stop() {
	close(g.done)
	<-g.terminated
}

// fingerprint returns a stable hash of the group's name.
func (g *Group) fingerprint() model.Fingerprint {
	l := model.LabelSet{"name": model.LabelValue(g.name)}
	return l.Fingerprint()
}

// offset returns the duration until the next consistently slotted evaluation
// interval. The slot is derived from the group's fingerprint so that different
// groups are spread across the evaluation interval.
func (g *Group) offset() time.Duration {
	now := time.Now().UnixNano()

	var (
		base   = now - (now % int64(g.interval))
		offset = uint64(g.fingerprint()) % uint64(g.interval)
		next   = base + int64(offset)
	)

	if next < now {
		next += int64(g.interval)
	}
	return time.Duration(next - now)
}
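
// For example, with a one-minute interval and a fingerprint that maps to an
// offset of 17s, the first evaluation is delayed until the next hh:mm:17
// boundary and subsequent evaluations follow one minute apart.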

// copyState copies the alerting rule state from the given group. It keeps the
// active alerts of matching rules so that their state survives a rule reload.
func (g *Group) copyState(from *Group) {
	for _, fromRule := range from.rules {
		far, ok := fromRule.(*AlertingRule)
		if !ok {
			continue
		}
		for _, rule := range g.rules {
			ar, ok := rule.(*AlertingRule)
			if !ok {
				continue
			}
			// TODO(fabxc): forbid same alert definitions that are not unique by
			// at least one static label or alertname?
			if far.equal(ar) {
				for fp, a := range far.active {
					ar.active[fp] = a
				}
			}
		}
	}
}

// typeForRule returns the rule type of r, used as a metric label value.
func typeForRule(r Rule) ruleType {
	switch r.(type) {
	case *AlertingRule:
		return ruleTypeAlert
	case *RecordingRule:
		return ruleTypeRecording
	}
	panic(fmt.Errorf("unknown rule type: %T", r))
}

// eval runs a single evaluation cycle in which all rules are evaluated in parallel.
// In the future a single group will be evaluated sequentially to properly handle
// rule dependencies.
func (g *Group) eval() {
	var (
		now = model.Now()
		wg  sync.WaitGroup
	)

	for _, rule := range g.rules {
		rtyp := string(typeForRule(rule))

		wg.Add(1)
		// BUG(julius): Look at fixing thundering herd.
		go func(rule Rule) {
			defer wg.Done()

			defer func(t time.Time) {
				evalDuration.WithLabelValues(rtyp).Observe(time.Since(t).Seconds())
			}(time.Now())

			evalTotal.WithLabelValues(rtyp).Inc()

			vector, err := rule.eval(now, g.opts.QueryEngine, g.opts.QueryCtx, g.opts.ExternalURL.Path)
			if err != nil {
				// Canceled queries are intentional terminations. They normally
				// happen on shutdown, so we skip logging such errors here.
				if _, ok := err.(promql.ErrQueryCanceled); !ok {
					log.Warnf("Error while evaluating rule %q: %s", rule, err)
				}
				evalFailures.WithLabelValues(rtyp).Inc()
				return
			}

			if ar, ok := rule.(*AlertingRule); ok {
				g.sendAlerts(ar, now)
			}
			var (
				numOutOfOrder = 0
				numDuplicates = 0
			)
			for _, s := range vector {
				if err := g.opts.SampleAppender.Append(s); err != nil {
					switch err {
					case local.ErrOutOfOrderSample:
						numOutOfOrder++
						log.With("sample", s).With("error", err).Debug("Rule evaluation result discarded")
					case local.ErrDuplicateSampleForTimestamp:
						numDuplicates++
						log.With("sample", s).With("error", err).Debug("Rule evaluation result discarded")
					default:
						log.With("sample", s).With("error", err).Warn("Rule evaluation result discarded")
					}
				}
			}
			if numOutOfOrder > 0 {
				log.With("numDropped", numOutOfOrder).Warn("Error on ingesting out-of-order result from rule evaluation")
			}
			if numDuplicates > 0 {
				log.With("numDropped", numDuplicates).Warn("Error on ingesting results from rule evaluation with different value but same timestamp")
			}
		}(rule)
	}
	wg.Wait()
}

// sendAlerts sends alert notifications for the given rule.
func (g *Group) sendAlerts(rule *AlertingRule, timestamp model.Time) error {
	var alerts model.Alerts

	for _, alert := range rule.currentAlerts() {
		// Only send actually firing alerts.
		if alert.State == StatePending {
			continue
		}

		a := &model.Alert{
			StartsAt:     alert.ActiveAt.Add(rule.holdDuration).Time(),
			Labels:       alert.Labels,
			Annotations:  alert.Annotations,
			GeneratorURL: g.opts.ExternalURL.String() + strutil.GraphLinkForExpression(rule.vector.String()),
		}
		if alert.ResolvedAt != 0 {
			a.EndsAt = alert.ResolvedAt.Time()
		}

		alerts = append(alerts, a)
	}

	if len(alerts) > 0 {
		g.opts.Notifier.Send(alerts...)
	}

	return nil
}
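
// Note that StartsAt is the moment the alert crossed its hold duration, i.e.
// when it entered the firing state, and EndsAt is only set once the alert has
// resolved.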

// The Manager manages recording and alerting rules.
type Manager struct {
	opts   *ManagerOptions
	groups map[string]*Group
	mtx    sync.RWMutex
	block  chan struct{}
}

// ManagerOptions bundles options for the Manager.
type ManagerOptions struct {
	ExternalURL *url.URL
	QueryEngine *promql.Engine
	// QueryCtx is the base context for all rule queries; cancelling it
	// terminates any in-flight evaluations.
	QueryCtx       context.Context
	Notifier       *notifier.Notifier
	SampleAppender storage.SampleAppender
}

// NewManager returns an implementation of Manager, ready to be started
// by calling the Run method.
func NewManager(o *ManagerOptions) *Manager {
	manager := &Manager{
		groups: map[string]*Group{},
		opts:   o,
		block:  make(chan struct{}),
	}
	return manager
}
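
// A minimal sketch of how a caller might wire the manager together. The
// concrete engine, notifier, appender, URL, and config values are assumed to
// exist elsewhere; the names below are illustrative only:
//
//	ruleManager := NewManager(&ManagerOptions{
//		SampleAppender: sampleAppender,
//		Notifier:       alertNotifier,
//		QueryEngine:    queryEngine,
//		QueryCtx:       queryCtx,
//		ExternalURL:    externalURL,
//	})
//	if err := ruleManager.ApplyConfig(conf); err != nil {
//		// Handle the error; the old rule set (if any) is kept.
//	}
//	ruleManager.Run()
//	defer ruleManager.Stop()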

// Run starts processing of the rule manager.
func (m *Manager) Run() {
	close(m.block)
}

// Stop the rule manager's rule evaluation cycles. It blocks until all groups
// have finished their in-flight evaluation iteration.
func (m *Manager) Stop() {
	log.Info("Stopping rule manager...")

	for _, eg := range m.groups {
		eg.stop()
	}

	log.Info("Rule manager stopped.")
}

// ApplyConfig updates the rule manager's state as the config requires. If
// loading the new rules fails, the old rule set is restored.
func (m *Manager) ApplyConfig(conf *config.Config) error {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	// Get all rule files and load the groups they define.
	var files []string
	for _, pat := range conf.RuleFiles {
		fs, err := filepath.Glob(pat)
		if err != nil {
			// The only possible error is a bad pattern.
			return fmt.Errorf("error retrieving rule files for %s: %s", pat, err)
		}
		files = append(files, fs...)
	}

	groups, err := m.loadGroups(files...)
	if err != nil {
		return fmt.Errorf("error loading rules, previous rule set restored: %s", err)
	}

	var wg sync.WaitGroup

	for _, newg := range groups {
		// To be replaced with a configurable per-group interval.
		newg.interval = time.Duration(conf.GlobalConfig.EvaluationInterval)

		wg.Add(1)

		// If there is an old group with the same identifier, stop it and wait for
		// it to finish the current iteration. Then copy its state into the new group.
		oldg, ok := m.groups[newg.name]
		delete(m.groups, newg.name)

		go func(newg *Group) {
			if ok {
				oldg.stop()
				newg.copyState(oldg)
			}
			go func() {
				// Wait with starting evaluation until the rule manager
				// is told to run. This is necessary to avoid running
				// queries against a bootstrapping storage.
				<-m.block
				newg.run()
			}()
			wg.Done()
		}(newg)
	}

	// Stop remaining old groups.
	for _, oldg := range m.groups {
		oldg.stop()
	}

	wg.Wait()
	m.groups = groups

	return nil
}

// loadGroups reads groups from a list of files.
// As there's currently no group syntax, a single group named "default" containing
// all rules will be returned.
func (m *Manager) loadGroups(filenames ...string) (map[string]*Group, error) {
	groups := map[string]*Group{}

	// Currently there is no group syntax implemented. Thus all rules
	// are read into a single default group.
	g := newGroup("default", m.opts)
	groups[g.name] = g

	for _, fn := range filenames {
		content, err := ioutil.ReadFile(fn)
		if err != nil {
			return nil, err
		}
		stmts, err := promql.ParseStmts(string(content))
		if err != nil {
			return nil, fmt.Errorf("error parsing %s: %s", fn, err)
		}

		for _, stmt := range stmts {
			var rule Rule

			switch r := stmt.(type) {
			case *promql.AlertStmt:
				rule = NewAlertingRule(r.Name, r.Expr, r.Duration, r.Labels, r.Annotations)

			case *promql.RecordStmt:
				rule = NewRecordingRule(r.Name, r.Expr, r.Labels)

			default:
				panic("rules.Manager.loadGroups: unknown statement type")
			}
			g.rules = append(g.rules, rule)
		}
	}

	return groups, nil
}
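
// For illustration only (the syntax lives in the promql parser, not here), a
// rule file passed to loadGroups might contain statements such as:
//
//	job:http_requests:rate5m = sum(rate(http_requests_total[5m])) by (job)
//
//	ALERT HighErrorRate
//	  IF job:request_errors:rate5m / job:http_requests:rate5m > 0.5
//	  FOR 10m
//	  LABELS { severity = "page" }
//	  ANNOTATIONS { summary = "High error rate on {{ $labels.job }}" }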

// Rules returns the list of the manager's rules.
func (m *Manager) Rules() []Rule {
	m.mtx.RLock()
	defer m.mtx.RUnlock()

	var rules []Rule
	for _, g := range m.groups {
		rules = append(rules, g.rules...)
	}

	return rules
}

// AlertingRules returns the list of the manager's alerting rules.
func (m *Manager) AlertingRules() []*AlertingRule {
	m.mtx.RLock()
	defer m.mtx.RUnlock()

	alerts := []*AlertingRule{}
	for _, rule := range m.Rules() {
		if alertingRule, ok := rule.(*AlertingRule); ok {
			alerts = append(alerts, alertingRule)
		}
	}
	return alerts
}