2015-01-21 11:07:45 -08:00
|
|
|
// Copyright 2013 The Prometheus Authors
|
2013-01-07 14:24:26 -08:00
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
2015-03-30 10:43:19 -07:00
|
|
|
package rules
|
2013-01-07 14:24:26 -08:00
|
|
|
|
|
|
|
import (
|
2017-10-24 21:21:42 -07:00
|
|
|
"context"
|
2022-06-17 00:54:25 -07:00
|
|
|
"errors"
|
|
|
|
"fmt"
|
2015-06-30 05:38:01 -07:00
|
|
|
"net/url"
|
2023-09-21 13:53:51 -07:00
|
|
|
"strings"
|
2013-04-17 05:42:15 -07:00
|
|
|
"sync"
|
2013-01-07 14:24:26 -08:00
|
|
|
"time"
|
|
|
|
|
2021-06-11 09:17:59 -07:00
|
|
|
"github.com/go-kit/log"
|
|
|
|
"github.com/go-kit/log/level"
|
2014-06-18 10:43:15 -07:00
|
|
|
"github.com/prometheus/client_golang/prometheus"
|
2023-07-02 15:16:26 -07:00
|
|
|
"golang.org/x/exp/slices"
|
2019-03-25 16:01:12 -07:00
|
|
|
|
2021-11-08 06:23:17 -08:00
|
|
|
"github.com/prometheus/prometheus/model/labels"
|
|
|
|
"github.com/prometheus/prometheus/model/rulefmt"
|
2022-10-07 07:58:17 -07:00
|
|
|
"github.com/prometheus/prometheus/notifier"
|
2015-03-30 10:43:19 -07:00
|
|
|
"github.com/prometheus/prometheus/promql"
|
2020-02-03 10:06:39 -08:00
|
|
|
"github.com/prometheus/prometheus/promql/parser"
|
2015-03-14 19:36:15 -07:00
|
|
|
"github.com/prometheus/prometheus/storage"
|
2022-10-07 07:58:17 -07:00
|
|
|
"github.com/prometheus/prometheus/util/strutil"
|
2013-06-25 05:02:27 -07:00
|
|
|
)
|
2013-01-07 14:24:26 -08:00
|
|
|
|
2017-11-23 04:04:54 -08:00
|
|
|
// QueryFunc processes PromQL queries.
// It evaluates the query string q at the instant t and returns the resulting
// vector, or an error if the query could not be executed.
type QueryFunc func(ctx context.Context, q string, t time.Time) (promql.Vector, error)
|
|
|
|
|
|
|
|
// EngineQueryFunc returns a new query function that executes instant queries against
|
|
|
|
// the given engine.
|
2018-07-17 20:54:33 -07:00
|
|
|
// It converts scalar into vector results.
|
2018-01-09 08:44:23 -08:00
|
|
|
func EngineQueryFunc(engine *promql.Engine, q storage.Queryable) QueryFunc {
|
2017-11-23 04:04:54 -08:00
|
|
|
return func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) {
|
2023-04-17 21:32:38 -07:00
|
|
|
q, err := engine.NewInstantQuery(ctx, q, nil, qs, t)
|
2017-11-23 04:04:54 -08:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
res := q.Exec(ctx)
|
|
|
|
if res.Err != nil {
|
|
|
|
return nil, res.Err
|
|
|
|
}
|
|
|
|
switch v := res.Value.(type) {
|
|
|
|
case promql.Vector:
|
|
|
|
return v, nil
|
|
|
|
case promql.Scalar:
|
|
|
|
return promql.Vector{promql.Sample{
|
promql: Separate `Point` into `FPoint` and `HPoint`
In other words: Instead of having a “polymorphous” `Point` that can
either contain a float value or a histogram value, use an `FPoint` for
floats and an `HPoint` for histograms.
This seemingly small change has a _lot_ of repercussions throughout
the codebase.
The idea here is to avoid the increase in size of `Point` arrays that
happened after native histograms had been added.
The higher-level data structures (`Sample`, `Series`, etc.) are still
“polymorphous”. The same idea could be applied to them, but at each
step the trade-offs needed to be evaluated.
The idea with this change is to do the minimum necessary to get back
to pre-histogram performance for functions that do not touch
histograms. Here are comparisons for the `changes` function. The test
data doesn't include histograms yet. Ideally, there would be no change
in the benchmark result at all.
First runtime v2.39 compared to directly prior to this commit:
```
name old time/op new time/op delta
RangeQuery/expr=changes(a_one[1d]),steps=1-16 391µs ± 2% 542µs ± 1% +38.58% (p=0.000 n=9+8)
RangeQuery/expr=changes(a_one[1d]),steps=10-16 452µs ± 2% 617µs ± 2% +36.48% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_one[1d]),steps=100-16 1.12ms ± 1% 1.36ms ± 2% +21.58% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_one[1d]),steps=1000-16 7.83ms ± 1% 8.94ms ± 1% +14.21% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_ten[1d]),steps=1-16 2.98ms ± 0% 3.30ms ± 1% +10.67% (p=0.000 n=9+10)
RangeQuery/expr=changes(a_ten[1d]),steps=10-16 3.66ms ± 1% 4.10ms ± 1% +11.82% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_ten[1d]),steps=100-16 10.5ms ± 0% 11.8ms ± 1% +12.50% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_ten[1d]),steps=1000-16 77.6ms ± 1% 87.4ms ± 1% +12.63% (p=0.000 n=9+9)
RangeQuery/expr=changes(a_hundred[1d]),steps=1-16 30.4ms ± 2% 32.8ms ± 1% +8.01% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=10-16 37.1ms ± 2% 40.6ms ± 2% +9.64% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=100-16 105ms ± 1% 117ms ± 1% +11.69% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=1000-16 783ms ± 3% 876ms ± 1% +11.83% (p=0.000 n=9+10)
```
And then runtime v2.39 compared to after this commit:
```
name old time/op new time/op delta
RangeQuery/expr=changes(a_one[1d]),steps=1-16 391µs ± 2% 547µs ± 1% +39.84% (p=0.000 n=9+8)
RangeQuery/expr=changes(a_one[1d]),steps=10-16 452µs ± 2% 616µs ± 2% +36.15% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_one[1d]),steps=100-16 1.12ms ± 1% 1.26ms ± 1% +12.20% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_one[1d]),steps=1000-16 7.83ms ± 1% 7.95ms ± 1% +1.59% (p=0.000 n=10+8)
RangeQuery/expr=changes(a_ten[1d]),steps=1-16 2.98ms ± 0% 3.38ms ± 2% +13.49% (p=0.000 n=9+10)
RangeQuery/expr=changes(a_ten[1d]),steps=10-16 3.66ms ± 1% 4.02ms ± 1% +9.80% (p=0.000 n=10+9)
RangeQuery/expr=changes(a_ten[1d]),steps=100-16 10.5ms ± 0% 10.8ms ± 1% +3.08% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_ten[1d]),steps=1000-16 77.6ms ± 1% 78.1ms ± 1% +0.58% (p=0.035 n=9+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=1-16 30.4ms ± 2% 33.5ms ± 4% +10.18% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=10-16 37.1ms ± 2% 40.0ms ± 1% +7.98% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=100-16 105ms ± 1% 107ms ± 1% +1.92% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=1000-16 783ms ± 3% 775ms ± 1% -1.02% (p=0.019 n=9+9)
```
In summary, the runtime doesn't really improve with this change for
queries with just a few steps. For queries with many steps, this
commit essentially reinstates the old performance. This is good
because the many-step queries are the one that matter most (longest
absolute runtime).
In terms of allocations, though, this commit doesn't make a dent at
all (numbers not shown). The reason is that most of the allocations
happen in the sampleRingIterator (in the storage package), which has
to be addressed in a separate commit.
Signed-off-by: beorn7 <beorn@grafana.com>
2022-10-28 07:58:40 -07:00
|
|
|
T: v.T,
|
|
|
|
F: v.V,
|
2017-11-23 04:04:54 -08:00
|
|
|
Metric: labels.Labels{},
|
|
|
|
}}, nil
|
|
|
|
default:
|
2019-03-25 16:01:12 -07:00
|
|
|
return nil, errors.New("rule result is not a vector or scalar")
|
2017-11-23 04:04:54 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-04-04 11:21:13 -07:00
|
|
|
// DefaultEvalIterationFunc is the default implementation of
|
|
|
|
// GroupEvalIterationFunc that is periodically invoked to evaluate the rules
|
|
|
|
// in a group at a given point in time and updates Group state and metrics
|
|
|
|
// accordingly. Custom GroupEvalIterationFunc implementations are recommended
|
|
|
|
// to invoke this function as well, to ensure correct Group state and metrics
|
|
|
|
// are maintained.
|
|
|
|
func DefaultEvalIterationFunc(ctx context.Context, g *Group, evalTimestamp time.Time) {
|
|
|
|
g.metrics.IterationsScheduled.WithLabelValues(GroupKey(g.file, g.name)).Inc()
|
|
|
|
|
|
|
|
start := time.Now()
|
|
|
|
g.Eval(ctx, evalTimestamp)
|
|
|
|
timeSinceStart := time.Since(start)
|
|
|
|
|
|
|
|
g.metrics.IterationDuration.Observe(timeSinceStart.Seconds())
|
|
|
|
g.setEvaluationTime(timeSinceStart)
|
|
|
|
g.setLastEvaluation(start)
|
|
|
|
g.setLastEvalTimestamp(evalTimestamp)
|
2022-03-28 17:16:46 -07:00
|
|
|
}
|
|
|
|
|
2015-12-14 08:40:40 -08:00
|
|
|
// The Manager manages recording and alerting rules.
type Manager struct {
	opts   *ManagerOptions   // configuration supplied at construction time
	groups map[string]*Group // active groups, keyed by GroupKey(file, name)
	mtx    sync.RWMutex      // guards groups and serializes Update/Stop
	block  chan struct{}     // closed by start() to release groups waiting to run
	done   chan struct{}     // closed by Stop() to signal shutdown
	// restored is set after the first successful Update; new groups loaded
	// before that point restore alert state from storage.
	restored bool

	logger log.Logger
}
|
2013-07-30 08:18:07 -07:00
|
|
|
|
2017-11-23 23:59:05 -08:00
|
|
|
// NotifyFunc sends notifications about a set of alerts generated by the given expression.
type NotifyFunc func(ctx context.Context, expr string, alerts ...*Alert)
|
2017-11-23 23:59:05 -08:00
|
|
|
|
2015-12-14 08:40:40 -08:00
|
|
|
// ManagerOptions bundles options for the Manager.
type ManagerOptions struct {
	ExternalURL     *url.URL              // base URL used when building alert generator URLs
	QueryFunc       QueryFunc             // evaluates rule expressions
	NotifyFunc      NotifyFunc            // dispatches firing/resolved alerts
	Context         context.Context       // context passed to group run loops
	Appendable      storage.Appendable    // sink for rule evaluation results
	Queryable       storage.Queryable     // source for alert state restoration queries
	Logger          log.Logger
	Registerer      prometheus.Registerer // registry for the manager's own metrics
	OutageTolerance time.Duration         // how long an outage may last while still restoring "for" state
	ForGracePeriod  time.Duration         // minimum remaining "for" duration after restoration
	ResendDelay     time.Duration         // minimum interval between re-sending an alert
	GroupLoader     GroupLoader           // defaults to FileLoader when nil (see NewManager)

	// Metrics is created from Registerer by NewManager when nil.
	Metrics *Metrics
}
|
2015-03-14 19:36:15 -07:00
|
|
|
|
2015-12-14 08:40:40 -08:00
|
|
|
// NewManager returns an implementation of Manager, ready to be started
|
|
|
|
// by calling the Run method.
|
|
|
|
func NewManager(o *ManagerOptions) *Manager {
|
2019-01-03 04:07:06 -08:00
|
|
|
if o.Metrics == nil {
|
|
|
|
o.Metrics = NewGroupMetrics(o.Registerer)
|
2018-12-28 02:20:29 -08:00
|
|
|
}
|
|
|
|
|
2020-07-22 07:19:34 -07:00
|
|
|
if o.GroupLoader == nil {
|
|
|
|
o.GroupLoader = FileLoader{}
|
|
|
|
}
|
|
|
|
|
2017-11-30 06:36:34 -08:00
|
|
|
m := &Manager{
|
2015-12-14 08:40:40 -08:00
|
|
|
groups: map[string]*Group{},
|
|
|
|
opts: o,
|
2016-01-08 08:51:22 -08:00
|
|
|
block: make(chan struct{}),
|
2020-02-12 07:22:18 -08:00
|
|
|
done: make(chan struct{}),
|
2017-06-16 03:22:44 -07:00
|
|
|
logger: o.Logger,
|
2013-01-07 14:24:26 -08:00
|
|
|
}
|
2018-12-28 02:20:29 -08:00
|
|
|
|
2017-11-30 06:36:34 -08:00
|
|
|
return m
|
2013-01-07 14:24:26 -08:00
|
|
|
}
|
|
|
|
|
2020-07-21 15:13:24 -07:00
|
|
|
// Run starts processing of the rule manager. It is blocking.
// It unblocks the rule groups' evaluation loops and then waits until
// Stop closes the done channel.
func (m *Manager) Run() {
	level.Info(m.logger).Log("msg", "Starting rule manager...")
	m.start()
	// Block until Stop() is called.
	<-m.done
}
|
|
|
|
|
|
|
|
// start releases all rule group goroutines that are waiting on m.block
// before beginning evaluation (see Update).
func (m *Manager) start() {
	close(m.block)
}
|
|
|
|
|
2015-12-14 08:40:40 -08:00
|
|
|
// Stop the rule manager's rule evaluation cycles.
// The mutex is held for the whole shutdown to serialize against a
// concurrent Update (see the historical race between reload and stop).
func (m *Manager) Stop() {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	level.Info(m.logger).Log("msg", "Stopping rule manager...")

	// Stop every group's evaluation loop.
	for _, eg := range m.groups {
		eg.stop()
	}

	// Shut down the groups waiting multiple evaluation intervals to write
	// staleness markers.
	close(m.done)

	level.Info(m.logger).Log("msg", "Rule manager stopped")
}
|
|
|
|
|
2017-11-23 06:48:14 -08:00
|
|
|
// Update the rule manager's state as the config requires. If
|
2016-07-11 07:24:54 -07:00
|
|
|
// loading the new rules failed the old rule set is restored.
|
2023-04-04 11:21:13 -07:00
|
|
|
func (m *Manager) Update(interval time.Duration, files []string, externalLabels labels.Labels, externalURL string, groupEvalIterationFunc GroupEvalIterationFunc) error {
|
2015-12-14 08:40:40 -08:00
|
|
|
m.mtx.Lock()
|
|
|
|
defer m.mtx.Unlock()
|
2015-05-12 07:52:56 -07:00
|
|
|
|
2023-04-04 11:21:13 -07:00
|
|
|
groups, errs := m.LoadGroups(interval, externalLabels, externalURL, groupEvalIterationFunc, files...)
|
2022-03-28 17:16:46 -07:00
|
|
|
|
2017-06-16 04:14:33 -07:00
|
|
|
if errs != nil {
|
|
|
|
for _, e := range errs {
|
2017-08-11 11:45:52 -07:00
|
|
|
level.Error(m.logger).Log("msg", "loading groups failed", "err", e)
|
2017-06-16 04:14:33 -07:00
|
|
|
}
|
|
|
|
return errors.New("error loading rules, previous rule set restored")
|
2015-05-12 07:52:56 -07:00
|
|
|
}
|
2018-08-02 03:18:24 -07:00
|
|
|
m.restored = true
|
2015-06-23 03:07:53 -07:00
|
|
|
|
2015-12-14 08:40:40 -08:00
|
|
|
var wg sync.WaitGroup
|
|
|
|
for _, newg := range groups {
|
2019-12-19 02:41:11 -08:00
|
|
|
// If there is an old group with the same identifier,
|
|
|
|
// check if new group equals with the old group, if yes then skip it.
|
|
|
|
// If not equals, stop it and wait for it to finish the current iteration.
|
|
|
|
// Then copy it into the new group.
|
2020-09-13 08:07:59 -07:00
|
|
|
gn := GroupKey(newg.file, newg.name)
|
2017-11-01 04:58:00 -07:00
|
|
|
oldg, ok := m.groups[gn]
|
|
|
|
delete(m.groups, gn)
|
2015-12-14 08:40:40 -08:00
|
|
|
|
2019-12-19 02:41:11 -08:00
|
|
|
if ok && oldg.Equals(newg) {
|
|
|
|
groups[gn] = oldg
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
wg.Add(1)
|
2020-03-01 10:32:14 -08:00
|
|
|
go func(newg *Group) {
|
2015-12-14 08:40:40 -08:00
|
|
|
if ok {
|
|
|
|
oldg.stop()
|
2018-07-18 06:14:38 -07:00
|
|
|
newg.CopyState(oldg)
|
2015-12-14 08:40:40 -08:00
|
|
|
}
|
|
|
|
wg.Done()
|
2020-09-21 03:29:03 -07:00
|
|
|
// Wait with starting evaluation until the rule manager
|
|
|
|
// is told to run. This is necessary to avoid running
|
|
|
|
// queries against a bootstrapping storage.
|
|
|
|
<-m.block
|
|
|
|
newg.run(m.opts.Context)
|
2020-03-01 10:32:14 -08:00
|
|
|
}(newg)
|
2015-12-14 08:40:40 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Stop remaining old groups.
|
2020-02-12 07:22:18 -08:00
|
|
|
wg.Add(len(m.groups))
|
2020-01-27 04:41:32 -08:00
|
|
|
for n, oldg := range m.groups {
|
2020-02-12 07:22:18 -08:00
|
|
|
go func(n string, g *Group) {
|
2020-04-18 05:32:18 -07:00
|
|
|
g.markStale = true
|
|
|
|
g.stop()
|
2020-02-12 07:22:18 -08:00
|
|
|
if m := g.metrics; m != nil {
|
2021-04-30 10:25:34 -07:00
|
|
|
m.IterationsMissed.DeleteLabelValues(n)
|
|
|
|
m.IterationsScheduled.DeleteLabelValues(n)
|
|
|
|
m.EvalTotal.DeleteLabelValues(n)
|
|
|
|
m.EvalFailures.DeleteLabelValues(n)
|
|
|
|
m.GroupInterval.DeleteLabelValues(n)
|
|
|
|
m.GroupLastEvalTime.DeleteLabelValues(n)
|
|
|
|
m.GroupLastDuration.DeleteLabelValues(n)
|
|
|
|
m.GroupRules.DeleteLabelValues(n)
|
|
|
|
m.GroupSamples.DeleteLabelValues((n))
|
2020-02-12 07:22:18 -08:00
|
|
|
}
|
|
|
|
wg.Done()
|
|
|
|
}(n, oldg)
|
2015-12-14 08:40:40 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
wg.Wait()
|
|
|
|
m.groups = groups
|
|
|
|
|
2016-07-11 07:24:54 -07:00
|
|
|
return nil
|
2015-05-12 07:52:56 -07:00
|
|
|
}
|
|
|
|
|
2020-07-22 07:19:34 -07:00
|
|
|
// GroupLoader is responsible for loading rule groups from arbitrary sources and parsing them.
type GroupLoader interface {
	// Load reads and parses the rule groups identified by identifier
	// (for FileLoader, a file path). It returns all encountered errors.
	Load(identifier string) (*rulefmt.RuleGroups, []error)
	// Parse parses a single rule expression into a PromQL AST.
	Parse(query string) (parser.Expr, error)
}
|
|
|
|
|
|
|
|
// FileLoader is the default GroupLoader implementation. It defers to rulefmt.ParseFile
// and parser.ParseExpr.
type FileLoader struct{}

// Load reads rule groups from the file at identifier.
func (FileLoader) Load(identifier string) (*rulefmt.RuleGroups, []error) {
	return rulefmt.ParseFile(identifier)
}

// Parse parses query as a PromQL expression.
func (FileLoader) Parse(query string) (parser.Expr, error) { return parser.ParseExpr(query) }
|
|
|
|
|
2018-09-25 09:06:26 -07:00
|
|
|
// LoadGroups reads groups from a list of files.
// Each group is keyed by GroupKey(filename, group name). On any load or
// parse error, a nil map and the collected errors are returned.
func (m *Manager) LoadGroups(
	interval time.Duration, externalLabels labels.Labels, externalURL string, groupEvalIterationFunc GroupEvalIterationFunc, filenames ...string,
) (map[string]*Group, []error) {
	groups := make(map[string]*Group)

	// Only the very first load (before any successful Update) should
	// restore alert state from storage.
	shouldRestore := !m.restored

	for _, fn := range filenames {
		rgs, errs := m.opts.GroupLoader.Load(fn)
		if errs != nil {
			return nil, errs
		}

		for _, rg := range rgs.Groups {
			// A group-level interval overrides the manager-wide default.
			itv := interval
			if rg.Interval != 0 {
				itv = time.Duration(rg.Interval)
			}

			rules := make([]Rule, 0, len(rg.Rules))
			for _, r := range rg.Rules {
				expr, err := m.opts.GroupLoader.Parse(r.Expr.Value)
				if err != nil {
					return nil, []error{fmt.Errorf("%s: %w", fn, err)}
				}

				// A non-empty Alert field marks an alerting rule;
				// otherwise it is a recording rule.
				if r.Alert.Value != "" {
					rules = append(rules, NewAlertingRule(
						r.Alert.Value,
						expr,
						time.Duration(r.For),
						time.Duration(r.KeepFiringFor),
						labels.FromMap(r.Labels),
						labels.FromMap(r.Annotations),
						externalLabels,
						externalURL,
						m.restored,
						log.With(m.logger, "alert", r.Alert),
					))
					continue
				}
				rules = append(rules, NewRecordingRule(
					r.Record.Value,
					expr,
					labels.FromMap(r.Labels),
				))
			}

			groups[GroupKey(fn, rg.Name)] = NewGroup(GroupOptions{
				Name:              rg.Name,
				File:              fn,
				Interval:          itv,
				Limit:             rg.Limit,
				Rules:             rules,
				ShouldRestore:     shouldRestore,
				Opts:              m.opts,
				done:              m.done,
				EvalIterationFunc: groupEvalIterationFunc,
			})
		}
	}

	return groups, nil
}
|
2013-06-11 02:00:55 -07:00
|
|
|
|
2017-06-14 03:39:14 -07:00
|
|
|
// RuleGroups returns the list of manager's rule groups.
|
|
|
|
func (m *Manager) RuleGroups() []*Group {
|
|
|
|
m.mtx.RLock()
|
|
|
|
defer m.mtx.RUnlock()
|
|
|
|
|
|
|
|
rgs := make([]*Group, 0, len(m.groups))
|
|
|
|
for _, g := range m.groups {
|
|
|
|
rgs = append(rgs, g)
|
|
|
|
}
|
|
|
|
|
2023-09-21 13:53:51 -07:00
|
|
|
slices.SortFunc(rgs, func(a, b *Group) int {
|
|
|
|
fileCompare := strings.Compare(a.file, b.file)
|
|
|
|
|
|
|
|
// If its 0, then the file names are the same.
|
|
|
|
// Lets look at the group names in that case.
|
|
|
|
if fileCompare != 0 {
|
|
|
|
return fileCompare
|
2019-02-23 00:51:44 -08:00
|
|
|
}
|
2023-09-21 13:53:51 -07:00
|
|
|
return strings.Compare(a.name, b.name)
|
2017-06-14 03:39:14 -07:00
|
|
|
})
|
|
|
|
|
|
|
|
return rgs
|
|
|
|
}
|
|
|
|
|
2015-04-29 02:08:56 -07:00
|
|
|
// Rules returns the list of the manager's rules.
|
2015-04-29 01:26:49 -07:00
|
|
|
func (m *Manager) Rules() []Rule {
|
2015-12-14 08:40:40 -08:00
|
|
|
m.mtx.RLock()
|
|
|
|
defer m.mtx.RUnlock()
|
|
|
|
|
|
|
|
var rules []Rule
|
|
|
|
for _, g := range m.groups {
|
|
|
|
rules = append(rules, g.rules...)
|
|
|
|
}
|
2013-06-11 02:00:55 -07:00
|
|
|
|
|
|
|
return rules
|
|
|
|
}
|
2013-06-13 07:10:05 -07:00
|
|
|
|
2015-04-29 02:08:56 -07:00
|
|
|
// AlertingRules returns the list of the manager's alerting rules.
|
2015-04-29 01:26:49 -07:00
|
|
|
func (m *Manager) AlertingRules() []*AlertingRule {
|
2015-03-30 10:43:19 -07:00
|
|
|
alerts := []*AlertingRule{}
|
2015-12-14 08:40:40 -08:00
|
|
|
for _, rule := range m.Rules() {
|
2015-03-30 10:43:19 -07:00
|
|
|
if alertingRule, ok := rule.(*AlertingRule); ok {
|
2013-06-13 07:10:05 -07:00
|
|
|
alerts = append(alerts, alertingRule)
|
|
|
|
}
|
|
|
|
}
|
2019-05-14 14:14:27 -07:00
|
|
|
|
2013-06-13 07:10:05 -07:00
|
|
|
return alerts
|
|
|
|
}
|
2022-10-07 07:58:17 -07:00
|
|
|
|
|
|
|
// Sender dispatches alerts to a notification destination
// (satisfied by the notifier's alert manager handler).
type Sender interface {
	Send(alerts ...*notifier.Alert)
}
|
|
|
|
|
|
|
|
// SendAlerts implements the rules.NotifyFunc for a Notifier.
|
|
|
|
func SendAlerts(s Sender, externalURL string) NotifyFunc {
|
|
|
|
return func(ctx context.Context, expr string, alerts ...*Alert) {
|
|
|
|
var res []*notifier.Alert
|
|
|
|
|
|
|
|
for _, alert := range alerts {
|
|
|
|
a := ¬ifier.Alert{
|
|
|
|
StartsAt: alert.FiredAt,
|
|
|
|
Labels: alert.Labels,
|
|
|
|
Annotations: alert.Annotations,
|
|
|
|
GeneratorURL: externalURL + strutil.TableLinkForExpression(expr),
|
|
|
|
}
|
|
|
|
if !alert.ResolvedAt.IsZero() {
|
|
|
|
a.EndsAt = alert.ResolvedAt
|
|
|
|
} else {
|
|
|
|
a.EndsAt = alert.ValidUntil
|
|
|
|
}
|
|
|
|
res = append(res, a)
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(alerts) > 0 {
|
|
|
|
s.Send(res...)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|