// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package rules

import (
	"context"
	"errors"
	"fmt"
	"math"
	"net/url"
	"sync"
	"time"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/model"
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/codes"
	"golang.org/x/exp/slices"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/rulefmt"
	"github.com/prometheus/prometheus/model/timestamp"
	"github.com/prometheus/prometheus/model/value"
	"github.com/prometheus/prometheus/notifier"
	"github.com/prometheus/prometheus/promql"
	"github.com/prometheus/prometheus/promql/parser"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
	"github.com/prometheus/prometheus/util/strutil"
)

// RuleHealth describes the health state of a rule.
type RuleHealth string

// The possible health states of a rule based on the last execution.
const (
	HealthUnknown RuleHealth = "unknown"
	HealthGood    RuleHealth = "ok"
	HealthBad     RuleHealth = "err"
)

// Constants for instrumentation.
const namespace = "prometheus"

// Metrics for rule evaluation.
type Metrics struct {
	EvalDuration        prometheus.Summary
	IterationDuration   prometheus.Summary
	IterationsMissed    *prometheus.CounterVec
	IterationsScheduled *prometheus.CounterVec
	EvalTotal           *prometheus.CounterVec
	EvalFailures        *prometheus.CounterVec
	GroupInterval       *prometheus.GaugeVec
	GroupLastEvalTime   *prometheus.GaugeVec
	GroupLastDuration   *prometheus.GaugeVec
	GroupRules          *prometheus.GaugeVec
	GroupSamples        *prometheus.GaugeVec
}

// NewGroupMetrics creates a new instance of Metrics and registers it with the provided registerer,
// if not nil.
func NewGroupMetrics(reg prometheus.Registerer) *Metrics {
	m := &Metrics{
		EvalDuration: prometheus.NewSummary(
			prometheus.SummaryOpts{
				Namespace:  namespace,
				Name:       "rule_evaluation_duration_seconds",
				Help:       "The duration for a rule to execute.",
				Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
			}),
		IterationDuration: prometheus.NewSummary(prometheus.SummaryOpts{
			Namespace:  namespace,
			Name:       "rule_group_duration_seconds",
			Help:       "The duration of rule group evaluations.",
			Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001},
		}),
		IterationsMissed: prometheus.NewCounterVec(
			prometheus.CounterOpts{
				Namespace: namespace,
				Name:      "rule_group_iterations_missed_total",
				Help:      "The total number of rule group evaluations missed due to slow rule group evaluation.",
			},
			[]string{"rule_group"},
		),
		IterationsScheduled: prometheus.NewCounterVec(
			prometheus.CounterOpts{
				Namespace: namespace,
				Name:      "rule_group_iterations_total",
				Help:      "The total number of scheduled rule group evaluations, whether executed or missed.",
			},
			[]string{"rule_group"},
		),
		EvalTotal: prometheus.NewCounterVec(
			prometheus.CounterOpts{
				Namespace: namespace,
				Name:      "rule_evaluations_total",
				Help:      "The total number of rule evaluations.",
			},
			[]string{"rule_group"},
		),
		EvalFailures: prometheus.NewCounterVec(
			prometheus.CounterOpts{
				Namespace: namespace,
				Name:      "rule_evaluation_failures_total",
				Help:      "The total number of rule evaluation failures.",
			},
			[]string{"rule_group"},
		),
		GroupInterval: prometheus.NewGaugeVec(
			prometheus.GaugeOpts{
				Namespace: namespace,
				Name:      "rule_group_interval_seconds",
				Help:      "The interval of a rule group.",
			},
			[]string{"rule_group"},
		),
		GroupLastEvalTime: prometheus.NewGaugeVec(
			prometheus.GaugeOpts{
				Namespace: namespace,
				Name:      "rule_group_last_evaluation_timestamp_seconds",
				Help:      "The timestamp of the last rule group evaluation in seconds.",
			},
			[]string{"rule_group"},
		),
		GroupLastDuration: prometheus.NewGaugeVec(
			prometheus.GaugeOpts{
				Namespace: namespace,
				Name:      "rule_group_last_duration_seconds",
				Help:      "The duration of the last rule group evaluation.",
			},
			[]string{"rule_group"},
		),
		GroupRules: prometheus.NewGaugeVec(
			prometheus.GaugeOpts{
				Namespace: namespace,
				Name:      "rule_group_rules",
				Help:      "The number of rules.",
			},
			[]string{"rule_group"},
		),
		GroupSamples: prometheus.NewGaugeVec(
			prometheus.GaugeOpts{
				Namespace: namespace,
				Name:      "rule_group_last_evaluation_samples",
				Help:      "The number of samples returned during the last rule group evaluation.",
			},
			[]string{"rule_group"},
		),
	}

	if reg != nil {
		reg.MustRegister(
			m.EvalDuration,
			m.IterationDuration,
			m.IterationsMissed,
			m.IterationsScheduled,
			m.EvalTotal,
			m.EvalFailures,
			m.GroupInterval,
			m.GroupLastEvalTime,
			m.GroupLastDuration,
			m.GroupRules,
			m.GroupSamples,
		)
	}

	return m
}

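// Illustrative sketch (not part of the original file): sharing a single
// Metrics instance across managers via ManagerOptions so the collectors are
// registered with the registry only once. `reg` is an assumed
// *prometheus.Registry; NewManager only creates its own Metrics when
// ManagerOptions.Metrics is nil.
//
//	reg := prometheus.NewRegistry()
//	metrics := NewGroupMetrics(reg)
//	opts := &ManagerOptions{Registerer: reg, Metrics: metrics}
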
// QueryFunc processes PromQL queries.
type QueryFunc func(ctx context.Context, q string, t time.Time) (promql.Vector, error)

// EngineQueryFunc returns a new query function that executes instant queries against
// the given engine.
// It converts scalar into vector results.
func EngineQueryFunc(engine *promql.Engine, q storage.Queryable) QueryFunc {
	return func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) {
		q, err := engine.NewInstantQuery(ctx, q, nil, qs, t)
		if err != nil {
			return nil, err
		}
		res := q.Exec(ctx)
		if res.Err != nil {
			return nil, res.Err
		}
		switch v := res.Value.(type) {
		case promql.Vector:
			return v, nil
		case promql.Scalar:
			return promql.Vector{promql.Sample{
				T:      v.T,
				F:      v.V,
				Metric: labels.Labels{},
			}}, nil
		default:
			return nil, errors.New("rule result is not a vector or scalar")
		}
	}
}

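// Illustrative sketch (not part of the original file): using EngineQueryFunc
// to build the QueryFunc that rule evaluation calls. `engine`, `db` and `ctx`
// are assumed to exist in the caller; `db` is any storage.Queryable.
//
//	qf := EngineQueryFunc(engine, db)
//	vec, err := qf(ctx, "up == 0", time.Now())
//	if err != nil {
//		// handle query error
//	}
//	_ = vec // instant-query result; scalars are converted to single-sample vectors
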
// A Rule encapsulates a vector expression which is evaluated at a specified
// interval and acted upon (currently either recorded or used for alerting).
type Rule interface {
	Name() string
	// Labels of the rule.
	Labels() labels.Labels
	// Eval evaluates the rule, including any associated recording or alerting actions.
	Eval(context.Context, time.Time, QueryFunc, *url.URL, int) (promql.Vector, error)
	// String returns a human-readable string representation of the rule.
	String() string
	// Query returns the rule query expression.
	Query() parser.Expr
	// SetLastError sets the current error experienced by the rule.
	SetLastError(error)
	// LastError returns the last error experienced by the rule.
	LastError() error
	// SetHealth sets the current health of the rule.
	SetHealth(RuleHealth)
	// Health returns the current health of the rule.
	Health() RuleHealth

	SetEvaluationDuration(time.Duration)
	// GetEvaluationDuration returns the last evaluation duration.
	// NOTE: Used dynamically by rules.html template.
	GetEvaluationDuration() time.Duration

	SetEvaluationTimestamp(time.Time)
	// GetEvaluationTimestamp returns the last evaluation timestamp.
	// NOTE: Used dynamically by rules.html template.
	GetEvaluationTimestamp() time.Time
}

// Group is a set of rules that have a logical relation.
type Group struct {
	name                 string
	file                 string
	interval             time.Duration
	limit                int
	rules                []Rule
	seriesInPreviousEval []map[string]labels.Labels // One per Rule.
	staleSeries          []labels.Labels
	opts                 *ManagerOptions
	mtx                  sync.Mutex
	evaluationTime       time.Duration
	lastEvaluation       time.Time // Wall-clock time of most recent evaluation.
	lastEvalTimestamp    time.Time // Time slot used for most recent evaluation.

	shouldRestore bool

	markStale   bool
	done        chan struct{}
	terminated  chan struct{}
	managerDone chan struct{}

	logger log.Logger

	metrics *Metrics

	// Rule group evaluation iteration function,
	// defaults to DefaultEvalIterationFunc.
	evalIterationFunc GroupEvalIterationFunc
}

// GroupEvalIterationFunc is used to implement and extend rule group
// evaluation iteration logic. It is configured in Group.evalIterationFunc,
// and periodically invoked at each group evaluation interval to
// evaluate the rules in the group at that point in time.
// DefaultEvalIterationFunc is the default implementation.
type GroupEvalIterationFunc func(ctx context.Context, g *Group, evalTimestamp time.Time)

type GroupOptions struct {
	Name, File        string
	Interval          time.Duration
	Limit             int
	Rules             []Rule
	ShouldRestore     bool
	Opts              *ManagerOptions
	done              chan struct{}
	EvalIterationFunc GroupEvalIterationFunc
}

// NewGroup makes a new Group with the given name, options, and rules.
func NewGroup(o GroupOptions) *Group {
	metrics := o.Opts.Metrics
	if metrics == nil {
		metrics = NewGroupMetrics(o.Opts.Registerer)
	}

	key := GroupKey(o.File, o.Name)
	metrics.IterationsMissed.WithLabelValues(key)
	metrics.IterationsScheduled.WithLabelValues(key)
	metrics.EvalTotal.WithLabelValues(key)
	metrics.EvalFailures.WithLabelValues(key)
	metrics.GroupLastEvalTime.WithLabelValues(key)
	metrics.GroupLastDuration.WithLabelValues(key)
	metrics.GroupRules.WithLabelValues(key).Set(float64(len(o.Rules)))
	metrics.GroupSamples.WithLabelValues(key)
	metrics.GroupInterval.WithLabelValues(key).Set(o.Interval.Seconds())

	evalIterationFunc := o.EvalIterationFunc
	if evalIterationFunc == nil {
		evalIterationFunc = DefaultEvalIterationFunc
	}

	return &Group{
		name:                 o.Name,
		file:                 o.File,
		interval:             o.Interval,
		limit:                o.Limit,
		rules:                o.Rules,
		shouldRestore:        o.ShouldRestore,
		opts:                 o.Opts,
		seriesInPreviousEval: make([]map[string]labels.Labels, len(o.Rules)),
		done:                 make(chan struct{}),
		managerDone:          o.done,
		terminated:           make(chan struct{}),
		logger:               log.With(o.Opts.Logger, "file", o.File, "group", o.Name),
		metrics:              metrics,
		evalIterationFunc:    evalIterationFunc,
	}
}

// Name returns the group name.
func (g *Group) Name() string { return g.name }

// File returns the group's file.
func (g *Group) File() string { return g.file }

// Rules returns the group's rules.
func (g *Group) Rules() []Rule { return g.rules }

// Queryable returns the group's queryable.
func (g *Group) Queryable() storage.Queryable { return g.opts.Queryable }

// Context returns the group's context.
func (g *Group) Context() context.Context { return g.opts.Context }

// Interval returns the group's interval.
func (g *Group) Interval() time.Duration { return g.interval }

// Limit returns the group's limit.
func (g *Group) Limit() int { return g.limit }

func (g *Group) Logger() log.Logger { return g.logger }

func (g *Group) run(ctx context.Context) {
	defer close(g.terminated)

	// Wait an initial amount to have consistently slotted intervals.
	evalTimestamp := g.EvalTimestamp(time.Now().UnixNano()).Add(g.interval)
	select {
	case <-time.After(time.Until(evalTimestamp)):
	case <-g.done:
		return
	}

	ctx = promql.NewOriginContext(ctx, map[string]interface{}{
		"ruleGroup": map[string]string{
			"file": g.File(),
			"name": g.Name(),
		},
	})

	// The assumption here is that since the ticker was started after having
	// waited for `evalTimestamp` to pass, the ticks will trigger soon
	// after each `evalTimestamp + N * g.interval` occurrence.
	tick := time.NewTicker(g.interval)
	defer tick.Stop()

	defer func() {
		if !g.markStale {
			return
		}
		go func(now time.Time) {
			for _, rule := range g.seriesInPreviousEval {
				for _, r := range rule {
					g.staleSeries = append(g.staleSeries, r)
				}
			}
			// That can be garbage collected at this point.
			g.seriesInPreviousEval = nil
			// Wait for 2 intervals to give renamed rules the opportunity to
			// insert new series into the TSDB. At this point, if there is a
			// renamed rule, it should already be started.
			select {
			case <-g.managerDone:
			case <-time.After(2 * g.interval):
				g.cleanupStaleSeries(ctx, now)
			}
		}(time.Now())
	}()

	g.evalIterationFunc(ctx, g, evalTimestamp)

	if g.shouldRestore {
		// If we have to restore, we wait for another Eval to finish.
		// The reason behind this is, during first eval (or before it)
		// we might not have enough data scraped, and recording rules would not
		// have updated the latest values, on which some alerts might depend.
		select {
		case <-g.done:
			return
		case <-tick.C:
			missed := (time.Since(evalTimestamp) / g.interval) - 1
			if missed > 0 {
				g.metrics.IterationsMissed.WithLabelValues(GroupKey(g.file, g.name)).Add(float64(missed))
				g.metrics.IterationsScheduled.WithLabelValues(GroupKey(g.file, g.name)).Add(float64(missed))
			}
			evalTimestamp = evalTimestamp.Add((missed + 1) * g.interval)
			g.evalIterationFunc(ctx, g, evalTimestamp)
		}

		g.RestoreForState(time.Now())
		g.shouldRestore = false
	}

	for {
		select {
		case <-g.done:
			return
		default:
			select {
			case <-g.done:
				return
			case <-tick.C:
				missed := (time.Since(evalTimestamp) / g.interval) - 1
				if missed > 0 {
					g.metrics.IterationsMissed.WithLabelValues(GroupKey(g.file, g.name)).Add(float64(missed))
					g.metrics.IterationsScheduled.WithLabelValues(GroupKey(g.file, g.name)).Add(float64(missed))
				}
				evalTimestamp = evalTimestamp.Add((missed + 1) * g.interval)

				g.evalIterationFunc(ctx, g, evalTimestamp)
			}
		}
	}
}

// DefaultEvalIterationFunc is the default implementation of
// GroupEvalIterationFunc that is periodically invoked to evaluate the rules
// in a group at a given point in time and updates Group state and metrics
// accordingly. Custom GroupEvalIterationFunc implementations are recommended
// to invoke this function as well, to ensure correct Group state and metrics
// are maintained.
func DefaultEvalIterationFunc(ctx context.Context, g *Group, evalTimestamp time.Time) {
	g.metrics.IterationsScheduled.WithLabelValues(GroupKey(g.file, g.name)).Inc()

	start := time.Now()
	g.Eval(ctx, evalTimestamp)
	timeSinceStart := time.Since(start)

	g.metrics.IterationDuration.Observe(timeSinceStart.Seconds())
	g.setEvaluationTime(timeSinceStart)
	g.setLastEvaluation(start)
	g.setLastEvalTimestamp(evalTimestamp)
}

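// Illustrative sketch (not part of the original file): a custom
// GroupEvalIterationFunc that wraps DefaultEvalIterationFunc, for example to
// log the duration of every iteration. The function name is hypothetical; it
// would be passed to NewGroup through GroupOptions.EvalIterationFunc.
//
//	func loggingEvalIterationFunc(ctx context.Context, g *Group, evalTimestamp time.Time) {
//		start := time.Now()
//		DefaultEvalIterationFunc(ctx, g, evalTimestamp)
//		level.Info(g.Logger()).Log("msg", "group iteration done", "group", g.Name(), "took", time.Since(start))
//	}
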
func (g *Group) stop() {
	close(g.done)
	<-g.terminated
}

func (g *Group) hash() uint64 {
	l := labels.New(
		labels.Label{Name: "name", Value: g.name},
		labels.Label{Name: "file", Value: g.file},
	)
	return l.Hash()
}

// AlertingRules returns the list of the group's alerting rules.
func (g *Group) AlertingRules() []*AlertingRule {
	g.mtx.Lock()
	defer g.mtx.Unlock()

	var alerts []*AlertingRule
	for _, rule := range g.rules {
		if alertingRule, ok := rule.(*AlertingRule); ok {
			alerts = append(alerts, alertingRule)
		}
	}
	slices.SortFunc(alerts, func(a, b *AlertingRule) bool {
		return a.State() > b.State() ||
			(a.State() == b.State() && a.Name() < b.Name())
	})
	return alerts
}

// HasAlertingRules returns true if the group contains at least one AlertingRule.
func (g *Group) HasAlertingRules() bool {
	g.mtx.Lock()
	defer g.mtx.Unlock()

	for _, rule := range g.rules {
		if _, ok := rule.(*AlertingRule); ok {
			return true
		}
	}
	return false
}

// GetEvaluationTime returns the time it took to evaluate the rule group.
func (g *Group) GetEvaluationTime() time.Duration {
	g.mtx.Lock()
	defer g.mtx.Unlock()
	return g.evaluationTime
}

// setEvaluationTime sets the time the last evaluation took.
func (g *Group) setEvaluationTime(dur time.Duration) {
	g.metrics.GroupLastDuration.WithLabelValues(GroupKey(g.file, g.name)).Set(dur.Seconds())

	g.mtx.Lock()
	defer g.mtx.Unlock()
	g.evaluationTime = dur
}

// GetLastEvaluation returns the time the last evaluation of the rule group took place.
func (g *Group) GetLastEvaluation() time.Time {
	g.mtx.Lock()
	defer g.mtx.Unlock()
	return g.lastEvaluation
}

// setLastEvaluation updates lastEvaluation to the wall-clock time at which the rule group was last evaluated.
func (g *Group) setLastEvaluation(ts time.Time) {
	g.metrics.GroupLastEvalTime.WithLabelValues(GroupKey(g.file, g.name)).Set(float64(ts.UnixNano()) / 1e9)

	g.mtx.Lock()
	defer g.mtx.Unlock()
	g.lastEvaluation = ts
}

// GetLastEvalTimestamp returns the timestamp of the last evaluation.
func (g *Group) GetLastEvalTimestamp() time.Time {
	g.mtx.Lock()
	defer g.mtx.Unlock()
	return g.lastEvalTimestamp
}

// setLastEvalTimestamp updates lastEvalTimestamp to the timestamp of the last evaluation.
func (g *Group) setLastEvalTimestamp(ts time.Time) {
	g.mtx.Lock()
	defer g.mtx.Unlock()
	g.lastEvalTimestamp = ts
}

// EvalTimestamp returns the immediately preceding consistently slotted evaluation time.
func (g *Group) EvalTimestamp(startTime int64) time.Time {
	var (
		offset = int64(g.hash() % uint64(g.interval))

		// This group's evaluation times differ from the perfect time intervals by `offset` nanoseconds.
		// But we can only use `% interval` to align with the interval. And `% interval` will always
		// align with the perfect time intervals, instead of this group's. Because of this we add
		// `offset` _after_ aligning with the perfect time interval.
		//
		// There can be cases where adding `offset` to the perfect evaluation time can yield a
		// timestamp in the future, which is not what EvalTimestamp should do.
		// So we subtract one `offset` to make sure that `now - (now % interval) + offset` gives an
		// evaluation time in the past.
		adjNow = startTime - offset

		// Adjust to perfect evaluation intervals.
		base = adjNow - (adjNow % int64(g.interval))

		// Add one offset to randomize the evaluation times of this group.
		next = base + offset
	)

	return time.Unix(0, next).UTC()
}

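// Worked example for the arithmetic above (numbers assumed for illustration):
// with interval = 60s and a hash-derived offset of 13s, a startTime of
// 10:00:25 gives adjNow = 10:00:12, base = 10:00:00 and next = 10:00:13, the
// latest 13-seconds-past-the-minute slot that is not in the future. For a
// startTime of 10:00:05 the result is 09:59:13, one full interval earlier.
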
func nameAndLabels(rule Rule) string {
	return rule.Name() + rule.Labels().String()
}

// CopyState copies the alerting rule and staleness related state from the given group.
//
// Rules are matched based on their name and labels. If there are duplicates, the
// first is matched with the first, second with the second etc.
func (g *Group) CopyState(from *Group) {
	g.evaluationTime = from.evaluationTime
	g.lastEvaluation = from.lastEvaluation

	ruleMap := make(map[string][]int, len(from.rules))

	for fi, fromRule := range from.rules {
		nameAndLabels := nameAndLabels(fromRule)
		l := ruleMap[nameAndLabels]
		ruleMap[nameAndLabels] = append(l, fi)
	}

	for i, rule := range g.rules {
		nameAndLabels := nameAndLabels(rule)
		indexes := ruleMap[nameAndLabels]
		if len(indexes) == 0 {
			continue
		}
		fi := indexes[0]
		g.seriesInPreviousEval[i] = from.seriesInPreviousEval[fi]
		ruleMap[nameAndLabels] = indexes[1:]

		ar, ok := rule.(*AlertingRule)
		if !ok {
			continue
		}
		far, ok := from.rules[fi].(*AlertingRule)
		if !ok {
			continue
		}

		for fp, a := range far.active {
			ar.active[fp] = a
		}
	}

	// Handle deleted and unmatched duplicate rules.
	g.staleSeries = from.staleSeries
	for fi, fromRule := range from.rules {
		nameAndLabels := nameAndLabels(fromRule)
		l := ruleMap[nameAndLabels]
		if len(l) != 0 {
			for _, series := range from.seriesInPreviousEval[fi] {
				g.staleSeries = append(g.staleSeries, series)
			}
		}
	}
}

// Eval runs a single evaluation cycle in which all rules are evaluated sequentially.
func (g *Group) Eval(ctx context.Context, ts time.Time) {
	var samplesTotal float64
	for i, rule := range g.rules {
		select {
		case <-g.done:
			return
		default:
		}

		func(i int, rule Rule) {
			ctx, sp := otel.Tracer("").Start(ctx, "rule")
			sp.SetAttributes(attribute.String("name", rule.Name()))
			defer func(t time.Time) {
				sp.End()

				since := time.Since(t)
				g.metrics.EvalDuration.Observe(since.Seconds())
				rule.SetEvaluationDuration(since)
				rule.SetEvaluationTimestamp(t)
			}(time.Now())

			g.metrics.EvalTotal.WithLabelValues(GroupKey(g.File(), g.Name())).Inc()

			vector, err := rule.Eval(ctx, ts, g.opts.QueryFunc, g.opts.ExternalURL, g.Limit())
			if err != nil {
				rule.SetHealth(HealthBad)
				rule.SetLastError(err)
				sp.SetStatus(codes.Error, err.Error())
				g.metrics.EvalFailures.WithLabelValues(GroupKey(g.File(), g.Name())).Inc()

				// Canceled queries are intentional termination of queries. This normally
				// happens on shutdown and thus we skip logging of any errors here.
				var eqc promql.ErrQueryCanceled
				if !errors.As(err, &eqc) {
					level.Warn(g.logger).Log("name", rule.Name(), "index", i, "msg", "Evaluating rule failed", "rule", rule, "err", err)
				}
				return
			}
			rule.SetHealth(HealthGood)
			rule.SetLastError(nil)
			samplesTotal += float64(len(vector))

			if ar, ok := rule.(*AlertingRule); ok {
				ar.sendAlerts(ctx, ts, g.opts.ResendDelay, g.interval, g.opts.NotifyFunc)
			}
			var (
				numOutOfOrder = 0
				numTooOld     = 0
				numDuplicates = 0
			)

			app := g.opts.Appendable.Appender(ctx)
			seriesReturned := make(map[string]labels.Labels, len(g.seriesInPreviousEval[i]))
			defer func() {
				if err := app.Commit(); err != nil {
					rule.SetHealth(HealthBad)
					rule.SetLastError(err)
					sp.SetStatus(codes.Error, err.Error())
					g.metrics.EvalFailures.WithLabelValues(GroupKey(g.File(), g.Name())).Inc()

					level.Warn(g.logger).Log("name", rule.Name(), "index", i, "msg", "Rule sample appending failed", "err", err)
					return
				}
				g.seriesInPreviousEval[i] = seriesReturned
			}()

			for _, s := range vector {
				if s.H != nil {
					_, err = app.AppendHistogram(0, s.Metric, s.T, nil, s.H)
				} else {
					_, err = app.Append(0, s.Metric, s.T, s.F)
				}

				if err != nil {
					rule.SetHealth(HealthBad)
					rule.SetLastError(err)
					sp.SetStatus(codes.Error, err.Error())
					unwrappedErr := errors.Unwrap(err)
					if unwrappedErr == nil {
						unwrappedErr = err
					}
					switch {
					case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample):
						numOutOfOrder++
						level.Debug(g.logger).Log("name", rule.Name(), "index", i, "msg", "Rule evaluation result discarded", "err", err, "sample", s)
					case errors.Is(unwrappedErr, storage.ErrTooOldSample):
						numTooOld++
						level.Debug(g.logger).Log("name", rule.Name(), "index", i, "msg", "Rule evaluation result discarded", "err", err, "sample", s)
					case errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp):
						numDuplicates++
						level.Debug(g.logger).Log("name", rule.Name(), "index", i, "msg", "Rule evaluation result discarded", "err", err, "sample", s)
					default:
						level.Warn(g.logger).Log("name", rule.Name(), "index", i, "msg", "Rule evaluation result discarded", "err", err, "sample", s)
					}
				} else {
					buf := [1024]byte{}
					seriesReturned[string(s.Metric.Bytes(buf[:]))] = s.Metric
				}
			}
			if numOutOfOrder > 0 {
				level.Warn(g.logger).Log("name", rule.Name(), "index", i, "msg", "Error on ingesting out-of-order result from rule evaluation", "numDropped", numOutOfOrder)
			}
			if numTooOld > 0 {
				level.Warn(g.logger).Log("name", rule.Name(), "index", i, "msg", "Error on ingesting too old result from rule evaluation", "numDropped", numTooOld)
			}
			if numDuplicates > 0 {
				level.Warn(g.logger).Log("name", rule.Name(), "index", i, "msg", "Error on ingesting results from rule evaluation with different value but same timestamp", "numDropped", numDuplicates)
			}

			for metric, lset := range g.seriesInPreviousEval[i] {
				if _, ok := seriesReturned[metric]; !ok {
					// Series no longer exposed, mark it stale.
					_, err = app.Append(0, lset, timestamp.FromTime(ts), math.Float64frombits(value.StaleNaN))
					unwrappedErr := errors.Unwrap(err)
					if unwrappedErr == nil {
						unwrappedErr = err
					}
					switch {
					case unwrappedErr == nil:
					case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample),
						errors.Is(unwrappedErr, storage.ErrTooOldSample),
						errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp):
						// Do not count these in logging, as this is expected if series
						// is exposed from a different rule.
					default:
						level.Warn(g.logger).Log("name", rule.Name(), "index", i, "msg", "Adding stale sample failed", "sample", lset.String(), "err", err)
					}
				}
			}
		}(i, rule)
	}
	if g.metrics != nil {
		g.metrics.GroupSamples.WithLabelValues(GroupKey(g.File(), g.Name())).Set(samplesTotal)
	}
	g.cleanupStaleSeries(ctx, ts)
}

func (g *Group) cleanupStaleSeries(ctx context.Context, ts time.Time) {
	if len(g.staleSeries) == 0 {
		return
	}
	app := g.opts.Appendable.Appender(ctx)
	for _, s := range g.staleSeries {
		// Rule that produced series no longer configured, mark it stale.
		_, err := app.Append(0, s, timestamp.FromTime(ts), math.Float64frombits(value.StaleNaN))
		unwrappedErr := errors.Unwrap(err)
		if unwrappedErr == nil {
			unwrappedErr = err
		}
		switch {
		case unwrappedErr == nil:
		case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample),
			errors.Is(unwrappedErr, storage.ErrTooOldSample),
			errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp):
			// Do not count these in logging, as this is expected if series
			// is exposed from a different rule.
		default:
			level.Warn(g.logger).Log("msg", "Adding stale sample for previous configuration failed", "sample", s, "err", err)
		}
	}
	if err := app.Commit(); err != nil {
		level.Warn(g.logger).Log("msg", "Stale sample appending for previous configuration failed", "err", err)
	} else {
		g.staleSeries = nil
	}
}

// RestoreForState restores the 'for' state of the alerts
// by looking up last ActiveAt from storage.
func (g *Group) RestoreForState(ts time.Time) {
	maxtMS := int64(model.TimeFromUnixNano(ts.UnixNano()))
	// We allow restoration only if alerts were active at some point within the outage tolerance window.
	mint := ts.Add(-g.opts.OutageTolerance)
	mintMS := int64(model.TimeFromUnixNano(mint.UnixNano()))
	q, err := g.opts.Queryable.Querier(g.opts.Context, mintMS, maxtMS)
	if err != nil {
		level.Error(g.logger).Log("msg", "Failed to get Querier", "err", err)
		return
	}
	defer func() {
		if err := q.Close(); err != nil {
			level.Error(g.logger).Log("msg", "Failed to close Querier", "err", err)
		}
	}()

	for _, rule := range g.Rules() {
		alertRule, ok := rule.(*AlertingRule)
		if !ok {
			continue
		}

		alertHoldDuration := alertRule.HoldDuration()
		if alertHoldDuration < g.opts.ForGracePeriod {
			// If alertHoldDuration is already less than grace period, we would not
			// like to make it wait for `g.opts.ForGracePeriod` time before firing.
			// Hence we skip restoration, which will make it wait for alertHoldDuration.
			alertRule.SetRestored(true)
			continue
		}

		alertRule.ForEachActiveAlert(func(a *Alert) {
			var s storage.Series

			s, err := alertRule.QueryforStateSeries(a, q)
			if err != nil {
				// Querier Warnings are ignored. We do not care unless we have an error.
				level.Error(g.logger).Log(
					"msg", "Failed to restore 'for' state",
					labels.AlertName, alertRule.Name(),
					"stage", "Select",
					"err", err,
				)
				return
			}

			if s == nil {
				return
			}

			// Series found for the 'for' state.
			var t int64
			var v float64
			it := s.Iterator(nil)
			for it.Next() == chunkenc.ValFloat {
				t, v = it.At()
			}
			if it.Err() != nil {
				level.Error(g.logger).Log("msg", "Failed to restore 'for' state",
					labels.AlertName, alertRule.Name(), "stage", "Iterator", "err", it.Err())
				return
			}
			if value.IsStaleNaN(v) { // Alert was not active.
				return
			}

			downAt := time.Unix(t/1000, 0).UTC()
			restoredActiveAt := time.Unix(int64(v), 0).UTC()
			timeSpentPending := downAt.Sub(restoredActiveAt)
			timeRemainingPending := alertHoldDuration - timeSpentPending

			switch {
			case timeRemainingPending <= 0:
				// It means that alert was firing when prometheus went down.
				// In the next Eval, the state of this alert will be set back to
				// firing again if it's still firing in that Eval.
				// Nothing to be done in this case.
			case timeRemainingPending < g.opts.ForGracePeriod:
				// (new) restoredActiveAt = (ts + m.opts.ForGracePeriod) - alertHoldDuration
				//                            /* new firing time */        /* moving back by hold duration */
				//
				// Proof of correctness:
				// firingTime = restoredActiveAt.Add(alertHoldDuration)
				//            = ts + m.opts.ForGracePeriod - alertHoldDuration + alertHoldDuration
				//            = ts + m.opts.ForGracePeriod
				//
				// Time remaining to fire = firingTime.Sub(ts)
				//                        = (ts + m.opts.ForGracePeriod) - ts
				//                        = m.opts.ForGracePeriod
				restoredActiveAt = ts.Add(g.opts.ForGracePeriod).Add(-alertHoldDuration)
			default:
				// By shifting ActiveAt to the future (ActiveAt + some_duration),
				// the total pending time from the original ActiveAt
				// would be `alertHoldDuration + some_duration`.
				// Here, some_duration = downDuration.
				downDuration := ts.Sub(downAt)
				restoredActiveAt = restoredActiveAt.Add(downDuration)
			}
			a.ActiveAt = restoredActiveAt
			level.Debug(g.logger).Log("msg", "'for' state restored",
				labels.AlertName, alertRule.Name(), "restored_time", a.ActiveAt.Format(time.RFC850),
				"labels", a.Labels.String())
		})

		alertRule.SetRestored(true)
	}
}

// Equals returns whether two groups are the same.
func (g *Group) Equals(ng *Group) bool {
	if g.name != ng.name {
		return false
	}

	if g.file != ng.file {
		return false
	}

	if g.interval != ng.interval {
		return false
	}

	if g.limit != ng.limit {
		return false
	}

	if len(g.rules) != len(ng.rules) {
		return false
	}

	for i, gr := range g.rules {
		if gr.String() != ng.rules[i].String() {
			return false
		}
	}

	return true
}

// The Manager manages recording and alerting rules.
type Manager struct {
	opts     *ManagerOptions
	groups   map[string]*Group
	mtx      sync.RWMutex
	block    chan struct{}
	done     chan struct{}
	restored bool

	logger log.Logger
}

// NotifyFunc sends notifications about a set of alerts generated by the given expression.
type NotifyFunc func(ctx context.Context, expr string, alerts ...*Alert)

// ManagerOptions bundles options for the Manager.
type ManagerOptions struct {
	ExternalURL     *url.URL
	QueryFunc       QueryFunc
	NotifyFunc      NotifyFunc
	Context         context.Context
	Appendable      storage.Appendable
	Queryable       storage.Queryable
	Logger          log.Logger
	Registerer      prometheus.Registerer
	OutageTolerance time.Duration
	ForGracePeriod  time.Duration
	ResendDelay     time.Duration
	GroupLoader     GroupLoader

	Metrics *Metrics
}

// NewManager returns an implementation of Manager, ready to be started
// by calling the Run method.
func NewManager(o *ManagerOptions) *Manager {
	if o.Metrics == nil {
		o.Metrics = NewGroupMetrics(o.Registerer)
	}

	if o.GroupLoader == nil {
		o.GroupLoader = FileLoader{}
	}

	m := &Manager{
		groups: map[string]*Group{},
		opts:   o,
		block:  make(chan struct{}),
		done:   make(chan struct{}),
		logger: o.Logger,
	}

	return m
}

// Run starts processing of the rule manager. It is blocking.
func (m *Manager) Run() {
	level.Info(m.logger).Log("msg", "Starting rule manager...")
	m.start()
	<-m.done
}

func (m *Manager) start() {
	close(m.block)
}

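// Illustrative sketch (not part of the original file) of the typical manager
// lifecycle: load rule files with Update, start evaluation with Run (groups
// block on m.block until Run is called), and shut down with Stop. `opts`,
// `files` and `extLabels` are assumed to exist in the caller.
//
//	m := NewManager(opts)
//	if err := m.Update(time.Minute, files, extLabels, "http://example.org", nil); err != nil {
//		// handle rule loading error
//	}
//	go m.Run()
//	// ...
//	m.Stop()
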
// Stop the rule manager's rule evaluation cycles.
func (m *Manager) Stop() {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	level.Info(m.logger).Log("msg", "Stopping rule manager...")

	for _, eg := range m.groups {
		eg.stop()
	}

	// Shut down the groups waiting multiple evaluation intervals to write
	// staleness markers.
	close(m.done)

	level.Info(m.logger).Log("msg", "Rule manager stopped")
}

2017-11-23 06:48:14 -08:00
// Update the rule manager's state as the config requires. If
2016-07-11 07:24:54 -07:00
// loading the new rules failed the old rule set is restored.
2023-04-04 11:21:13 -07:00
func ( m * Manager ) Update ( interval time . Duration , files [ ] string , externalLabels labels . Labels , externalURL string , groupEvalIterationFunc GroupEvalIterationFunc ) error {
2015-12-14 08:40:40 -08:00
m . mtx . Lock ( )
defer m . mtx . Unlock ( )
2015-05-12 07:52:56 -07:00
2023-04-04 11:21:13 -07:00
groups , errs := m . LoadGroups ( interval , externalLabels , externalURL , groupEvalIterationFunc , files ... )
2022-03-28 17:16:46 -07:00
2017-06-16 04:14:33 -07:00
if errs != nil {
for _ , e := range errs {
2017-08-11 11:45:52 -07:00
level . Error ( m . logger ) . Log ( "msg" , "loading groups failed" , "err" , e )
2017-06-16 04:14:33 -07:00
}
return errors . New ( "error loading rules, previous rule set restored" )
2015-05-12 07:52:56 -07:00
}
2018-08-02 03:18:24 -07:00
m . restored = true
2015-06-23 03:07:53 -07:00
2015-12-14 08:40:40 -08:00
	var wg sync.WaitGroup
	for _, newg := range groups {
2019-12-19 02:41:11 -08:00
		// If there is an old group with the same identifier, check whether the
		// new group equals the old one; if so, keep the old group and skip the
		// new one. Otherwise, stop the old group, wait for it to finish its
		// current iteration, and then copy its state into the new group.
2020-09-13 08:07:59 -07:00
		gn := GroupKey(newg.file, newg.name)
2017-11-01 04:58:00 -07:00
		oldg, ok := m.groups[gn]
		delete(m.groups, gn)
2015-12-14 08:40:40 -08:00
2019-12-19 02:41:11 -08:00
		if ok && oldg.Equals(newg) {
			groups[gn] = oldg
			continue
		}
		wg.Add(1)
2020-03-01 10:32:14 -08:00
		go func(newg *Group) {
2015-12-14 08:40:40 -08:00
			if ok {
				oldg.stop()
2018-07-18 06:14:38 -07:00
				newg.CopyState(oldg)
2015-12-14 08:40:40 -08:00
			}
			wg.Done()
2020-09-21 03:29:03 -07:00
			// Wait with starting evaluation until the rule manager
			// is told to run. This is necessary to avoid running
			// queries against a bootstrapping storage.
			<-m.block
			newg.run(m.opts.Context)
2020-03-01 10:32:14 -08:00
		}(newg)
2015-12-14 08:40:40 -08:00
	}
	// Stop remaining old groups.
2020-02-12 07:22:18 -08:00
	wg.Add(len(m.groups))
2020-01-27 04:41:32 -08:00
	for n, oldg := range m.groups {
2020-02-12 07:22:18 -08:00
		go func(n string, g *Group) {
2020-04-18 05:32:18 -07:00
			g.markStale = true
			g.stop()
2020-02-12 07:22:18 -08:00
			if m := g.metrics; m != nil {
2021-04-30 10:25:34 -07:00
				m.IterationsMissed.DeleteLabelValues(n)
				m.IterationsScheduled.DeleteLabelValues(n)
				m.EvalTotal.DeleteLabelValues(n)
				m.EvalFailures.DeleteLabelValues(n)
				m.GroupInterval.DeleteLabelValues(n)
				m.GroupLastEvalTime.DeleteLabelValues(n)
				m.GroupLastDuration.DeleteLabelValues(n)
				m.GroupRules.DeleteLabelValues(n)
				m.GroupSamples.DeleteLabelValues(n)
2020-02-12 07:22:18 -08:00
			}
			wg.Done()
		}(n, oldg)
2015-12-14 08:40:40 -08:00
	}
	wg.Wait()
	m.groups = groups
2016-07-11 07:24:54 -07:00
	return nil
2015-05-12 07:52:56 -07:00
}
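
// Illustrative sketch, not part of the original file: the intended lifecycle of
// a Manager. Update loads (or reloads) rule files under the mutex, Run lets the
// groups start evaluating (they block on m.block until then), and Stop shuts
// them down. The options, rule file paths, and external URL here are placeholders.
func exampleManagerLifecycle(opts *ManagerOptions, files []string) error {
	m := NewManager(opts)
	// Initial load. Passing nil for the GroupEvalIterationFunc is assumed to
	// select the default per-group evaluation loop.
	if err := m.Update(time.Minute, files, labels.EmptyLabels(), "http://localhost:9090", nil); err != nil {
		return err
	}
	go m.Run()     // releases m.block so the groups begin evaluating
	defer m.Stop() // on shutdown: stop the groups and let staleness markers be written
	// ... run the rest of the server here ...
	return nil
}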
2020-07-22 07:19:34 -07:00
// GroupLoader is responsible for loading rule groups from arbitrary sources and parsing them.
type GroupLoader interface {
	Load(identifier string) (*rulefmt.RuleGroups, []error)
	Parse(query string) (parser.Expr, error)
}

// FileLoader is the default GroupLoader implementation. It defers to rulefmt.ParseFile
// and parser.ParseExpr.
type FileLoader struct{}

func (FileLoader) Load(identifier string) (*rulefmt.RuleGroups, []error) {
	return rulefmt.ParseFile(identifier)
}

func (FileLoader) Parse(query string) (parser.Expr, error) { return parser.ParseExpr(query) }
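
// Illustrative sketch, not part of the original file: a GroupLoader that serves
// rule groups from an in-memory map instead of the filesystem (useful in tests,
// or when rules come from an API). The "identifier" plays the role of a file
// name; parsing reuses rulefmt and the PromQL parser, mirroring FileLoader.
// Such a loader would be plugged in via ManagerOptions.GroupLoader, which is
// assumed to fall back to FileLoader when left nil.
type inMemoryLoader struct {
	contents map[string][]byte // identifier -> raw rule-group YAML
}

func (l inMemoryLoader) Load(identifier string) (*rulefmt.RuleGroups, []error) {
	raw, ok := l.contents[identifier]
	if !ok {
		return nil, []error{fmt.Errorf("no rule group content for %q", identifier)}
	}
	return rulefmt.Parse(raw)
}

func (l inMemoryLoader) Parse(query string) (parser.Expr, error) { return parser.ParseExpr(query) }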
2018-09-25 09:06:26 -07:00
// LoadGroups reads groups from a list of files.
2019-04-15 09:52:58 -07:00
func (m *Manager) LoadGroups(
2023-04-04 11:21:13 -07:00
	interval time.Duration, externalLabels labels.Labels, externalURL string, groupEvalIterationFunc GroupEvalIterationFunc, filenames ...string,
2019-04-15 09:52:58 -07:00
) (map[string]*Group, []error) {
2017-06-12 05:44:39 -07:00
	groups := make(map[string]*Group)
2018-08-02 03:18:24 -07:00
	shouldRestore := !m.restored
2015-04-29 02:08:56 -07:00
	for _, fn := range filenames {
2020-07-22 07:19:34 -07:00
		rgs, errs := m.opts.GroupLoader.Load(fn)
2017-06-13 23:49:21 -07:00
		if errs != nil {
2017-06-16 04:14:33 -07:00
			return nil, errs
2015-04-29 02:08:56 -07:00
		}
2015-07-03 05:48:22 -07:00
2017-06-12 05:44:39 -07:00
		for _, rg := range rgs.Groups {
			itv := interval
2017-06-15 22:16:21 -07:00
			if rg.Interval != 0 {
				itv = time.Duration(rg.Interval)
2017-06-12 05:44:39 -07:00
			}
2015-12-14 08:40:40 -08:00
2017-06-13 22:43:00 -07:00
			rules := make([]Rule, 0, len(rg.Rules))
2017-06-12 05:44:39 -07:00
			for _, r := range rg.Rules {
2020-07-22 07:19:34 -07:00
				expr, err := m.opts.GroupLoader.Parse(r.Expr.Value)
2017-06-12 05:44:39 -07:00
				if err != nil {
2022-06-17 00:54:25 -07:00
					return nil, []error{fmt.Errorf("%s: %w", fn, err)}
2017-06-12 05:44:39 -07:00
				}
2015-12-14 08:40:40 -08:00
2020-01-15 10:07:54 -08:00
if r . Alert . Value != "" {
2017-06-12 05:44:39 -07:00
rules = append ( rules , NewAlertingRule (
2020-01-15 10:07:54 -08:00
r . Alert . Value ,
2017-06-12 05:44:39 -07:00
expr ,
2017-06-15 22:16:21 -07:00
time . Duration ( r . For ) ,
2023-01-09 03:21:38 -08:00
time . Duration ( r . KeepFiringFor ) ,
2017-06-12 05:44:39 -07:00
labels . FromMap ( r . Labels ) ,
labels . FromMap ( r . Annotations ) ,
2019-04-15 09:52:58 -07:00
externalLabels ,
2021-05-30 20:35:26 -07:00
externalURL ,
2018-08-02 03:18:24 -07:00
m . restored ,
2017-08-11 11:45:52 -07:00
log . With ( m . logger , "alert" , r . Alert ) ,
2017-06-12 05:44:39 -07:00
) )
continue
}
rules = append ( rules , NewRecordingRule (
2020-01-15 10:07:54 -08:00
r . Record . Value ,
2017-06-12 05:44:39 -07:00
expr ,
labels . FromMap ( r . Labels ) ,
) )
2015-04-29 02:08:56 -07:00
}
2017-06-12 05:44:39 -07:00
2020-09-13 08:07:59 -07:00
			groups[GroupKey(fn, rg.Name)] = NewGroup(GroupOptions{
2023-04-04 11:21:13 -07:00
				Name:              rg.Name,
				File:              fn,
				Interval:          itv,
				Limit:             rg.Limit,
				Rules:             rules,
				ShouldRestore:     shouldRestore,
				Opts:              m.opts,
				done:              m.done,
				EvalIterationFunc: groupEvalIterationFunc,
2020-02-12 07:22:18 -08:00
			})
2015-04-29 02:08:56 -07:00
		}
	}
2015-12-14 08:40:40 -08:00
	return groups, nil
2013-01-07 14:24:26 -08:00
}
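
// Illustrative sketch, not part of the original file: the shape of input that
// LoadGroups consumes via the GroupLoader. rulefmt parses the YAML below into
// one group whose fields map onto the code above: "interval" -> rg.Interval,
// "record"/"alert" -> r.Record/r.Alert, "expr" -> r.Expr, "for" -> r.For.
const exampleRuleFile = `
groups:
  - name: example
    interval: 30s
    rules:
      - record: job:http_requests:rate5m
        expr: sum by (job) (rate(http_requests_total[5m]))
      - alert: HighRequestRate
        expr: job:http_requests:rate5m{job="api"} > 100
        for: 10m
        labels:
          severity: page
        annotations:
          summary: Request rate for the api job is unusually high.
`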
2013-06-11 02:00:55 -07:00
2020-09-13 08:07:59 -07:00
// GroupKey returns a unique key for a rule group, since group names
// need not be unique across filenames.
func GroupKey(file, name string) string {
2020-01-27 04:41:32 -08:00
	return file + ";" + name
2017-11-01 04:58:00 -07:00
}
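
// For illustration (not part of the original file): GroupKey("rules/node.yml", "node-alerts")
// returns "rules/node.yml;node-alerts", so two groups that share a name but live in
// different files map to distinct keys in the manager's group map.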
2017-06-14 03:39:14 -07:00
// RuleGroups returns the list of the manager's rule groups.
func (m *Manager) RuleGroups() []*Group {
	m.mtx.RLock()
	defer m.mtx.RUnlock()
	rgs := make([]*Group, 0, len(m.groups))
	for _, g := range m.groups {
		rgs = append(rgs, g)
	}
2023-07-02 15:16:26 -07:00
	slices.SortFunc(rgs, func(a, b *Group) bool {
		if a.file != b.file {
			return a.file < b.file
2019-02-23 00:51:44 -08:00
		}
2023-07-02 15:16:26 -07:00
		return a.name < b.name
2017-06-14 03:39:14 -07:00
	})
	return rgs
}
2015-04-29 02:08:56 -07:00
// Rules returns the list of the manager's rules.
2015-04-29 01:26:49 -07:00
func (m *Manager) Rules() []Rule {
2015-12-14 08:40:40 -08:00
	m.mtx.RLock()
	defer m.mtx.RUnlock()
	var rules []Rule
	for _, g := range m.groups {
		rules = append(rules, g.rules...)
	}
2013-06-11 02:00:55 -07:00
	return rules
}
2013-06-13 07:10:05 -07:00
2015-04-29 02:08:56 -07:00
// AlertingRules returns the list of the manager's alerting rules.
2015-04-29 01:26:49 -07:00
func (m *Manager) AlertingRules() []*AlertingRule {
2015-03-30 10:43:19 -07:00
	alerts := []*AlertingRule{}
2015-12-14 08:40:40 -08:00
	for _, rule := range m.Rules() {
2015-03-30 10:43:19 -07:00
		if alertingRule, ok := rule.(*AlertingRule); ok {
2013-06-13 07:10:05 -07:00
			alerts = append(alerts, alertingRule)
		}
	}
2019-05-14 14:14:27 -07:00
2013-06-13 07:10:05 -07:00
	return alerts
}
2022-10-07 07:58:17 -07:00
// Sender can send a batch of alerts to a notification destination,
// e.g. the notifier manager that forwards them to Alertmanager.
type Sender interface {
	Send(alerts ...*notifier.Alert)
}

// SendAlerts implements the rules.NotifyFunc for a Notifier.
func SendAlerts(s Sender, externalURL string) NotifyFunc {
	return func(ctx context.Context, expr string, alerts ...*Alert) {
		var res []*notifier.Alert
		for _, alert := range alerts {
			a := &notifier.Alert{
				StartsAt:     alert.FiredAt,
				Labels:       alert.Labels,
				Annotations:  alert.Annotations,
				GeneratorURL: externalURL + strutil.TableLinkForExpression(expr),
			}
			if !alert.ResolvedAt.IsZero() {
				a.EndsAt = alert.ResolvedAt
			} else {
				a.EndsAt = alert.ValidUntil
			}
			res = append(res, a)
		}
		if len(alerts) > 0 {
			s.Send(res...)
		}
	}
}
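
// Illustrative sketch, not part of the original file: a minimal Sender that
// records alert batches on a channel instead of notifying Alertmanager, e.g.
// for tests. In a real setup the notifier's manager is passed to SendAlerts
// and the resulting NotifyFunc is assumed to be set on ManagerOptions.NotifyFunc.
type chanSender chan []*notifier.Alert

func (c chanSender) Send(alerts ...*notifier.Alert) {
	c <- alerts
}

// Hypothetical use:
//
//	sink := make(chanSender, 1)
//	notify := SendAlerts(sink, "http://localhost:9090")
//	notify(context.Background(), "up == 0", &Alert{FiredAt: time.Now(), Labels: labels.FromStrings("alertname", "InstanceDown")})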