promtool: Calculate mint and maxt per test (#8096)

* promtool: Calculate mint and maxt per test

Previously, a single test that used a later eval time would force all
other tests in the file to share its [mint, maxt] window, potentially
evaluating far more samples than needed.

Fixes: #8019

Signed-off-by: David Leadbeater <dgl@dgl.cx>
Author: David Leadbeater <dgl@dgl.cx>, 2020-10-24 12:03:55 +01:00 (committed by GitHub)
parent 712477be5a
commit e7e60623ff
11 changed files with 234 additions and 23 deletions
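
In outline, the fix moves the [mint, maxt] computation from the file level down into each test group. A minimal sketch of the before/after logic, with names simplified from the diff below (testGroup here is a stand-in for the real struct, not the promtool type itself):

package sketch

import "time"

type testGroup struct{ maxEval time.Duration }

// Before: one window for the whole file, sized by the test with the
// latest eval time, so every test was evaluated over the widest range.
func fileBounds(tests []testGroup) (time.Time, time.Time) {
	var maxd time.Duration
	for _, t := range tests {
		if t.maxEval > maxd {
			maxd = t.maxEval
		}
	}
	mint := time.Unix(0, 0).UTC()
	return mint, mint.Add(maxd)
}

// After: each test gets a window sized only by its own eval times.
func perTestBounds(t testGroup) (time.Time, time.Time) {
	mint := time.Unix(0, 0).UTC()
	return mint, mint.Add(t.maxEval)
}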

4
cmd/promtool/testdata/bad-input-series.yml vendored Normal file

@@ -0,0 +1,4 @@
tests:
  - input_series:
      - series: 'up{job="prometheus", instance="localhost:9090"'
        values: "0+0x1440"

12
cmd/promtool/testdata/bad-promql.yml vendored Normal file

@@ -0,0 +1,12 @@
tests:
  - input_series:
      - series: 'join_1{a="1",b="2"}'
        values: 1
      - series: 'join_2{a="1",b="3"}'
        values: 2
      - series: 'join_2{a="1",b="4"}'
        values: 3

    promql_expr_test:
      # This PromQL generates an error.
      - expr: "join_1 + on(a) join_2"

14
cmd/promtool/testdata/bad-rules-error-test.yml vendored Normal file

@@ -0,0 +1,14 @@
rule_files:
  - bad-rules-error.yml

tests:
  - input_series:
      - series: 'join_1{a="1",b="2"}'
        values: 1
      - series: 'join_2{a="1",b="3"}'
        values: 2
      - series: 'join_2{a="1",b="4"}'
        values: 3

  # Just the existence of the data, which can't be joined, makes the recording
  # rules error.

7
cmd/promtool/testdata/bad-rules-error.yml vendored Normal file

@@ -0,0 +1,7 @@
# This is the rules file for bad-rules-error-test.yml.

groups:
  - name: bad-example
    rules:
      - record: joined
        expr: join_1 + on(a) join_2

6
cmd/promtool/testdata/bad-rules-syntax-test.yml vendored Normal file

@@ -0,0 +1,6 @@
rule_files:
  - bad-rules-syntax.yml

tests:
  # Need a test to ensure the recording rules actually run.
  - {}

7
cmd/promtool/testdata/bad-rules-syntax.yml vendored Normal file

@@ -0,0 +1,7 @@
# This is the rules file for bad-rules-syntax-test.yml.

groups:
  - name: bad-syntax
    rules:
      - record: x
        expr: 'test +'

19
cmd/promtool/testdata/failing.yml vendored Normal file

@@ -0,0 +1,19 @@
tests:
  # Simple failing test.
  - interval: 1m
    input_series:
      - series: test
        values: '0'

    promql_expr_test:
      - expr: test
        eval_time: 0m
        exp_samples:
          - value: 1
            labels: test

    alert_rule_test:
      - eval_time: 0m
        alertname: Test
        exp_alerts:
          - exp_labels: {}

cmd/promtool/testdata/{alerts.yml → rules.yml}

@@ -1,7 +1,7 @@
 # This is the rules file.

 groups:
-  - name: example
+  - name: alerts
     rules:
       - alert: InstanceDown
         expr: up == 0
@@ -11,3 +11,12 @@ groups:
         annotations:
           summary: "Instance {{ $labels.instance }} down"
           description: "{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes."
+
+  - name: rules
+    rules:
+      - record: job:test:count_over_time1m
+        expr: sum without(instance) (count_over_time(test[1m]))
+
+      # A recording rule that doesn't depend on input series.
+      - record: fixed_data
+        expr: 1

cmd/promtool/testdata/unittest.yml

@@ -1,9 +1,79 @@
 rule_files:
-  - alerts.yml
+  - rules.yml

 evaluation_interval: 1m

 tests:
+  # Basic tests for promql_expr_test, not dependent on rules.
+  - interval: 1m
+    input_series:
+      - series: test_full
+        values: '0 0'
+      - series: test_stale
+        values: '0 stale'
+      - series: test_missing
+        values: '0 _ _ _ _ _ _ 0'
+
+    promql_expr_test:
+      # Ensure the sample is evaluated at the time we expect it to be.
+      - expr: timestamp(test_full)
+        eval_time: 0m
+        exp_samples:
+          - value: 0
+      - expr: timestamp(test_full)
+        eval_time: 1m
+        exp_samples:
+          - value: 60
+      - expr: timestamp(test_full)
+        eval_time: 2m
+        exp_samples:
+          - value: 60
+
+      # Ensure a value is stale as soon as it is marked as such.
+      - expr: test_stale
+        eval_time: 59s
+        exp_samples:
+          - value: 0
+            labels: 'test_stale'
+      - expr: test_stale
+        eval_time: 1m
+        exp_samples: []
+
+      # Ensure lookback delta is respected, when a value is missing.
+      - expr: timestamp(test_missing)
+        eval_time: 5m
+        exp_samples:
+          - value: 0
+      - expr: timestamp(test_missing)
+        eval_time: 5m1s
+        exp_samples: []
+
+  # Minimal test case to check edge case of a single sample.
+  - input_series:
+      - series: test
+        values: 1
+
+    promql_expr_test:
+      - expr: test
+        eval_time: 0
+        exp_samples:
+          - value: 1
+            labels: test
+
+  # Test recording rules run even if input_series isn't provided.
+  - promql_expr_test:
+      - expr: count_over_time(fixed_data[1h])
+        eval_time: 1h
+        exp_samples:
+          - value: 61
+      - expr: timestamp(fixed_data)
+        eval_time: 1h
+        exp_samples:
+          - value: 3600
+
+  # Tests for alerting rules.
   - interval: 1m
     input_series:
       - series: 'up{job="prometheus", instance="localhost:9090"}'
@@ -27,3 +97,45 @@ tests:
           exp_annotations:
             summary: "Instance localhost:9090 down"
             description: "localhost:9090 of job prometheus has been down for more than 5 minutes."
+
+  # Tests for interval vs evaluation_interval.
+  - interval: 1s
+    input_series:
+      - series: 'test{job="test", instance="x:0"}'
+        # 2 minutes + 1 second of input data, recording rules should only run
+        # once a minute.
+        values: '0+1x120'
+
+    promql_expr_test:
+      - expr: job:test:count_over_time1m
+        eval_time: 0m
+        exp_samples:
+          - value: 1
+            labels: 'job:test:count_over_time1m{job="test"}'
+      - expr: timestamp(job:test:count_over_time1m)
+        eval_time: 10s
+        exp_samples:
+          - value: 0
+            labels: '{job="test"}'
+      - expr: job:test:count_over_time1m
+        eval_time: 1m
+        exp_samples:
+          - value: 61
+            labels: 'job:test:count_over_time1m{job="test"}'
+      - expr: timestamp(job:test:count_over_time1m)
+        eval_time: 1m10s
+        exp_samples:
+          - value: 60
+            labels: '{job="test"}'
+      - expr: job:test:count_over_time1m
+        eval_time: 2m
+        exp_samples:
+          - value: 61
+            labels: 'job:test:count_over_time1m{job="test"}'
+      - expr: timestamp(job:test:count_over_time1m)
+        eval_time: 2m59s999ms
+        exp_samples:
+          - value: 120
+            labels: '{job="test"}'
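
As a sanity check on the 61s above (assuming the boundary-inclusive range-selector semantics Prometheus used at the time): with interval: 1s there are samples at 0s, 1s, ..., 120s, so test[1m] evaluated at t=60s covers the closed window [0s, 60s], while the recording rule itself only produces points at multiples of evaluation_interval (0s, 60s, 120s), which is what the timestamp() expectations pin down. A back-of-envelope check:

package main

import "fmt"

func main() {
	scrape := 1  // seconds between input samples (interval: 1s)
	window := 60 // range selector width in seconds (test[1m])
	// A closed window [t-60s, t] over a 1s-spaced series holds 61 samples.
	fmt.Println(window/scrape + 1) // 61
}

The same arithmetic explains the earlier fixed_data expectation: samples every 60s over a closed 3600s window give 3600/60 + 1 = 61.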

cmd/promtool/unittest.go

@@ -80,13 +80,7 @@ func ruleUnitTest(filename string) []error {
 		unitTestInp.EvaluationInterval = model.Duration(1 * time.Minute)
 	}

-	// Bounds for evaluating the rules.
-	mint := time.Unix(0, 0).UTC()
-	maxd := unitTestInp.maxEvalTime()
-	maxt := mint.Add(maxd)
 	evalInterval := time.Duration(unitTestInp.EvaluationInterval)
-	// Rounding off to nearest Eval time (> maxt).
-	maxt = maxt.Add(evalInterval / 2).Round(evalInterval)

 	// Giving number for groups mentioned in the file for ordering.
 	// Lower number group should be evaluated before higher number group.
@@ -101,8 +95,7 @@ func ruleUnitTest(filename string) []error {
 	// Testing.
 	var errs []error
 	for _, t := range unitTestInp.Tests {
-		ers := t.test(mint, maxt, evalInterval, groupOrderMap,
-			unitTestInp.RuleFiles...)
+		ers := t.test(evalInterval, groupOrderMap, unitTestInp.RuleFiles...)
 		if ers != nil {
 			errs = append(errs, ers...)
 		}
@@ -122,17 +115,6 @@ type unitTestFile struct {
 	Tests              []testGroup `yaml:"tests"`
 }

-func (utf *unitTestFile) maxEvalTime() time.Duration {
-	var maxd time.Duration
-	for _, t := range utf.Tests {
-		d := t.maxEvalTime()
-		if d > maxd {
-			maxd = d
-		}
-	}
-	return maxd
-}
-
 // resolveAndGlobFilepaths joins all relative paths in a configuration
 // with a given base directory and replaces all globs with matching files.
 func resolveAndGlobFilepaths(baseDir string, utf *unitTestFile) error {
@@ -167,7 +149,7 @@ type testGroup struct {
 }

 // test performs the unit tests.
-func (tg *testGroup) test(mint, maxt time.Time, evalInterval time.Duration, groupOrderMap map[string]int, ruleFiles ...string) []error {
+func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]int, ruleFiles ...string) []error {
 	// Setup testing suite.
 	suite, err := promql.NewLazyLoader(nil, tg.seriesLoadingString())
 	if err != nil {
@@ -190,6 +172,10 @@ func (tg *testGroup) test(mint, maxt time.Time, evalInterval time.Duration, groupOrderMap map[string]int, ruleFiles ...string) []error {
 	}
 	groups := orderedGroups(groupsMap, groupOrderMap)

+	// Bounds for evaluating the rules.
+	mint := time.Unix(0, 0).UTC()
+	maxt := mint.Add(tg.maxEvalTime())
+
 	// Pre-processing some data for testing alerts.
 	// All this preparation is so that we can test alerts as we evaluate the rules.
 	// This avoids storing them in memory, as the number of evals might be high.
@@ -232,7 +218,7 @@ func (tg *testGroup) test(mint, maxt time.Time, evalInterval time.Duration, groupOrderMap map[string]int, ruleFiles ...string) []error {
 	}

 	var errs []error
-	for ts := mint; ts.Before(maxt); ts = ts.Add(evalInterval) {
+	for ts := mint; ts.Before(maxt) || ts.Equal(maxt); ts = ts.Add(evalInterval) {
 		// Collects the alerts asked for unit testing.
 		suite.WithSamplesTill(ts, func(err error) {
 			if err != nil {
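
One detail in the hunk above deserves a note: the loop bound changed from a strict ts.Before(maxt) to also accepting ts.Equal(maxt). With per-test bounds, a test whose only eval_time is 0 (the single-sample case exercised in unittest.yml) has mint == maxt, so an exclusive loop would never evaluate anything. A minimal standalone illustration (not the promtool code itself):

package main

import (
	"fmt"
	"time"
)

func main() {
	mint := time.Unix(0, 0).UTC()
	maxt := mint // single-sample test: the maximum eval time is 0
	evalInterval := time.Minute

	n := 0
	// Old, exclusive bound: zero iterations, the test silently never runs.
	for ts := mint; ts.Before(maxt); ts = ts.Add(evalInterval) {
		n++
	}
	fmt.Println("exclusive:", n) // 0

	n = 0
	// New, inclusive bound: exactly one evaluation, at t=0.
	for ts := mint; ts.Before(maxt) || ts.Equal(maxt); ts = ts.Add(evalInterval) {
		n++
	}
	fmt.Println("inclusive:", n) // 1
}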

cmd/promtool/unittest_test.go

@@ -31,6 +31,41 @@ func TestRulesUnitTest(t *testing.T) {
 			},
 			want: 0,
 		},
+		{
+			name: "Bad input series",
+			args: args{
+				files: []string{"./testdata/bad-input-series.yml"},
+			},
+			want: 1,
+		},
+		{
+			name: "Bad PromQL",
+			args: args{
+				files: []string{"./testdata/bad-promql.yml"},
+			},
+			want: 1,
+		},
+		{
+			name: "Bad rules (syntax error)",
+			args: args{
+				files: []string{"./testdata/bad-rules-syntax-test.yml"},
+			},
+			want: 1,
+		},
+		{
+			name: "Bad rules (error evaluating)",
+			args: args{
+				files: []string{"./testdata/bad-rules-error-test.yml"},
+			},
+			want: 1,
+		},
+		{
+			name: "Simple failing test",
+			args: args{
+				files: []string{"./testdata/failing.yml"},
+			},
+			want: 1,
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {