Merge pull request #15795 from prometheus/beorn7/promql

promqltest: let eval_ordered ignore annotations and improve documentation
Björn Rabenstein authored on 2025-01-08 16:04:14 +01:00, committed by GitHub
commit 1ea9b72997
3 changed files with 78 additions and 29 deletions

@@ -22,7 +22,7 @@ Each test file contains a series of commands. There are three kinds of commands:
* `load`
* `clear`
-* `eval`
+* `eval` (including the variants `eval_fail`, `eval_warn`, `eval_info`, and `eval_ordered`)
Each command is executed in the order given in the file.
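To make the overall flow concrete, a minimal test script might look like the following sketch (the metric name and values are invented for illustration, not taken from the repository):

```
load 5m
  some_metric{job="demo"} 0 5 10

eval instant at 10m some_metric
  some_metric{job="demo"} 10

clear
```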
@@ -50,12 +50,12 @@ load 1m
my_metric{env="prod"} 5 2+3x2 _ stale {{schema:1 sum:-3 count:22 buckets:[5 10 7]}}
```
-...will create a single series with labels `my_metric{env="prod"}`, with the following points:
+will create a single series with labels `my_metric{env="prod"}`, with the following points:
* t=0: value is 5
* t=1m: value is 2
* t=2m: value is 5
-* t=3m: value is 7
+* t=3m: value is 8
* t=4m: no point
* t=5m: stale marker
* t=6m: native histogram with schema 1, sum -3, count 22 and bucket counts 5, 10 and 7
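As a further illustration of the expanding notation (again an invented series, assuming the `start+stepxcount` shorthand described above):

```
# 0+10x3 expands to the four samples 0 10 20 30 at t=0, 1m, 2m and 3m,
# i.e. the same as writing the values out explicitly.
load 1m
  demo_series{env="dev"} 0+10x3
```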
@@ -74,6 +74,7 @@ When loading a batch of classic histogram float series, you can optionally append
## `eval` command
`eval` runs a query against the test environment and asserts that the result is as expected.
+It requires the query to succeed without any (info or warn) annotations.
Both instant and range queries are supported.
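A hedged sketch of the two query forms (series name and values invented for illustration):

```
load 1m
  demo_metric{env="test"} 0 1 2 3

# Instant query: evaluated at a single timestamp.
eval instant at 2m demo_metric
  demo_metric{env="test"} 2

# Range query: evaluated at 0m, 1m, 2m and 3m, one expected value per step.
eval range from 0 to 3m step 1m demo_metric
  demo_metric{env="test"} 0 1 2 3
```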
@@ -110,11 +111,18 @@ eval range from 0 to 3m step 1m sum by (env) (my_metric)
{env="test"} 10 20 30 45
```
-Instant queries also support asserting that the series are returned in exactly the order specified: use `eval_ordered instant ...` instead of `eval instant ...`.
-This is not supported for range queries.
+Instant queries also support asserting that the series are returned in exactly
+the order specified: use `eval_ordered instant ...` instead of `eval instant
+...`. `eval_ordered` ignores any annotations. The assertion always fails for
+matrix results.
+
+To assert that a query succeeds with an info or warn annotation, use the
+`eval_info` or `eval_warn` commands, respectively.

-It is also possible to test that queries fail: use `eval_fail instant ...` or `eval_fail range ...`.
-`eval_fail` optionally takes an expected error message string or regexp to assert that the error message is as expected.
+To assert that a query fails, use the `eval_fail` command. `eval_fail` does not
+expect any result lines. Instead, it optionally accepts an expected error
+message string or regular expression to assert that the error message is as
+expected.

For example:
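The following sketch shows how these variants might be used together. The series names and values are invented, and the optional `expected_fail_message` line assumes promqltest's error-message assertion syntax:

```
load 1m
  my_metric{job="a"} 3
  my_metric{job="b"} 1
  my_counter{job="demo"} 0+60x10
  metric_one{job="demo"} 1
  metric_two{job="demo"} 2

# eval_ordered: the series must come back exactly in this order (instant queries only).
eval_ordered instant at 0m sort(my_metric)
  my_metric{job="b"} 1
  my_metric{job="a"} 3

# eval_info: the query must succeed and emit at least one info annotation; here rate()
# reports that my_counter does not look like a counter by name. eval_warn works the
# same way for warn annotations.
eval_info instant at 5m rate(my_counter[5m])
  {job="demo"} 1

# eval_fail: the query must fail (ceil() drops the metric names, leaving two series
# with the same labelset); asserting on the error message is optional.
eval_fail instant at 0m ceil({__name__=~"metric_one|metric_two"})
  expected_fail_message vector cannot contain metrics with the same labelset
```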

@@ -39,6 +39,7 @@ import (
"github.com/prometheus/prometheus/promql/parser/posrange"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/util/almost"
"github.com/prometheus/prometheus/util/annotations"
"github.com/prometheus/prometheus/util/convertnhcb"
"github.com/prometheus/prometheus/util/teststorage"
"github.com/prometheus/prometheus/util/testutil"
@@ -692,6 +693,24 @@ func (ev *evalCmd) expectMetric(pos int, m labels.Labels, vals ...parser.SequenceValue) {
ev.expected[h] = entry{pos: pos, vals: vals}
}
+// checkAnnotations asserts if the annotations match the expectations.
+func (ev *evalCmd) checkAnnotations(expr string, annos annotations.Annotations) error {
+countWarnings, countInfo := annos.CountWarningsAndInfo()
+switch {
+case ev.ordered:
+// Ignore annotations if testing for order.
+case !ev.warn && countWarnings > 0:
+return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", expr, ev.line, annos.AsErrors())
+case ev.warn && countWarnings == 0:
+return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", expr, ev.line)
+case !ev.info && countInfo > 0:
+return fmt.Errorf("unexpected info annotations evaluating query %q (line %d): %v", expr, ev.line, annos.AsErrors())
+case ev.info && countInfo == 0:
+return fmt.Errorf("expected info annotations evaluating query %q (line %d) but got none", expr, ev.line)
+}
+return nil
+}
// compareResult compares the result value with the defined expectation.
func (ev *evalCmd) compareResult(result parser.Value) error {
switch val := result.(type) {
@@ -1131,6 +1150,7 @@ func (t *test) execRangeEval(cmd *evalCmd, engine promql.QueryEngine) error {
if err != nil {
return fmt.Errorf("error creating range query for %q (line %d): %w", cmd.expr, cmd.line, err)
}
+defer q.Close()
res := q.Exec(t.context)
if res.Err != nil {
if cmd.fail {
@@ -1142,18 +1162,9 @@ func (t *test) execRangeEval(cmd *evalCmd, engine promql.QueryEngine) error {
if res.Err == nil && cmd.fail {
return fmt.Errorf("expected error evaluating query %q (line %d) but got none", cmd.expr, cmd.line)
}
-countWarnings, countInfo := res.Warnings.CountWarningsAndInfo()
-switch {
-case !cmd.warn && countWarnings > 0:
-return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", cmd.expr, cmd.line, res.Warnings)
-case cmd.warn && countWarnings == 0:
-return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", cmd.expr, cmd.line)
-case !cmd.info && countInfo > 0:
-return fmt.Errorf("unexpected info annotations evaluating query %q (line %d): %v", cmd.expr, cmd.line, res.Warnings)
-case cmd.info && countInfo == 0:
-return fmt.Errorf("expected info annotations evaluating query %q (line %d) but got none", cmd.expr, cmd.line)
+if err := cmd.checkAnnotations(cmd.expr, res.Warnings); err != nil {
+return err
}
-defer q.Close()
if err := cmd.compareResult(res.Value); err != nil {
return fmt.Errorf("error in %s %s (line %d): %w", cmd, cmd.expr, cmd.line, err)
@@ -1196,16 +1207,8 @@ func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine promql.QueryEngine) error {
if res.Err == nil && cmd.fail {
return fmt.Errorf("expected error evaluating query %q (line %d) but got none", iq.expr, cmd.line)
}
-countWarnings, countInfo := res.Warnings.CountWarningsAndInfo()
-switch {
-case !cmd.warn && countWarnings > 0:
-return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", iq.expr, cmd.line, res.Warnings)
-case cmd.warn && countWarnings == 0:
-return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", iq.expr, cmd.line)
-case !cmd.info && countInfo > 0:
-return fmt.Errorf("unexpected info annotations evaluating query %q (line %d): %v", iq.expr, cmd.line, res.Warnings)
-case cmd.info && countInfo == 0:
-return fmt.Errorf("expected info annotations evaluating query %q (line %d) but got none", iq.expr, cmd.line)
+if err := cmd.checkAnnotations(iq.expr, res.Warnings); err != nil {
+return err
}
err = cmd.compareResult(res.Value)
if err != nil {
@@ -1218,11 +1221,11 @@ func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine promql.QueryEngine) error {
if err != nil {
return fmt.Errorf("error creating range query for %q (line %d): %w", cmd.expr, cmd.line, err)
}
+defer q.Close()
rangeRes := q.Exec(t.context)
if rangeRes.Err != nil {
return fmt.Errorf("error evaluating query %q (line %d) in range mode: %w", iq.expr, cmd.line, rangeRes.Err)
}
-defer q.Close()
if cmd.ordered {
// Range queries are always sorted by labels, so skip this test case that expects results in a particular order.
return nil

@@ -353,6 +353,44 @@ eval_ordered instant at 50m sort(http_requests)
`,
expectedError: `error in eval sort(http_requests) (line 10): unexpected metric {__name__="http_requests", group="canary", instance="1", job="api-server"} in result, has value 400`,
},
"instant query with results expected to match provided order, result is in expected order and info annotation is ignored": {
input: testData + `
eval_ordered instant at 50m sort(rate(http_requests[10m]))
{group="production", instance="0", job="api-server"} 0.03333333333333333
{group="production", instance="1", job="api-server"} 0.06666666666666667
{group="canary", instance="0", job="api-server"} 0.1
{group="canary", instance="1", job="api-server"} 0.13333333333333333
`,
},
"instant query with expected info annotation": {
input: testData + `
eval_info instant at 50m sort(rate(http_requests[10m]))
{group="production", instance="0", job="api-server"} 0.03333333333333333
{group="production", instance="1", job="api-server"} 0.06666666666666667
{group="canary", instance="0", job="api-server"} 0.1
{group="canary", instance="1", job="api-server"} 0.13333333333333333
`,
},
"instant query with unexpected info annotation": {
input: testData + `
eval instant at 50m sort(rate(http_requests[10m]))
{group="production", instance="0", job="api-server"} 0.03333333333333333
{group="production", instance="1", job="api-server"} 0.06666666666666667
{group="canary", instance="0", job="api-server"} 0.1
{group="canary", instance="1", job="api-server"} 0.13333333333333333
`,
expectedError: `unexpected info annotations evaluating query "sort(rate(http_requests[10m]))" (line 10): [PromQL info: metric might not be a counter, name does not end in _total/_sum/_count/_bucket: "http_requests"]`,
},
"instant query with unexpectedly missing warn annotation": {
input: testData + `
eval_warn instant at 50m sort(rate(http_requests[10m]))
{group="production", instance="0", job="api-server"} 0.03333333333333333
{group="production", instance="1", job="api-server"} 0.06666666666666667
{group="canary", instance="0", job="api-server"} 0.1
{group="canary", instance="1", job="api-server"} 0.13333333333333333
`,
expectedError: `expected warnings evaluating query "sort(rate(http_requests[10m]))" (line 10) but got none`,
},
"instant query with invalid timestamp": {
input: `eval instant at abc123 vector(0)`,
expectedError: `error in eval vector(0) (line 1): invalid timestamp definition "abc123": not a valid duration string: "abc123"`,