Neeraj Gartia 2025-03-05 16:25:35 +01:00 committed by GitHub
commit eafaab3dc4
2 changed files with 448 additions and 15 deletions


@@ -20,6 +20,7 @@ import (
"fmt"
"io/fs"
"math"
"slices"
"sort"
"strconv"
"strings"
@@ -50,6 +51,7 @@ var (
patLoad = regexp.MustCompile(`^load(?:_(with_nhcb))?\s+(.+?)$`)
patEvalInstant = regexp.MustCompile(`^eval(?:_(fail|warn|ordered|info))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`)
patEvalRange = regexp.MustCompile(`^eval(?:_(fail|warn|info))?\s+range\s+from\s+(.+)\s+to\s+(.+)\s+step\s+(.+?)\s+(.+)$`)
patExpect = regexp.MustCompile(`^expect\s+(ordered|fail|warn|no_warn|info|no_info)(?:\s+(regex|msg):(.+))?$`)
)
const (
@@ -264,6 +266,41 @@ func parseSeries(defLine string, line int) (labels.Labels, []parser.SequenceValu
return metric, vals, nil
}
func parseExpect(defLine string) (expectCmdType, expectCmd, error) {
expectParts := patExpect.FindStringSubmatch(strings.TrimSpace(defLine))
expCmd := expectCmd{}
if expectParts == nil {
return 0, expCmd, fmt.Errorf("invalid expect statement, must match `%s`", patExpect.String())
}
var (
mode = expectParts[1]
hasOptionalPart = expectParts[2] != ""
matchAnyRegex = regexp.MustCompile(`^.*$`)
)
expectType, ok := expectTypeStr[mode]
if !ok {
return 0, expCmd, fmt.Errorf("invalid expected error/annotation type %s", mode)
}
if hasOptionalPart {
switch expectParts[2] {
case "msg":
expCmd.message = strings.TrimSpace(expectParts[3])
case "regex":
pattern := strings.TrimSpace(expectParts[3])
regex, err := regexp.Compile(pattern)
if err != nil {
return 0, expCmd, fmt.Errorf("invalid regex %s for %s", pattern, mode)
}
expCmd.regex = regex
default:
return 0, expCmd, fmt.Errorf("invalid token %s after %s", expectParts[2], mode)
}
} else {
expCmd.regex = matchAnyRegex
}
return expectType, expCmd, nil
}
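For orientation, a minimal standalone sketch (not part of this change; the sample lines are made up) showing how the patExpect pattern splits an expect line into its type, optional matcher kind (regex or msg), and matcher value, which is what parseExpect consumes:

package main

import (
    "fmt"
    "regexp"
)

func main() {
    // Same pattern as patExpect above.
    pat := regexp.MustCompile(`^expect\s+(ordered|fail|warn|no_warn|info|no_info)(?:\s+(regex|msg):(.+))?$`)

    samples := []string{
        "expect warn",                        // bare type: groups 2 and 3 stay empty, so any annotation text matches
        "expect info msg: ignored histogram", // literal message comparison after trimming spaces
        "expect fail regex: .*labelset.*",    // regex matched against the error text
    }
    for _, line := range samples {
        m := pat.FindStringSubmatch(line)
        fmt.Printf("type=%q kind=%q value=%q\n", m[1], m[2], m[3])
    }
}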
func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) {
instantParts := patEvalInstant.FindStringSubmatch(lines[i])
rangeParts := patEvalRange.FindStringSubmatch(lines[i])
@@ -376,6 +413,17 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) {
break
}
// This would still allow a metric named 'expect' if it is written as 'expect{}'.
if strings.Split(defLine, " ")[0] == "expect" {
annoType, expectedAnno, err := parseExpect(defLine)
if err != nil {
return i, nil, formatErr("%w", err)
}
cmd.expectedCmds[annoType] = append(cmd.expectedCmds[annoType], expectedAnno)
j--
continue
}
if f, err := parseNumber(defLine); err == nil {
cmd.expect(0, parser.SequenceValue{Value: f})
break
@@ -633,11 +681,60 @@ type evalCmd struct {
expectedFailMessage string
expectedFailRegexp *regexp.Regexp
expectedCmds map[expectCmdType][]expectCmd
metrics map[uint64]labels.Labels
expectScalar bool
expected map[uint64]entry
}
func (ev *evalCmd) isOrdered() bool {
return ev.ordered || (len(ev.expectedCmds[Ordered]) > 0)
}
func (ev *evalCmd) isFail() bool {
return ev.fail || (len(ev.expectedCmds[Fail]) > 0)
}
type expectCmdType byte
const (
Ordered expectCmdType = iota
Fail
Warn
NoWarn
Info
NoInfo
)
var expectTypeStr = map[string]expectCmdType{
"fail": Fail,
"ordered": Ordered,
"warn": Warn,
"no_warn": NoWarn,
"info": Info,
"no_info": NoInfo,
}
type expectCmd struct {
message string
regex *regexp.Regexp
}
func (e *expectCmd) CheckMatch(str string) bool {
if e.regex == nil {
return e.message == str
}
return e.regex.MatchString(str)
}
func (e *expectCmd) String() string {
if e.regex == nil {
return e.message
}
return e.regex.String()
}
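As a usage sketch (standalone, not part of the change; it copies the small type above), the rule applied by CheckMatch: a nil regex means the annotation text must equal the message exactly, otherwise the regex is tested against it:

package main

import (
    "fmt"
    "regexp"
)

// Stripped-down copy of expectCmd from the diff above.
type expectCmd struct {
    message string
    regex   *regexp.Regexp
}

func (e *expectCmd) CheckMatch(str string) bool {
    if e.regex == nil {
        return e.message == str // msg form: exact comparison
    }
    return e.regex.MatchString(str) // regex form (or the match-anything default)
}

func main() {
    byMsg := expectCmd{message: "PromQL info: ignored histogram in min aggregation"}
    byRegex := expectCmd{regex: regexp.MustCompile(`ignored histogram`)}

    anno := "PromQL info: ignored histogram in min aggregation"
    fmt.Println(byMsg.CheckMatch(anno))                             // true: strings are identical
    fmt.Println(byRegex.CheckMatch(anno))                           // true: regex finds a match
    fmt.Println(byMsg.CheckMatch("PromQL info: ignored histogram")) // false: prefix only, not equal
}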
type entry struct {
pos int
vals []parser.SequenceValue
@@ -653,8 +750,9 @@ func newInstantEvalCmd(expr string, start time.Time, line int) *evalCmd {
start: start,
line: line,
metrics: map[uint64]labels.Labels{},
expected: map[uint64]entry{},
expectedCmds: map[expectCmdType][]expectCmd{},
}
}
@@ -667,8 +765,9 @@ func newRangeEvalCmd(expr string, start, end time.Time, step time.Duration, line
line: line,
isRange: true,
metrics: map[uint64]labels.Labels{},
expected: map[uint64]entry{},
expectedCmds: map[expectCmdType][]expectCmd{},
}
}
@@ -693,21 +792,71 @@ func (ev *evalCmd) expectMetric(pos int, m labels.Labels, vals ...parser.Sequenc
ev.expected[h] = entry{pos: pos, vals: vals}
}
// validateExpectedAnnotations validates that the expected messages and regexes match the actual annotations.
func validateExpectedAnnotations(expr string, expectedAnnotations []expectCmd, actualAnnotations []string, line int, annotationType string) error {
if len(expectedAnnotations) == 0 {
return nil
}
if len(actualAnnotations) == 0 {
return fmt.Errorf("expected %s annotations but none were found for query %q (line %d)", annotationType, expr, line)
}
// Check if all expected annotations are found in actual.
for _, e := range expectedAnnotations {
matchFound := slices.ContainsFunc(actualAnnotations, e.CheckMatch)
if !matchFound {
return fmt.Errorf("expected %s annotation %q but no matching annotation was found for query %q (line %d)", annotationType, e.String(), expr, line)
}
}
// Check if all actual annotations have a corresponding expected annotation.
for _, anno := range actualAnnotations {
matchFound := slices.ContainsFunc(expectedAnnotations, func(e expectCmd) bool {
return e.CheckMatch(anno)
})
if !matchFound {
return fmt.Errorf("unexpected %s annotation %q found for query %s (line %d)", annotationType, anno, expr, line)
}
}
return nil
}
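The function above checks containment in both directions. A self-contained sketch of that idea, with plain strings standing in for the expectCmd matchers and a hypothetical matches helper:

package main

import (
    "fmt"
    "slices"
    "strings"
)

func main() {
    expected := []string{"encountered a mix", "histograms and floats"}
    actual := []string{"PromQL warning: encountered a mix of histograms and floats for aggregation"}

    // Hypothetical matcher; the real code calls expectCmd.CheckMatch.
    matches := func(want, got string) bool { return strings.Contains(got, want) }

    // Direction 1: every expected entry must match at least one actual annotation.
    for _, want := range expected {
        if !slices.ContainsFunc(actual, func(got string) bool { return matches(want, got) }) {
            fmt.Printf("expected annotation %q not found\n", want)
        }
    }
    // Direction 2: every actual annotation must be covered by some expected entry.
    for _, got := range actual {
        if !slices.ContainsFunc(expected, func(want string) bool { return matches(want, got) }) {
            fmt.Printf("unexpected annotation %q\n", got)
        }
    }
}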
// checkAnnotations asserts that the annotations match the expectations.
func (ev *evalCmd) checkAnnotations(expr string, annos annotations.Annotations) error {
countWarnings, countInfo := annos.CountWarningsAndInfo()
switch {
case ev.ordered:
// Ignore annotations if testing for order.
case !ev.warn && countWarnings > 0:
return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", expr, ev.line, annos.AsErrors())
case ev.warn && countWarnings == 0:
return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", expr, ev.line)
case !ev.info && countInfo > 0:
return fmt.Errorf("unexpected info annotations evaluating query %q (line %d): %v", expr, ev.line, annos.AsErrors())
case ev.info && countInfo == 0:
return fmt.Errorf("expected info annotations evaluating query %q (line %d) but got none", expr, ev.line)
}
var warnings, infos []string
for _, err := range annos {
switch {
case errors.Is(err, annotations.PromQLWarning):
warnings = append(warnings, err.Error())
case errors.Is(err, annotations.PromQLInfo):
infos = append(infos, err.Error())
}
}
if err := validateExpectedAnnotations(expr, ev.expectedCmds[Warn], warnings, ev.line, "warn"); err != nil {
return err
}
if err := validateExpectedAnnotations(expr, ev.expectedCmds[Info], infos, ev.line, "info"); err != nil {
return err
}
if ev.expectedCmds[NoWarn] != nil && len(warnings) > 0 {
return fmt.Errorf("unexpected warning annotations evaluating query %q (line %d): %v", expr, ev.line, annos.AsErrors())
}
if ev.expectedCmds[NoInfo] != nil && len(infos) > 0 {
return fmt.Errorf("unexpected info annotations evaluating query %q (line %d): %v", expr, ev.line, annos.AsErrors())
}
return nil
}
@@ -715,7 +864,7 @@ func (ev *evalCmd) checkAnnotations(expr string, annos annotations.Annotations)
func (ev *evalCmd) compareResult(result parser.Value) error {
switch val := result.(type) {
case promql.Matrix:
-if ev.ordered {
+if ev.isOrdered() {
return errors.New("expected ordered result, but query returned a matrix")
}
@@ -806,7 +955,7 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
return fmt.Errorf("unexpected metric %s in result, has value %v", v.Metric, v.F)
}
exp := ev.expected[fp]
-if ev.ordered && exp.pos != pos+1 {
+if ev.isOrdered() && exp.pos != pos+1 {
return fmt.Errorf("expected metric %s with %v at position %d but was at %d", v.Metric, exp.vals, exp.pos, pos+1)
}
exp0 := exp.vals[0]
@@ -978,6 +1127,16 @@ func (ev *evalCmd) checkExpectedFailure(actual error) error {
}
}
expFail, exists := ev.expectedCmds[Fail]
if exists && len(expFail) > 0 {
if len(expFail) > 1 {
return fmt.Errorf("expected more than one error evaluating query %q (line %d), but got only one", ev.expr, ev.line)
}
if !expFail[0].CheckMatch(actual.Error()) {
return fmt.Errorf("expected error matching %q evaluating query %q (line %d), but got: %s", expFail[0].String(), ev.expr, ev.line, actual.Error())
}
}
// We're not expecting a particular error, or we got the error we expected.
// This test passes.
return nil
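A minimal sketch of the rule enforced in the block above (the checkFail helper and its message texts are hypothetical): at most one 'expect fail' matcher may be given per eval, and when present it must accept the text of the error the engine actually returned:

package main

import (
    "errors"
    "fmt"
    "regexp"
)

// checkFail mirrors the idea: reject more than one matcher, otherwise
// require the single matcher to accept the actual error text.
func checkFail(matchers []*regexp.Regexp, actual error) error {
    if len(matchers) > 1 {
        return errors.New("only one expect fail matcher may be given per eval")
    }
    if len(matchers) == 1 && !matchers[0].MatchString(actual.Error()) {
        return fmt.Errorf("expected error matching %q, but got: %s", matchers[0], actual)
    }
    return nil // no matcher, or the matcher accepted the error
}

func main() {
    err := errors.New("vector cannot contain metrics with the same labelset")
    fmt.Println(checkFail([]*regexp.Regexp{regexp.MustCompile(`.*labelset.*`)}, err)) // <nil>
    fmt.Println(checkFail([]*regexp.Regexp{regexp.MustCompile(`boom`)}, err))         // mismatch error
}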
@@ -1153,13 +1312,13 @@ func (t *test) execRangeEval(cmd *evalCmd, engine promql.QueryEngine) error {
defer q.Close()
res := q.Exec(t.context)
if res.Err != nil {
-if cmd.fail {
+if cmd.isFail() {
return cmd.checkExpectedFailure(res.Err)
}
return fmt.Errorf("error evaluating query %q (line %d): %w", cmd.expr, cmd.line, res.Err)
}
-if res.Err == nil && cmd.fail {
+if res.Err == nil && cmd.isFail() {
return fmt.Errorf("expected error evaluating query %q (line %d) but got none", cmd.expr, cmd.line)
}
if err := cmd.checkAnnotations(cmd.expr, res.Warnings); err != nil {
@@ -1195,7 +1354,7 @@ func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine promq
defer q.Close()
res := q.Exec(t.context)
if res.Err != nil {
-if cmd.fail {
+if cmd.isFail() {
if err := cmd.checkExpectedFailure(res.Err); err != nil {
return err
}
@@ -1204,7 +1363,7 @@ func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine promq
}
return fmt.Errorf("error evaluating query %q (line %d): %w", iq.expr, cmd.line, res.Err)
}
-if res.Err == nil && cmd.fail {
+if res.Err == nil && cmd.isFail() {
return fmt.Errorf("expected error evaluating query %q (line %d) but got none", iq.expr, cmd.line)
}
if err := cmd.checkAnnotations(iq.expr, res.Warnings); err != nil {
@@ -1226,7 +1385,7 @@ func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine promq
if rangeRes.Err != nil {
return fmt.Errorf("error evaluating query %q (line %d) in range mode: %w", iq.expr, cmd.line, rangeRes.Err)
}
-if cmd.ordered {
+if cmd.isOrdered() {
// Range queries are always sorted by labels, so skip this test case that expects results in a particular order.
return nil
}


@@ -374,6 +374,7 @@ eval_info instant at 50m sort(rate(http_requests[10m]))
"instant query with unexpected info annotation": {
input: testData + `
eval instant at 50m sort(rate(http_requests[10m]))
expect no_info
{group="production", instance="0", job="api-server"} 0.03333333333333333
{group="production", instance="1", job="api-server"} 0.06666666666666667
{group="canary", instance="0", job="api-server"} 0.1
@@ -637,6 +638,279 @@ eval range from 0 to 5m step 5m testmetric
3 @[30000]
3 @[60000]`,
},
"instant query expected to fail with specific error message, and query fails with that error (with new eval syntax)": {
input: `
load 5m
testmetric1{src="a",dst="b"} 0
testmetric2{src="a",dst="b"} 1
eval_fail instant at 0m ceil({__name__=~'testmetric1|testmetric2'})
expect fail msg: vector cannot contain metrics with the same labelset
`,
},
"instant query expected to fail with specific error message, and query does not fail with that error (with new eval syntax)": {
input: `
load 5m
testmetric1{src="a",dst="b"} 0
testmetric2{src="a",dst="b"} 1
eval_fail instant at 0m ceil({__name__=~'testmetric1|testmetric2'})
expect fail msg: something went wrong
`,
expectedError: `expected error matching "something went wrong" evaluating query "ceil({__name__=~'testmetric1|testmetric2'})" (line 6), but got: vector cannot contain metrics with the same labelset`,
},
"instant query expected to fail with specific error regex, and query fails with that error (with new eval syntax)": {
input: `
load 5m
testmetric1{src="a",dst="b"} 0
testmetric2{src="a",dst="b"} 1
eval_fail instant at 0m ceil({__name__=~'testmetric1|testmetric2'})
expect fail regex: .*labelset.*
`,
},
"instant query expected to fail with specific error regex, and query does not fail with that error (with new eval syntax)": {
input: `
load 5m
testmetric1{src="a",dst="b"} 0
testmetric2{src="a",dst="b"} 1
eval_fail instant at 0m ceil({__name__=~'testmetric1|testmetric2'})
expect fail regex: something went (wrong|boom)
`,
expectedError: `expected error matching "something went (wrong|boom)" evaluating query "ceil({__name__=~'testmetric1|testmetric2'})" (line 6), but got: vector cannot contain metrics with the same labelset`,
},
"instant query expected to only to fail (with new eval syntax)": {
input: `
load 5m
testmetric1{src="a",dst="b"} 0
testmetric2{src="a",dst="b"} 1
eval_fail instant at 0m ceil({__name__=~'testmetric1|testmetric2'})
expect fail
`,
},
"invalid fail syntax with error as token instead of regex or msg(with new eval syntax)": {
input: `
load 5m
testmetric1{src="a",dst="b"} 0
testmetric2{src="a",dst="b"} 1
eval_fail instant at 0m ceil({__name__=~'testmetric1|testmetric2'})
expect fail error: something went wrong
`,
expectedError: "error in eval ceil({__name__=~'testmetric1|testmetric2'}) (line 7): invalid expect statement, must match `^expect\\s+(ordered|fail|warn|no_warn|info|no_info)(?:\\s+(regex|msg):(.+))?$`",
},
"instant query expected not to care about annotations (with new eval syntax)": {
input: `
load 5m
metric{src="a"} {{schema:0 sum:1 count:2}}
metric{src="b"} 1
eval_warn instant at 0m sum(metric)
`,
},
"instant query expected to only have warn annotation (with new eval syntax)": {
input: `
load 5m
metric{src="a"} {{schema:0 sum:1 count:2}}
metric{src="b"} 1
eval_warn instant at 0m sum(metric)
expect warn
`,
},
"instant query expected to have warn annotation with specific message (with new eval syntax)": {
input: `
load 5m
metric{src="a"} {{schema:0 sum:1 count:2}}
metric{src="b"} 1
eval_warn instant at 0m sum(metric)
expect warn msg: PromQL warning: encountered a mix of histograms and floats for aggregation
`,
},
"instant query expected to have warn but not with the specific message (with new eval syntax)": {
input: `
load 5m
metric{src="a"} {{schema:0 sum:1 count:2}}
metric{src="b"} 1
eval_warn instant at 0m sum(metric)
expect warn msg: PromQL warning: encountered a mix
`,
expectedError: `expected warn annotation "PromQL warning: encountered a mix" but no matching annotation was found for query "sum(metric)" (line 6)`,
},
"instant query expected to have warn annotation with specific regex (with new eval syntax)": {
input: `
load 5m
metric{src="a"} {{schema:0 sum:1 count:2}}
metric{src="b"} 1
eval_warn instant at 0m sum(metric)
expect warn regex: PromQL warning: encountered a mix
`,
},
"instant query expected to have warn but not with the specific regex (with new eval syntax)": {
input: `
load 5m
metric{src="a"} {{schema:0 sum:1 count:2}}
metric{src="b"} 1
eval_warn instant at 0m sum(metric)
expect warn regex: PromQL warning: something went (wrong|boom)
`,
expectedError: `expected warn annotation "PromQL warning: something went (wrong|boom)" but no matching annotation was found for query "sum(metric)" (line 6)`,
},
"instant query expected to have warn annotation and no info annotation (with new eval syntax)": {
input: `
load 5m
metric{src="a"} {{schema:0 sum:1 count:2}}
metric{src="b"} 1
eval_warn instant at 0m sum(metric)
expect warn
expect no_info
`,
},
"instant query expected to warn and info annotation but got only warn annotation (with new eval syntax)": {
input: `
load 5m
metric{src="a"} {{schema:0 sum:1 count:2}}
metric{src="b"} 1
eval_warn instant at 0m sum(metric)
expect warn
expect info
`,
expectedError: `expected info annotations but none were found for query "sum(metric)" (line 6)`,
},
"instant query expected to have no warn but got warn annotation (with new eval syntax)": {
input: `
load 5m
metric{src="a"} {{schema:0 sum:1 count:2}}
metric{src="b"} 1
eval_warn instant at 0m sum(metric)
expect no_warn
`,
expectedError: `unexpected warning annotations evaluating query "sum(metric)" (line 6): [PromQL warning: encountered a mix of histograms and floats for aggregation]`,
},
"instant query expected to only have info annotation (with new eval syntax)": {
input: `
load 5m
metric{src="a"} {{schema:0 sum:1 count:2}}
metric{src="b"} 1
eval_info instant at 0m min(metric)
expect info
{} 1
`,
},
"instant query expected have info annotation with specific message (with new eval syntax)": {
input: `
load 5m
metric{src="a"} {{schema:0 sum:1 count:2}}
metric{src="b"} 1
eval_info instant at 0m min(metric)
expect info msg: PromQL info: ignored histogram in min aggregation
{} 1
`,
},
"instant query expected to have info annotation but not with the specific message (with new eval syntax)": {
input: `
load 5m
metric{src="a"} {{schema:0 sum:1 count:2}}
metric{src="b"} 1
eval_info instant at 0m min(metric)
expect info msg: something went wrong
{} 1
`,
expectedError: `expected info annotation "something went wrong" but no matching annotation was found for query "min(metric)" (line 6)`,
},
"instant query expected to have info annotation with specific regex (with new eval syntax)": {
input: `
load 5m
metric{src="a"} {{schema:0 sum:1 count:2}}
metric{src="b"} 1
eval_info instant at 0m min(metric)
expect info regex: PromQL info: ignored histogram
{} 1
`,
},
"instant query expected to only have warn but not with the specific regex (with new eval syntax)": {
input: `
load 5m
metric{src="a"} {{schema:0 sum:1 count:2}}
metric{src="b"} 1
eval_info instant at 0m min(metric)
expect info regex: something went (wrong|boom)
{} 1
`,
expectedError: `expected info annotation "something went (wrong|boom)" but no matching annotation was found for query "min(metric)" (line 6)`,
},
"instant query expected to have info annotation and no warn annotation (with new eval syntax)": {
input: `
load 5m
metric{src="a"} {{schema:0 sum:1 count:2}}
metric{src="b"} 1
eval_info instant at 0m min(metric)
expect info
expect no_warn
{} 1
`,
},
"instant query expected to have warn and info annotation but got only info annotation (with new eval syntax)": {
input: `
load 5m
metric{src="a"} {{schema:0 sum:1 count:2}}
metric{src="b"} 1
eval_info instant at 0m min(metric)
expect info
expect warn
{} 1
`,
expectedError: `expected warn annotations but none were found for query "min(metric)" (line 6)`,
},
"instant query expected to have no info annotation but got info annotation (with new eval syntax)": {
input: `
load 5m
metric{src="a"} {{schema:0 sum:1 count:2}}
metric{src="b"} 1
eval_info instant at 0m min(metric)
expect no_info
{} 1
`,
expectedError: `unexpected info annotations evaluating query "min(metric)" (line 6): [PromQL info: ignored histogram in min aggregation]`,
},
"instant query with results expected to match provided order, and result is in expected order (with new eval syntax)": {
input: testData + `
eval_ordered instant at 50m sort(http_requests)
expect ordered
http_requests{group="production", instance="0", job="api-server"} 100
http_requests{group="production", instance="1", job="api-server"} 200
http_requests{group="canary", instance="0", job="api-server"} 300
http_requests{group="canary", instance="1", job="api-server"} 400
`,
},
"instant query with results expected to match provided order, but result is out of order (with new eval syntax)": {
input: testData + `
eval_ordered instant at 50m sort(http_requests)
expect ordered
http_requests{group="production", instance="0", job="api-server"} 100
http_requests{group="production", instance="1", job="api-server"} 200
http_requests{group="canary", instance="1", job="api-server"} 400
http_requests{group="canary", instance="0", job="api-server"} 300
`,
expectedError: `error in eval sort(http_requests) (line 10): expected metric {__name__="http_requests", group="canary", instance="0", job="api-server"} with [300.000000] at position 4 but was at 3`,
},
}
for name, testCase := range testCases {