Merge pull request #689 from prometheus/fabxc/qltest

Add basic testing language, migrate tests
Fabian Reinartz 2015-05-18 19:22:48 +02:00
commit ac4d63b833
11 changed files with 1721 additions and 2142 deletions
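
For orientation, a minimal sketch of how the new testing language can be driven from a Go test, assuming the NewTest, Run, and Close API added in promql/test.go below. The inline script follows the syntax of the migrated testdata files; the test name and the single-series script are illustrative and not part of this change.

package promql

import "testing"

// A hypothetical driver: load one series sampled every 5m, then check an
// instant aggregation 50 minutes in.
func TestScriptSketch(t *testing.T) {
	input := `
load 5m
	http_requests{job="api-server", instance="0", group="production"} 0+10x10

eval instant at 50m SUM(http_requests) BY (job)
	{job="api-server"} 100
`
	test, err := NewTest(t, input)
	if err != nil {
		t.Fatal(err)
	}
	defer test.Close()

	if err := test.Run(); err != nil {
		t.Error(err)
	}
}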

View file

@@ -279,20 +279,25 @@ func (ng *Engine) NewRangeQuery(qs string, start, end clientmodel.Timestamp, int
if err != nil {
return nil, err
}
qry := ng.newQuery(expr, start, end, interval)
qry.q = qs
return qry, nil
}
func (ng *Engine) newQuery(expr Expr, start, end clientmodel.Timestamp, interval time.Duration) *query {
es := &EvalStmt{
Expr: expr,
Start: start,
End: end,
Interval: interval,
}
qry := &query{
q: qs,
stmts: Statements{es},
ng: ng,
stats: stats.NewTimerGroup(),
}
return qry, nil
return qry
}
// testStmt is an internal helper statement that allows execution

View file

@@ -104,6 +104,8 @@ const (
itemString
itemNumber
itemDuration
itemBlank
itemTimes
operatorsStart
// Operators.
@@ -193,6 +195,8 @@ var itemTypeStr = map[itemType]string{
itemComma: ",",
itemAssign: "=",
itemSemicolon: ";",
itemBlank: "_",
itemTimes: "x",
itemSUB: "-",
itemADD: "+",
@@ -214,6 +218,9 @@ func init() {
for s, ty := range key {
itemTypeStr[ty] = s
}
// Special numbers.
key["inf"] = itemNumber
key["nan"] = itemNumber
}
func (t itemType) String() string {
@@ -277,6 +284,10 @@ type lexer struct {
braceOpen bool // Whether a { is opened.
bracketOpen bool // Whether a [ is opened.
stringOpen rune // Quote rune of the string currently being read.
// seriesDesc is set when a series description for the testing
// language is lexed.
seriesDesc bool
}
// next returns the next rune in the input.
@@ -450,21 +461,6 @@ func lexStatements(l *lexer) stateFn {
case r == '"' || r == '\'':
l.stringOpen = r
return lexString
case r == 'N' || r == 'n' || r == 'I' || r == 'i':
n2 := strings.ToLower(l.input[l.pos:])
if len(n2) < 3 || !isAlphaNumeric(rune(n2[2])) {
if (r == 'N' || r == 'n') && strings.HasPrefix(n2, "an") {
l.pos += 2
l.emit(itemNumber)
break
}
if (r == 'I' || r == 'i') && strings.HasPrefix(n2, "nf") {
l.pos += 2
l.emit(itemNumber)
break
}
}
fallthrough
case isAlpha(r) || r == ':':
l.backup()
return lexKeywordOrIdentifier
@@ -544,6 +540,10 @@ func lexInsideBraces(l *lexer) stateFn {
case r == '}':
l.emit(itemRightBrace)
l.braceOpen = false
if l.seriesDesc {
return lexValueSequence
}
return lexStatements
default:
return l.errorf("unexpected character inside braces: %q", r)
@@ -551,6 +551,34 @@ func lexInsideBraces(l *lexer) stateFn {
return lexInsideBraces
}
// lexValueSequence scans a value sequence of a series description.
func lexValueSequence(l *lexer) stateFn {
switch r := l.next(); {
case r == eof:
return lexStatements
case isSpace(r):
lexSpace(l)
case r == '+':
l.emit(itemADD)
case r == '-':
l.emit(itemSUB)
case r == 'x':
l.emit(itemTimes)
case r == '_':
l.emit(itemBlank)
case unicode.IsDigit(r) || (r == '.' && unicode.IsDigit(l.peek())):
l.backup()
lexNumber(l)
case isAlpha(r):
l.backup()
// We might lex invalid items here but these will be caught by the parser.
return lexKeywordOrIdentifier
default:
return l.errorf("unexpected character in series sequence: %q", r)
}
return lexValueSequence
}
// lexString scans a quoted string. The initial quote has already been seen.
func lexString(l *lexer) stateFn {
Loop:
@@ -650,7 +678,7 @@ func (l *lexer) scanNumber() bool {
l.acceptRun("0123456789")
}
// Next thing must not be alphanumeric.
if isAlphaNumeric(l.peek()) {
if isAlphaNumeric(l.peek()) && !l.seriesDesc {
return false
}
return true
@@ -689,6 +717,9 @@ Loop:
break Loop
}
}
if l.seriesDesc && l.peek() != '{' {
return lexValueSequence
}
return lexStatements
}

View file

@@ -14,14 +14,16 @@
package promql
import (
"fmt"
"reflect"
"testing"
)
var tests = []struct {
input string
expected []item
fail bool
input string
expected []item
fail bool
seriesDesc bool // Whether to lex a series description.
}{
// Test common stuff.
{
@@ -354,6 +356,42 @@ var tests = []struct {
}, {
input: `]`, fail: true,
},
// Test series description.
{
input: `{} _ 1 x .3`,
expected: []item{
{itemLeftBrace, 0, `{`},
{itemRightBrace, 1, `}`},
{itemBlank, 3, `_`},
{itemNumber, 5, `1`},
{itemTimes, 7, `x`},
{itemNumber, 9, `.3`},
},
seriesDesc: true,
},
{
input: `metric +Inf Inf NaN`,
expected: []item{
{itemIdentifier, 0, `metric`},
{itemADD, 7, `+`},
{itemNumber, 8, `Inf`},
{itemNumber, 12, `Inf`},
{itemNumber, 16, `NaN`},
},
seriesDesc: true,
},
{
input: `metric 1+1x4`,
expected: []item{
{itemIdentifier, 0, `metric`},
{itemNumber, 7, `1`},
{itemADD, 8, `+`},
{itemNumber, 9, `1`},
{itemTimes, 10, `x`},
{itemNumber, 11, `4`},
},
seriesDesc: true,
},
}
// TestLexer tests basic functionality of the lexer. More elaborate tests are implemented
@@ -361,6 +399,7 @@ var tests = []struct {
func TestLexer(t *testing.T) {
for i, test := range tests {
l := lex(test.input)
l.seriesDesc = test.seriesDesc
out := []item{}
for it := range l.items {
@@ -370,20 +409,32 @@ func TestLexer(t *testing.T) {
lastItem := out[len(out)-1]
if test.fail {
if lastItem.typ != itemError {
t.Fatalf("%d: expected lexing error but did not fail", i)
t.Logf("%d: input %q", i, test.input)
t.Fatalf("expected lexing error but did not fail")
}
continue
}
if lastItem.typ == itemError {
t.Fatalf("%d: unexpected lexing error: %s", i, lastItem)
t.Logf("%d: input %q", i, test.input)
t.Fatalf("unexpected lexing error at position %d: %s", lastItem.pos, lastItem)
}
if !reflect.DeepEqual(lastItem, item{itemEOF, Pos(len(test.input)), ""}) {
t.Fatalf("%d: lexing error: expected output to end with EOF item", i)
t.Logf("%d: input %q", i, test.input)
t.Fatalf("lexing error: expected output to end with EOF item.\ngot:\n%s", expectedList(out))
}
out = out[:len(out)-1]
if !reflect.DeepEqual(out, test.expected) {
t.Errorf("%d: lexing mismatch:\nexpected: %#v\n-----\ngot: %#v", i, test.expected, out)
t.Logf("%d: input %q", i, test.input)
t.Fatalf("lexing mismatch:\nexpected:\n%s\ngot:\n%s", expectedList(test.expected), expectedList(out))
}
}
}
func expectedList(exp []item) string {
s := ""
for _, it := range exp {
s += fmt.Sprintf("\t%#v\n", it)
}
return s
}

View file

@@ -70,6 +70,14 @@ func ParseExpr(input string) (Expr, error) {
return expr, err
}
// parseSeriesDesc parses the description of a time series.
func parseSeriesDesc(input string) (clientmodel.Metric, []sequenceValue, error) {
p := newParser(input)
p.lex.seriesDesc = true
return p.parseSeriesDesc()
}
// newParser returns a new parser.
func newParser(input string) *parser {
p := &parser{
@@ -112,6 +120,105 @@ func (p *parser) parseExpr() (expr Expr, err error) {
return
}
// sequenceValue is an omittable value in a sequence of time series values.
type sequenceValue struct {
value clientmodel.SampleValue
omitted bool
}
func (v sequenceValue) String() string {
if v.omitted {
return "_"
}
return v.value.String()
}
// parseSeriesDesc parses a description of a time series into its metric and value sequence.
func (p *parser) parseSeriesDesc() (m clientmodel.Metric, vals []sequenceValue, err error) {
defer p.recover(&err)
name := ""
m = clientmodel.Metric{}
t := p.peek().typ
if t == itemIdentifier || t == itemMetricIdentifier {
name = p.next().val
t = p.peek().typ
}
if t == itemLeftBrace {
m = clientmodel.Metric(p.labelSet())
}
if name != "" {
m[clientmodel.MetricNameLabel] = clientmodel.LabelValue(name)
}
const ctx = "series values"
for {
if p.peek().typ == itemEOF {
break
}
// Extract blanks.
if p.peek().typ == itemBlank {
p.next()
times := uint64(1)
if p.peek().typ == itemTimes {
p.next()
times, err = strconv.ParseUint(p.expect(itemNumber, ctx).val, 10, 64)
if err != nil {
p.errorf("invalid repetition in %s: %s", ctx, err)
}
}
for i := uint64(0); i < times; i++ {
vals = append(vals, sequenceValue{omitted: true})
}
continue
}
// Extract values.
sign := 1.0
if t := p.peek().typ; t == itemSUB || t == itemADD {
if p.next().typ == itemSUB {
sign = -1
}
}
k := sign * p.number(p.expect(itemNumber, ctx).val)
vals = append(vals, sequenceValue{
value: clientmodel.SampleValue(k),
})
// If there are no offset repetitions specified, proceed with the next value.
if t := p.peek().typ; t == itemNumber || t == itemBlank {
continue
} else if t == itemEOF {
break
} else if t != itemADD && t != itemSUB {
p.errorf("expected next value or relative expansion in %s but got %s", ctx, t.desc())
}
// Expand the repeated offsets into values.
sign = 1.0
if p.next().typ == itemSUB {
sign = -1.0
}
offset := sign * p.number(p.expect(itemNumber, ctx).val)
p.expect(itemTimes, ctx)
times, err := strconv.ParseUint(p.expect(itemNumber, ctx).val, 10, 64)
if err != nil {
p.errorf("invalid repetition in %s: %s", ctx, err)
}
for i := uint64(0); i < times; i++ {
k += offset
vals = append(vals, sequenceValue{
value: clientmodel.SampleValue(k),
})
}
}
return m, vals, nil
}
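
To make the expansion rule concrete, a small in-package sketch (not part of the patch, relying on the package's existing fmt import); the input and the expanded values mirror one of the testSeries cases added further down, and the example function name is made up.

// "3-10x4" starts at 3 and appends four further values, each 10 lower
// than the previous one.
func ExampleSeriesExpansion() {
	m, vals, err := parseSeriesDesc(`my_metric{a="b"} 1 2 3-10x4`)
	if err != nil {
		panic(err)
	}
	fmt.Println(m, vals)
	// Prints roughly: my_metric{a="b"} [1 2 3 -7 -17 -27 -37]
}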
// typecheck checks correct typing of the parsed statements or expression.
func (p *parser) typecheck(node Node) (err error) {
defer p.recover(&err)

View file

@@ -167,6 +167,10 @@ var testExpr = []struct {
input: "((1)",
fail: true,
errMsg: "unclosed left parenthesis",
}, {
input: "999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999",
fail: true,
errMsg: "out of range",
}, {
input: "(",
fail: true,
@@ -476,6 +480,14 @@ var testExpr = []struct {
input: "foo or on(bar) group_right(baz) bar",
fail: true,
errMsg: "no grouping allowed for AND and OR operations",
}, {
input: `http_requests{group="production"} / on(instance) group_left cpu_count{type="smp"}`,
fail: true,
errMsg: "unexpected identifier \"cpu_count\" in grouping opts, expected \"(\"",
}, {
input: `http_requests{group="production"} + on(instance) group_left(job,instance) cpu_count{type="smp"}`,
fail: true,
errMsg: "label \"instance\" must not occur in ON and INCLUDE clause at once",
},
// Test vector selector.
{
@@ -662,6 +674,9 @@ var testExpr = []struct {
input: `foo[5m] OFFSET 1h30m`,
fail: true,
errMsg: "bad number or duration syntax: \"1h3\"",
}, {
input: `foo["5m"]`,
fail: true,
}, {
input: `foo[]`,
fail: true,
@@ -1216,3 +1231,101 @@ func mustGetFunction(name string) *Function {
}
return f
}
var testSeries = []struct {
input string
expectedMetric clientmodel.Metric
expectedValues []sequenceValue
fail bool
}{
{
input: `{} 1 2 3`,
expectedMetric: clientmodel.Metric{},
expectedValues: newSeq(1, 2, 3),
}, {
input: `{a="b"} -1 2 3`,
expectedMetric: clientmodel.Metric{
"a": "b",
},
expectedValues: newSeq(-1, 2, 3),
}, {
input: `my_metric 1 2 3`,
expectedMetric: clientmodel.Metric{
clientmodel.MetricNameLabel: "my_metric",
},
expectedValues: newSeq(1, 2, 3),
}, {
input: `my_metric{} 1 2 3`,
expectedMetric: clientmodel.Metric{
clientmodel.MetricNameLabel: "my_metric",
},
expectedValues: newSeq(1, 2, 3),
}, {
input: `my_metric{a="b"} 1 2 3`,
expectedMetric: clientmodel.Metric{
clientmodel.MetricNameLabel: "my_metric",
"a": "b",
},
expectedValues: newSeq(1, 2, 3),
}, {
input: `my_metric{a="b"} 1 2 3-10x4`,
expectedMetric: clientmodel.Metric{
clientmodel.MetricNameLabel: "my_metric",
"a": "b",
},
expectedValues: newSeq(1, 2, 3, -7, -17, -27, -37),
}, {
input: `my_metric{a="b"} 1 3 _ 5 _x4`,
expectedMetric: clientmodel.Metric{
clientmodel.MetricNameLabel: "my_metric",
"a": "b",
},
expectedValues: newSeq(1, 3, none, 5, none, none, none, none),
}, {
input: `my_metric{a="b"} 1 3 _ 5 _a4`,
fail: true,
},
}
// For these tests only, we use the smallest positive non-zero float64 to signal an omitted value.
const none = math.SmallestNonzeroFloat64
func newSeq(vals ...float64) (res []sequenceValue) {
for _, v := range vals {
if v == none {
res = append(res, sequenceValue{omitted: true})
} else {
res = append(res, sequenceValue{value: clientmodel.SampleValue(v)})
}
}
return res
}
func TestParseSeries(t *testing.T) {
for _, test := range testSeries {
parser := newParser(test.input)
parser.lex.seriesDesc = true
metric, vals, err := parser.parseSeriesDesc()
if !test.fail && err != nil {
t.Errorf("error in input: \n\n%s\n", test.input)
t.Fatalf("could not parse: %s", err)
}
if test.fail && err != nil {
continue
}
if test.fail {
if err != nil {
continue
}
t.Errorf("error in input: \n\n%s\n", test.input)
t.Fatalf("failure expected, but passed")
}
if !reflect.DeepEqual(vals, test.expectedValues) || !reflect.DeepEqual(metric, test.expectedMetric) {
t.Errorf("error in input: \n\n%s\n", test.input)
t.Fatalf("no match\n\nexpected:\n%s %s\ngot: \n%s %s\n", test.expectedMetric, test.expectedValues, metric, vals)
}
}
}

File diff suppressed because it is too large.

View file

@@ -1,486 +0,0 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package promql
import (
"time"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/storage/local"
"github.com/prometheus/prometheus/storage/metric"
)
var testSampleInterval = time.Duration(5) * time.Minute
var testStartTime = clientmodel.Timestamp(0)
func getTestValueStream(startVal, endVal, stepVal clientmodel.SampleValue, startTime clientmodel.Timestamp) (resultValues metric.Values) {
currentTime := startTime
for currentVal := startVal; currentVal <= endVal; currentVal += stepVal {
sample := metric.SamplePair{
Value: currentVal,
Timestamp: currentTime,
}
resultValues = append(resultValues, sample)
currentTime = currentTime.Add(testSampleInterval)
}
return resultValues
}
func getTestVectorFromTestMatrix(matrix Matrix) Vector {
vector := Vector{}
for _, sampleStream := range matrix {
lastSample := sampleStream.Values[len(sampleStream.Values)-1]
vector = append(vector, &Sample{
Metric: sampleStream.Metric,
Value: lastSample.Value,
Timestamp: lastSample.Timestamp,
})
}
return vector
}
func storeMatrix(storage local.Storage, matrix Matrix) {
pendingSamples := clientmodel.Samples{}
for _, sampleStream := range matrix {
for _, sample := range sampleStream.Values {
pendingSamples = append(pendingSamples, &clientmodel.Sample{
Metric: sampleStream.Metric.Metric,
Value: sample.Value,
Timestamp: sample.Timestamp,
})
}
}
for _, s := range pendingSamples {
storage.Append(s)
}
storage.WaitForIndexing()
}
var testVector = getTestVectorFromTestMatrix(testMatrix)
var testMatrix = Matrix{
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "http_requests",
clientmodel.JobLabel: "api-server",
"instance": "0",
"group": "production",
},
},
Values: getTestValueStream(0, 100, 10, testStartTime),
},
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "http_requests",
clientmodel.JobLabel: "api-server",
"instance": "1",
"group": "production",
},
},
Values: getTestValueStream(0, 200, 20, testStartTime),
},
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "http_requests",
clientmodel.JobLabel: "api-server",
"instance": "0",
"group": "canary",
},
},
Values: getTestValueStream(0, 300, 30, testStartTime),
},
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "http_requests",
clientmodel.JobLabel: "api-server",
"instance": "1",
"group": "canary",
},
},
Values: getTestValueStream(0, 400, 40, testStartTime),
},
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "http_requests",
clientmodel.JobLabel: "app-server",
"instance": "0",
"group": "production",
},
},
Values: getTestValueStream(0, 500, 50, testStartTime),
},
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "http_requests",
clientmodel.JobLabel: "app-server",
"instance": "1",
"group": "production",
},
},
Values: getTestValueStream(0, 600, 60, testStartTime),
},
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "http_requests",
clientmodel.JobLabel: "app-server",
"instance": "0",
"group": "canary",
},
},
Values: getTestValueStream(0, 700, 70, testStartTime),
},
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "http_requests",
clientmodel.JobLabel: "app-server",
"instance": "1",
"group": "canary",
},
},
Values: getTestValueStream(0, 800, 80, testStartTime),
},
// Single-letter metric and label names.
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "x",
"y": "testvalue",
},
},
Values: getTestValueStream(0, 100, 10, testStartTime),
},
// Counter reset in the middle of range.
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "testcounter_reset_middle",
},
},
Values: append(getTestValueStream(0, 40, 10, testStartTime), getTestValueStream(0, 50, 10, testStartTime.Add(testSampleInterval*5))...),
},
// Counter reset at the end of range.
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "testcounter_reset_end",
},
},
Values: append(getTestValueStream(0, 90, 10, testStartTime), getTestValueStream(0, 0, 10, testStartTime.Add(testSampleInterval*10))...),
},
// For label-key grouping regression test.
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "label_grouping_test",
"a": "aa",
"b": "bb",
},
},
Values: getTestValueStream(0, 100, 10, testStartTime),
},
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "label_grouping_test",
"a": "a",
"b": "abb",
},
},
Values: getTestValueStream(0, 200, 20, testStartTime),
},
// Two histograms with 4 buckets each (*_sum and *_count not included,
// only buckets). Lowest bucket for one histogram < 0, for the other >
// 0. They have the same name, just separated by label. Not useful in
// practice, but can happen (if clients change bucketing), and the
// server has to cope with it.
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "testhistogram_bucket",
"le": "0.1",
"start": "positive",
},
},
Values: getTestValueStream(0, 50, 5, testStartTime),
},
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "testhistogram_bucket",
"le": ".2",
"start": "positive",
},
},
Values: getTestValueStream(0, 70, 7, testStartTime),
},
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "testhistogram_bucket",
"le": "1e0",
"start": "positive",
},
},
Values: getTestValueStream(0, 110, 11, testStartTime),
},
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "testhistogram_bucket",
"le": "+Inf",
"start": "positive",
},
},
Values: getTestValueStream(0, 120, 12, testStartTime),
},
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "testhistogram_bucket",
"le": "-.2",
"start": "negative",
},
},
Values: getTestValueStream(0, 10, 1, testStartTime),
},
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "testhistogram_bucket",
"le": "-0.1",
"start": "negative",
},
},
Values: getTestValueStream(0, 20, 2, testStartTime),
},
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "testhistogram_bucket",
"le": "0.3",
"start": "negative",
},
},
Values: getTestValueStream(0, 20, 2, testStartTime),
},
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "testhistogram_bucket",
"le": "+Inf",
"start": "negative",
},
},
Values: getTestValueStream(0, 30, 3, testStartTime),
},
// Now a more realistic histogram per job and instance to test aggregation.
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "request_duration_seconds_bucket",
clientmodel.JobLabel: "job1",
"instance": "ins1",
"le": "0.1",
},
},
Values: getTestValueStream(0, 10, 1, testStartTime),
},
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "request_duration_seconds_bucket",
clientmodel.JobLabel: "job1",
"instance": "ins1",
"le": "0.2",
},
},
Values: getTestValueStream(0, 30, 3, testStartTime),
},
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "request_duration_seconds_bucket",
clientmodel.JobLabel: "job1",
"instance": "ins1",
"le": "+Inf",
},
},
Values: getTestValueStream(0, 40, 4, testStartTime),
},
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "request_duration_seconds_bucket",
clientmodel.JobLabel: "job1",
"instance": "ins2",
"le": "0.1",
},
},
Values: getTestValueStream(0, 20, 2, testStartTime),
},
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "request_duration_seconds_bucket",
clientmodel.JobLabel: "job1",
"instance": "ins2",
"le": "0.2",
},
},
Values: getTestValueStream(0, 50, 5, testStartTime),
},
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "request_duration_seconds_bucket",
clientmodel.JobLabel: "job1",
"instance": "ins2",
"le": "+Inf",
},
},
Values: getTestValueStream(0, 60, 6, testStartTime),
},
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "request_duration_seconds_bucket",
clientmodel.JobLabel: "job2",
"instance": "ins1",
"le": "0.1",
},
},
Values: getTestValueStream(0, 30, 3, testStartTime),
},
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "request_duration_seconds_bucket",
clientmodel.JobLabel: "job2",
"instance": "ins1",
"le": "0.2",
},
},
Values: getTestValueStream(0, 40, 4, testStartTime),
},
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "request_duration_seconds_bucket",
clientmodel.JobLabel: "job2",
"instance": "ins1",
"le": "+Inf",
},
},
Values: getTestValueStream(0, 60, 6, testStartTime),
},
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "request_duration_seconds_bucket",
clientmodel.JobLabel: "job2",
"instance": "ins2",
"le": "0.1",
},
},
Values: getTestValueStream(0, 40, 4, testStartTime),
},
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "request_duration_seconds_bucket",
clientmodel.JobLabel: "job2",
"instance": "ins2",
"le": "0.2",
},
},
Values: getTestValueStream(0, 70, 7, testStartTime),
},
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "request_duration_seconds_bucket",
clientmodel.JobLabel: "job2",
"instance": "ins2",
"le": "+Inf",
},
},
Values: getTestValueStream(0, 90, 9, testStartTime),
},
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "vector_matching_a",
"l": "x",
},
},
Values: getTestValueStream(0, 100, 1, testStartTime),
},
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "vector_matching_a",
"l": "y",
},
},
Values: getTestValueStream(0, 100, 2, testStartTime),
},
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "vector_matching_b",
"l": "x",
},
},
Values: getTestValueStream(0, 100, 4, testStartTime),
},
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "cpu_count",
"instance": "0",
"type": "numa",
},
},
Values: getTestValueStream(0, 500, 30, testStartTime),
},
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "cpu_count",
"instance": "0",
"type": "smp",
},
},
Values: getTestValueStream(0, 200, 10, testStartTime),
},
{
Metric: clientmodel.COWMetric{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "cpu_count",
"instance": "1",
"type": "smp",
},
},
Values: getTestValueStream(0, 200, 20, testStartTime),
},
}

promql/test.go (Normal file, 507 lines)
View file

@@ -0,0 +1,507 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package promql
import (
"fmt"
"io/ioutil"
"math"
"regexp"
"strconv"
"strings"
"testing"
"time"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/storage/local"
"github.com/prometheus/prometheus/storage/metric"
"github.com/prometheus/prometheus/utility"
testutil "github.com/prometheus/prometheus/utility/test"
)
var (
minNormal = math.Float64frombits(0x0010000000000000) // The smallest positive normal value of type float64.
patSpace = regexp.MustCompile("[\t ]+")
patLoad = regexp.MustCompile(`^load\s+(.+?)$`)
patEvalInstant = regexp.MustCompile(`^eval(?:_(fail|ordered))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`)
)
const (
testStartTime = clientmodel.Timestamp(0)
epsilon = 0.000001 // Relative error allowed for sample values.
maxErrorCount = 10
)
// Test is a sequence of read and write commands that are run
// against a test storage.
type Test struct {
*testing.T
cmds []testCommand
storage local.Storage
closeStorage func()
queryEngine *Engine
}
// NewTest returns an initialized empty Test.
func NewTest(t *testing.T, input string) (*Test, error) {
test := &Test{
T: t,
cmds: []testCommand{},
}
err := test.parse(input)
test.clear()
return test, err
}
func NewTestFromFile(t *testing.T, filename string) (*Test, error) {
content, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
return NewTest(t, string(content))
}
func raise(line int, format string, v ...interface{}) error {
return &ParseErr{
Line: line + 1,
Err: fmt.Errorf(format, v...),
}
}
func (t *Test) parseLoad(lines []string, i int) (int, *loadCmd, error) {
if !patLoad.MatchString(lines[i]) {
return i, nil, raise(i, "invalid load command. (load <step:duration>)")
}
parts := patLoad.FindStringSubmatch(lines[i])
gap, err := utility.StringToDuration(parts[1])
if err != nil {
return i, nil, raise(i, "invalid step definition %q: %s", parts[1], err)
}
cmd := newLoadCmd(gap)
for i+1 < len(lines) {
i++
defLine := lines[i]
if len(defLine) == 0 {
i--
break
}
metric, vals, err := parseSeriesDesc(defLine)
if err != nil {
perr := err.(*ParseErr)
perr.Line = i + 1
return i, nil, err
}
cmd.set(metric, vals...)
}
return i, cmd, nil
}
func (t *Test) parseEval(lines []string, i int) (int, *evalCmd, error) {
if !patEvalInstant.MatchString(lines[i]) {
return i, nil, raise(i, "invalid evaluation command. (eval[_fail|_ordered] instant [at <offset:duration>] <query>)")
}
parts := patEvalInstant.FindStringSubmatch(lines[i])
var (
mod = parts[1]
at = parts[2]
qry = parts[3]
)
expr, err := ParseExpr(qry)
if err != nil {
perr := err.(*ParseErr)
perr.Line = i + 1
perr.Pos += strings.Index(lines[i], qry)
return i, nil, perr
}
offset, err := utility.StringToDuration(at)
if err != nil {
return i, nil, raise(i, "invalid timestamp definition %q: %s", at, err)
}
ts := testStartTime.Add(offset)
cmd := newEvalCmd(expr, ts, ts, 0)
switch mod {
case "ordered":
cmd.ordered = true
case "fail":
cmd.fail = true
}
for j := 1; i+1 < len(lines); j++ {
i++
defLine := lines[i]
if len(defLine) == 0 {
i--
break
}
if f, err := parseNumber(defLine); err == nil {
cmd.expect(0, nil, sequenceValue{value: clientmodel.SampleValue(f)})
break
}
metric, vals, err := parseSeriesDesc(defLine)
if err != nil {
perr := err.(*ParseErr)
perr.Line = i + 1
return i, nil, err
}
// Currently, we are not expecting any matrices.
if len(vals) > 1 {
return i, nil, raise(i, "expecting multiple values in instant evaluation is not allowed")
}
cmd.expect(j, metric, vals...)
}
return i, cmd, nil
}
// parse the given command sequence and append it to the test.
func (t *Test) parse(input string) error {
// Trim lines and remove comments.
lines := strings.Split(input, "\n")
for i, l := range lines {
l = strings.TrimSpace(l)
if strings.HasPrefix(l, "#") {
l = ""
}
lines[i] = l
}
var err error
// Scan for steps line by line.
for i := 0; i < len(lines); i++ {
l := lines[i]
if len(l) == 0 {
continue
}
var cmd testCommand
switch c := strings.ToLower(patSpace.Split(l, 2)[0]); {
case c == "clear":
cmd = &clearCmd{}
case c == "load":
i, cmd, err = t.parseLoad(lines, i)
case strings.HasPrefix(c, "eval"):
i, cmd, err = t.parseEval(lines, i)
default:
return raise(i, "invalid command %q", l)
}
if err != nil {
return err
}
t.cmds = append(t.cmds, cmd)
}
return nil
}
// testCommand is an interface that ensures that only the package internal
// types can be a valid command for a test.
type testCommand interface {
testCmd()
}
func (*clearCmd) testCmd() {}
func (*loadCmd) testCmd() {}
func (*evalCmd) testCmd() {}
// loadCmd is a command that loads sequences of sample values for specific
// metrics into the storage.
type loadCmd struct {
gap time.Duration
metrics map[clientmodel.Fingerprint]clientmodel.Metric
defs map[clientmodel.Fingerprint]metric.Values
}
func newLoadCmd(gap time.Duration) *loadCmd {
return &loadCmd{
gap: gap,
metrics: map[clientmodel.Fingerprint]clientmodel.Metric{},
defs: map[clientmodel.Fingerprint]metric.Values{},
}
}
func (cmd loadCmd) String() string {
return "load"
}
// set a sequence of sample values for the given metric.
func (cmd *loadCmd) set(m clientmodel.Metric, vals ...sequenceValue) {
fp := m.Fingerprint()
samples := make(metric.Values, 0, len(vals))
ts := testStartTime
for _, v := range vals {
if !v.omitted {
samples = append(samples, metric.SamplePair{
Timestamp: ts,
Value: v.value,
})
}
ts = ts.Add(cmd.gap)
}
cmd.defs[fp] = samples
cmd.metrics[fp] = m
}
// append the defined time series to the storage.
func (cmd *loadCmd) append(a storage.SampleAppender) {
for fp, samples := range cmd.defs {
met := cmd.metrics[fp]
for _, smpl := range samples {
s := &clientmodel.Sample{
Metric: met,
Value: smpl.Value,
Timestamp: smpl.Timestamp,
}
a.Append(s)
}
}
}
// evalCmd is a command that evaluates an expression for the given time (range)
// and expects a specific result.
type evalCmd struct {
expr Expr
start, end clientmodel.Timestamp
interval time.Duration
instant bool
fail, ordered bool
metrics map[clientmodel.Fingerprint]clientmodel.Metric
expected map[clientmodel.Fingerprint]entry
}
type entry struct {
pos int
vals []sequenceValue
}
func (e entry) String() string {
return fmt.Sprintf("%d: %s", e.pos, e.vals)
}
func newEvalCmd(expr Expr, start, end clientmodel.Timestamp, interval time.Duration) *evalCmd {
return &evalCmd{
expr: expr,
start: start,
end: end,
interval: interval,
instant: start == end && interval == 0,
metrics: map[clientmodel.Fingerprint]clientmodel.Metric{},
expected: map[clientmodel.Fingerprint]entry{},
}
}
func (ev *evalCmd) String() string {
return "eval"
}
// expect adds a new metric with a sequence of values to the set of expected
// results for the query.
func (ev *evalCmd) expect(pos int, m clientmodel.Metric, vals ...sequenceValue) {
if m == nil {
ev.expected[0] = entry{pos: pos, vals: vals}
return
}
fp := m.Fingerprint()
ev.metrics[fp] = m
ev.expected[fp] = entry{pos: pos, vals: vals}
}
// compareResult compares the result value with the defined expectation.
func (ev *evalCmd) compareResult(result Value) error {
switch val := result.(type) {
case Matrix:
if ev.instant {
return fmt.Errorf("received range result on instant evaluation")
}
seen := map[clientmodel.Fingerprint]bool{}
for pos, v := range val {
fp := v.Metric.Metric.Fingerprint()
if _, ok := ev.metrics[fp]; !ok {
return fmt.Errorf("unexpected metric %s in result", v.Metric.Metric)
}
exp := ev.expected[fp]
if ev.ordered && exp.pos != pos+1 {
return fmt.Errorf("expected metric %s with %v at position %d but was at %d", v.Metric.Metric, exp.vals, exp.pos, pos+1)
}
for i, expVal := range exp.vals {
if !almostEqual(float64(expVal.value), float64(v.Values[i].Value)) {
return fmt.Errorf("expected %v for %s but got %v", expVal, v.Metric.Metric, v.Values)
}
}
seen[fp] = true
}
for fp, expVals := range ev.expected {
if !seen[fp] {
return fmt.Errorf("expected metric %s with %v not found", ev.metrics[fp], expVals)
}
}
case Vector:
if !ev.instant {
return fmt.Errorf("received instant result on range evaluation")
}
seen := map[clientmodel.Fingerprint]bool{}
for pos, v := range val {
fp := v.Metric.Metric.Fingerprint()
if _, ok := ev.metrics[fp]; !ok {
return fmt.Errorf("unexpected metric %s in result", v.Metric.Metric)
}
exp := ev.expected[fp]
if ev.ordered && exp.pos != pos+1 {
return fmt.Errorf("expected metric %s with %v at position %d but was at %d", v.Metric.Metric, exp.vals, exp.pos, pos+1)
}
if !almostEqual(float64(exp.vals[0].value), float64(v.Value)) {
return fmt.Errorf("expected %v for %s but got %v", exp.vals[0].value, v.Metric.Metric, v.Value)
}
seen[fp] = true
}
for fp, expVals := range ev.expected {
if !seen[fp] {
return fmt.Errorf("expected metric %s with %v not found", ev.metrics[fp], expVals)
}
}
case *Scalar:
if !almostEqual(float64(ev.expected[0].vals[0].value), float64(val.Value)) {
return fmt.Errorf("expected scalar %v but got %v", ev.expected[0].vals[0].value, val.Value)
}
default:
panic(fmt.Errorf("promql.Test.compareResult: unexpected result type %T", result))
}
return nil
}
// clearCmd is a command that wipes the test's storage state.
type clearCmd struct{}
func (cmd clearCmd) String() string {
return "clear"
}
// Run executes the command sequence of the test. Until the maximum error number
// is reached, evaluation errors do not terminate execution.
func (t *Test) Run() error {
for _, cmd := range t.cmds {
err := t.exec(cmd)
// TODO(fabxc): aggregate command errors, yield diffs for result
// comparison errors.
if err != nil {
return err
}
}
return nil
}
// exec processes a single step of the test.
func (t *Test) exec(tc testCommand) error {
switch cmd := tc.(type) {
case *clearCmd:
t.clear()
case *loadCmd:
cmd.append(t.storage)
t.storage.WaitForIndexing()
case *evalCmd:
q := t.queryEngine.newQuery(cmd.expr, cmd.start, cmd.end, cmd.interval)
res := q.Exec()
if res.Err != nil {
if cmd.fail {
return nil
}
return fmt.Errorf("error evaluating query: %s", res.Err)
}
if res.Err == nil && cmd.fail {
return fmt.Errorf("expected error evaluating query but got none")
}
err := cmd.compareResult(res.Value)
if err != nil {
return fmt.Errorf("error in %s %s: %s", cmd, cmd.expr, err)
}
default:
panic("promql.Test.exec: unknown test command type")
}
return nil
}
// clear the current test storage of all inserted samples.
func (t *Test) clear() {
if t.closeStorage != nil {
t.closeStorage()
}
if t.queryEngine != nil {
t.queryEngine.Stop()
}
var closer testutil.Closer
t.storage, closer = local.NewTestStorage(t, 1)
t.closeStorage = closer.Close
t.queryEngine = NewEngine(t.storage)
}
func (t *Test) Close() {
t.queryEngine.Stop()
t.closeStorage()
}
// almostEqual returns true if a and b only differ by a small relative error
// in their values.
func almostEqual(a, b float64) bool {
// NaN has no equality but for testing we still want to know whether both values
// are NaN.
if math.IsNaN(a) && math.IsNaN(b) {
return true
}
// Cf. http://floating-point-gui.de/errors/comparison/
if a == b {
return true
}
diff := math.Abs(a - b)
if a == 0 || b == 0 || diff < minNormal {
return diff < epsilon*minNormal
}
return diff/(math.Abs(a)+math.Abs(b)) < epsilon
}
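
As a quick illustration of the tolerance (a hypothetical in-package helper, relying on this file's fmt import): the two spellings of the aggregated quantile that appear as expected values in histograms.test below differ only in their trailing digits and are treated as equal.

// 0.1277777777777778 and 0.12777777777777778 differ by a relative error of
// roughly 8e-17, far below the 1e-6 epsilon.
func demoAlmostEqual() {
	fmt.Println(almostEqual(0.1277777777777778, 0.12777777777777778)) // true
}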
func parseNumber(s string) (float64, error) {
n, err := strconv.ParseInt(s, 0, 64)
f := float64(n)
if err != nil {
f, err = strconv.ParseFloat(s, 64)
}
if err != nil {
return 0, fmt.Errorf("error parsing number: %s", err)
}
return f, nil
}
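
The harness is intended to be driven from ordinary Go tests. A minimal sketch, assuming one of the script files added below under promql/testdata/; the wrapper test name is illustrative and not part of this change.

package promql

import "testing"

// Replay the load/eval commands of a script file against the fresh test
// storage that the harness sets up; any mismatch surfaces as Run's error.
func TestHistogramsScript(t *testing.T) {
	test, err := NewTestFromFile(t, "testdata/histograms.test")
	if err != nil {
		t.Fatal(err)
	}
	defer test.Close()

	if err := test.Run(); err != nil {
		t.Error(err)
	}
}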

promql/testdata/histograms.test (vendored Normal file, 141 lines)
View file

@@ -0,0 +1,141 @@
# Two histograms with 4 buckets each (x_sum and x_count not included,
# only buckets). Lowest bucket for one histogram < 0, for the other >
# 0. They have the same name, just separated by label. Not useful in
# practice, but can happen (if clients change bucketing), and the
# server has to cope with it.
# Test histogram.
load 5m
testhistogram_bucket{le="0.1", start="positive"} 0+5x10
testhistogram_bucket{le=".2", start="positive"} 0+7x10
testhistogram_bucket{le="1e0", start="positive"} 0+11x10
testhistogram_bucket{le="+Inf", start="positive"} 0+12x10
testhistogram_bucket{le="-.2", start="negative"} 0+1x10
testhistogram_bucket{le="-0.1", start="negative"} 0+2x10
testhistogram_bucket{le="0.3", start="negative"} 0+2x10
testhistogram_bucket{le="+Inf", start="negative"} 0+3x10
# Now a more realistic histogram per job and instance to test aggregation.
load 5m
request_duration_seconds_bucket{job="job1", instance="ins1", le="0.1"} 0+1x10
request_duration_seconds_bucket{job="job1", instance="ins1", le="0.2"} 0+3x10
request_duration_seconds_bucket{job="job1", instance="ins1", le="+Inf"} 0+4x10
request_duration_seconds_bucket{job="job1", instance="ins2", le="0.1"} 0+2x10
request_duration_seconds_bucket{job="job1", instance="ins2", le="0.2"} 0+5x10
request_duration_seconds_bucket{job="job1", instance="ins2", le="+Inf"} 0+6x10
request_duration_seconds_bucket{job="job2", instance="ins1", le="0.1"} 0+3x10
request_duration_seconds_bucket{job="job2", instance="ins1", le="0.2"} 0+4x10
request_duration_seconds_bucket{job="job2", instance="ins1", le="+Inf"} 0+6x10
request_duration_seconds_bucket{job="job2", instance="ins2", le="0.1"} 0+4x10
request_duration_seconds_bucket{job="job2", instance="ins2", le="0.2"} 0+7x10
request_duration_seconds_bucket{job="job2", instance="ins2", le="+Inf"} 0+9x10
# Quantile too low.
eval instant at 50m histogram_quantile(-0.1, testhistogram_bucket)
{start="positive"} -Inf
{start="negative"} -Inf
# Quantile too high.
eval instant at 50m histogram_quantile(1.01, testhistogram_bucket)
{start="positive"} +Inf
{start="negative"} +Inf
# Quantile value in lowest bucket, which is positive.
eval instant at 50m histogram_quantile(0, testhistogram_bucket{start="positive"})
{start="positive"} 0
# Quantile value in lowest bucket, which is negative.
eval instant at 50m histogram_quantile(0, testhistogram_bucket{start="negative"})
{start="negative"} -0.2
# Quantile value in highest bucket.
eval instant at 50m histogram_quantile(1, testhistogram_bucket)
{start="positive"} 1
{start="negative"} 0.3
# Finally some useful quantiles.
eval instant at 50m histogram_quantile(0.2, testhistogram_bucket)
{start="positive"} 0.048
{start="negative"} -0.2
eval instant at 50m histogram_quantile(0.5, testhistogram_bucket)
{start="positive"} 0.15
{start="negative"} -0.15
eval instant at 50m histogram_quantile(0.8, testhistogram_bucket)
{start="positive"} 0.72
{start="negative"} 0.3
# More realistic with rates.
eval instant at 50m histogram_quantile(0.2, rate(testhistogram_bucket[5m]))
{start="positive"} 0.048
{start="negative"} -0.2
eval instant at 50m histogram_quantile(0.5, rate(testhistogram_bucket[5m]))
{start="positive"} 0.15
{start="negative"} -0.15
eval instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[5m]))
{start="positive"} 0.72
{start="negative"} 0.3
# Aggregated histogram: Everything in one.
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le))
{} 0.075
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le))
{} 0.1277777777777778
# Aggregated histogram: Everything in one. Now with avg, which does not change anything.
eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds_bucket[5m])) by (le))
{} 0.075
eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds_bucket[5m])) by (le))
{} 0.12777777777777778
# Aggregated histogram: By instance.
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance))
{instance="ins1"} 0.075
{instance="ins2"} 0.075
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance))
{instance="ins1"} 0.1333333333
{instance="ins2"} 0.125
# Aggregated histogram: By job.
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job))
{job="job1"} 0.1
{job="job2"} 0.0642857142857143
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job))
{job="job1"} 0.14
{job="job2"} 0.1125
# Aggregated histogram: By job and instance.
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance))
{instance="ins1", job="job1"} 0.11
{instance="ins2", job="job1"} 0.09
{instance="ins1", job="job2"} 0.06
{instance="ins2", job="job2"} 0.0675
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance))
{instance="ins1", job="job1"} 0.15
{instance="ins2", job="job1"} 0.1333333333333333
{instance="ins1", job="job2"} 0.1
{instance="ins2", job="job2"} 0.1166666666666667
# The unaggregated histogram for comparison. Same result as the previous one.
eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds_bucket[5m]))
{instance="ins1", job="job1"} 0.11
{instance="ins2", job="job1"} 0.09
{instance="ins1", job="job2"} 0.06
{instance="ins2", job="job2"} 0.0675
eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[5m]))
{instance="ins1", job="job1"} 0.15
{instance="ins2", job="job1"} 0.13333333333333333
{instance="ins1", job="job2"} 0.1
{instance="ins2", job="job2"} 0.11666666666666667

promql/testdata/legacy.test (vendored Normal file, 673 lines)
View file

@@ -0,0 +1,673 @@
load 5m
http_requests{job="api-server", instance="0", group="production"} 0+10x10
http_requests{job="api-server", instance="1", group="production"} 0+20x10
http_requests{job="api-server", instance="0", group="canary"} 0+30x10
http_requests{job="api-server", instance="1", group="canary"} 0+40x10
http_requests{job="app-server", instance="0", group="production"} 0+50x10
http_requests{job="app-server", instance="1", group="production"} 0+60x10
http_requests{job="app-server", instance="0", group="canary"} 0+70x10
http_requests{job="app-server", instance="1", group="canary"} 0+80x10
load 5m
x{y="testvalue"} 0+10x10
load 5m
testcounter_reset_middle 0+10x4 0+10x5
testcounter_reset_end 0+10x9 0 10
load 5m
label_grouping_test{a="aa", b="bb"} 0+10x10
label_grouping_test{a="a", b="abb"} 0+20x10
load 5m
vector_matching_a{l="x"} 0+1x100
vector_matching_a{l="y"} 0+2x50
vector_matching_b{l="x"} 0+4x25
load 5m
cpu_count{instance="0", type="numa"} 0+30x10
cpu_count{instance="0", type="smp"} 0+10x20
cpu_count{instance="1", type="smp"} 0+20x10
eval instant at 50m SUM(http_requests)
{} 3600
eval instant at 50m SUM(http_requests{instance="0"}) BY(job)
{job="api-server"} 400
{job="app-server"} 1200
eval instant at 50m SUM(http_requests{instance="0"}) BY(job) KEEPING_EXTRA
{instance="0", job="api-server"} 400
{instance="0", job="app-server"} 1200
eval instant at 50m SUM(http_requests) BY (job)
{job="api-server"} 1000
{job="app-server"} 2600
# Non-existent labels mentioned in BY-clauses shouldn't propagate to output.
eval instant at 50m SUM(http_requests) BY (job, nonexistent)
{job="api-server"} 1000
{job="app-server"} 2600
eval instant at 50m COUNT(http_requests) BY (job)
{job="api-server"} 4
{job="app-server"} 4
eval instant at 50m SUM(http_requests) BY (job, group)
{group="canary", job="api-server"} 700
{group="canary", job="app-server"} 1500
{group="production", job="api-server"} 300
{group="production", job="app-server"} 1100
eval instant at 50m AVG(http_requests) BY (job)
{job="api-server"} 250
{job="app-server"} 650
eval instant at 50m MIN(http_requests) BY (job)
{job="api-server"} 100
{job="app-server"} 500
eval instant at 50m MAX(http_requests) BY (job)
{job="api-server"} 400
{job="app-server"} 800
eval instant at 50m SUM(http_requests) BY (job) - COUNT(http_requests) BY (job)
{job="api-server"} 996
{job="app-server"} 2596
eval instant at 50m 2 - SUM(http_requests) BY (job)
{job="api-server"} -998
{job="app-server"} -2598
eval instant at 50m 1000 / SUM(http_requests) BY (job)
{job="api-server"} 1
{job="app-server"} 0.38461538461538464
eval instant at 50m SUM(http_requests) BY (job) - 2
{job="api-server"} 998
{job="app-server"} 2598
eval instant at 50m SUM(http_requests) BY (job) % 3
{job="api-server"} 1
{job="app-server"} 2
eval instant at 50m SUM(http_requests) BY (job) / 0
{job="api-server"} +Inf
{job="app-server"} +Inf
eval instant at 50m SUM(http_requests) BY (job) > 1000
{job="app-server"} 2600
eval instant at 50m 1000 < SUM(http_requests) BY (job)
{job="app-server"} 1000
eval instant at 50m SUM(http_requests) BY (job) <= 1000
{job="api-server"} 1000
eval instant at 50m SUM(http_requests) BY (job) != 1000
{job="app-server"} 2600
eval instant at 50m SUM(http_requests) BY (job) == 1000
{job="api-server"} 1000
eval instant at 50m SUM(http_requests) BY (job) + SUM(http_requests) BY (job)
{job="api-server"} 2000
{job="app-server"} 5200
eval instant at 50m http_requests{job="api-server", group="canary"}
http_requests{group="canary", instance="0", job="api-server"} 300
http_requests{group="canary", instance="1", job="api-server"} 400
eval instant at 50m http_requests{job="api-server", group="canary"} + rate(http_requests{job="api-server"}[5m]) * 5 * 60
{group="canary", instance="0", job="api-server"} 330
{group="canary", instance="1", job="api-server"} 440
eval instant at 50m rate(http_requests[25m]) * 25 * 60
{group="canary", instance="0", job="api-server"} 150
{group="canary", instance="0", job="app-server"} 350
{group="canary", instance="1", job="api-server"} 200
{group="canary", instance="1", job="app-server"} 400
{group="production", instance="0", job="api-server"} 50
{group="production", instance="0", job="app-server"} 249.99999999999997
{group="production", instance="1", job="api-server"} 100
{group="production", instance="1", job="app-server"} 300
eval instant at 50m delta(http_requests[25m], 1)
{group="canary", instance="0", job="api-server"} 150
{group="canary", instance="0", job="app-server"} 350
{group="canary", instance="1", job="api-server"} 200
{group="canary", instance="1", job="app-server"} 400
{group="production", instance="0", job="api-server"} 50
{group="production", instance="0", job="app-server"} 250
{group="production", instance="1", job="api-server"} 100
{group="production", instance="1", job="app-server"} 300
eval_ordered instant at 50m sort(http_requests)
http_requests{group="production", instance="0", job="api-server"} 100
http_requests{group="production", instance="1", job="api-server"} 200
http_requests{group="canary", instance="0", job="api-server"} 300
http_requests{group="canary", instance="1", job="api-server"} 400
http_requests{group="production", instance="0", job="app-server"} 500
http_requests{group="production", instance="1", job="app-server"} 600
http_requests{group="canary", instance="0", job="app-server"} 700
http_requests{group="canary", instance="1", job="app-server"} 800
eval_ordered instant at 50m sort(0 / round(http_requests, 400) + http_requests)
{group="production", instance="0", job="api-server"} NaN
{group="production", instance="1", job="api-server"} 200
{group="canary", instance="0", job="api-server"} 300
{group="canary", instance="1", job="api-server"} 400
{group="production", instance="0", job="app-server"} 500
{group="production", instance="1", job="app-server"} 600
{group="canary", instance="0", job="app-server"} 700
{group="canary", instance="1", job="app-server"} 800
eval_ordered instant at 50m sort_desc(http_requests)
http_requests{group="canary", instance="1", job="app-server"} 800
http_requests{group="canary", instance="0", job="app-server"} 700
http_requests{group="production", instance="1", job="app-server"} 600
http_requests{group="production", instance="0", job="app-server"} 500
http_requests{group="canary", instance="1", job="api-server"} 400
http_requests{group="canary", instance="0", job="api-server"} 300
http_requests{group="production", instance="1", job="api-server"} 200
http_requests{group="production", instance="0", job="api-server"} 100
eval_ordered instant at 50m topk(3, http_requests)
http_requests{group="canary", instance="1", job="app-server"} 800
http_requests{group="canary", instance="0", job="app-server"} 700
http_requests{group="production", instance="1", job="app-server"} 600
eval_ordered instant at 50m topk(5, http_requests{group="canary",job="app-server"})
http_requests{group="canary", instance="1", job="app-server"} 800
http_requests{group="canary", instance="0", job="app-server"} 700
eval_ordered instant at 50m bottomk(3, http_requests)
http_requests{group="production", instance="0", job="api-server"} 100
http_requests{group="production", instance="1", job="api-server"} 200
http_requests{group="canary", instance="0", job="api-server"} 300
eval_ordered instant at 50m bottomk(5, http_requests{group="canary",job="app-server"})
http_requests{group="canary", instance="0", job="app-server"} 700
http_requests{group="canary", instance="1", job="app-server"} 800
# Single-letter label names and values.
eval instant at 50m x{y="testvalue"}
x{y="testvalue"} 100
# Lower-cased aggregation operators should work too.
eval instant at 50m sum(http_requests) by (job) + min(http_requests) by (job) + max(http_requests) by (job) + avg(http_requests) by (job)
{job="app-server"} 4550
{job="api-server"} 1750
# Deltas should be adjusted for target interval vs. samples under target interval.
eval instant at 50m delta(http_requests{group="canary", instance="1", job="app-server"}[18m])
{group="canary", instance="1", job="app-server"} 288
# Deltas should perform the same operation when 2nd argument is 0.
eval instant at 50m delta(http_requests{group="canary", instance="1", job="app-server"}[18m], 0)
{group="canary", instance="1", job="app-server"} 288
# Rates should calculate per-second rates.
eval instant at 50m rate(http_requests{group="canary", instance="1", job="app-server"}[60m])
{group="canary", instance="1", job="app-server"} 0.26666666666666666
# Deriv should return the same as rate in simple cases.
eval instant at 50m deriv(http_requests{group="canary", instance="1", job="app-server"}[60m])
{group="canary", instance="1", job="app-server"} 0.26666666666666666
# Counter resets in the middle of range are handled correctly by rate().
eval instant at 50m rate(testcounter_reset_middle[60m])
{} 0.03
# Counter resets at end of range are ignored by rate().
eval instant at 50m rate(testcounter_reset_end[5m])
{} 0
# Deriv should return correct result.
eval instant at 50m deriv(testcounter_reset_middle[100m])
{} 0.010606060606060607
# count_scalar for a non-empty vector should return scalar element count.
eval instant at 50m count_scalar(http_requests)
8
# count_scalar for an empty vector should return scalar 0.
eval instant at 50m count_scalar(nonexistent)
0
eval instant at 50m http_requests{group!="canary"}
http_requests{group="production", instance="1", job="app-server"} 600
http_requests{group="production", instance="0", job="app-server"} 500
http_requests{group="production", instance="1", job="api-server"} 200
http_requests{group="production", instance="0", job="api-server"} 100
eval instant at 50m http_requests{job=~"server",group!="canary"}
http_requests{group="production", instance="1", job="app-server"} 600
http_requests{group="production", instance="0", job="app-server"} 500
http_requests{group="production", instance="1", job="api-server"} 200
http_requests{group="production", instance="0", job="api-server"} 100
eval instant at 50m http_requests{job!~"api",group!="canary"}
http_requests{group="production", instance="1", job="app-server"} 600
http_requests{group="production", instance="0", job="app-server"} 500
eval instant at 50m count_scalar(http_requests{job=~"^server$"})
0
eval instant at 50m http_requests{group="production",job=~"^api"}
http_requests{group="production", instance="0", job="api-server"} 100
http_requests{group="production", instance="1", job="api-server"} 200
eval instant at 50m abs(-1 * http_requests{group="production",job="api-server"})
{group="production", instance="0", job="api-server"} 100
{group="production", instance="1", job="api-server"} 200
eval instant at 50m floor(0.004 * http_requests{group="production",job="api-server"})
{group="production", instance="0", job="api-server"} 0
{group="production", instance="1", job="api-server"} 0
eval instant at 50m ceil(0.004 * http_requests{group="production",job="api-server"})
{group="production", instance="0", job="api-server"} 1
{group="production", instance="1", job="api-server"} 1
eval instant at 50m round(0.004 * http_requests{group="production",job="api-server"})
{group="production", instance="0", job="api-server"} 0
{group="production", instance="1", job="api-server"} 1
# Round should correctly handle negative numbers.
eval instant at 50m round(-1 * (0.004 * http_requests{group="production",job="api-server"}))
{group="production", instance="0", job="api-server"} 0
{group="production", instance="1", job="api-server"} -1
# Round should round half up.
eval instant at 50m round(0.005 * http_requests{group="production",job="api-server"})
{group="production", instance="0", job="api-server"} 1
{group="production", instance="1", job="api-server"} 1
eval instant at 50m round(-1 * (0.005 * http_requests{group="production",job="api-server"}))
{group="production", instance="0", job="api-server"} 0
{group="production", instance="1", job="api-server"} -1
eval instant at 50m round(1 + 0.005 * http_requests{group="production",job="api-server"})
{group="production", instance="0", job="api-server"} 2
{group="production", instance="1", job="api-server"} 2
eval instant at 50m round(-1 * (1 + 0.005 * http_requests{group="production",job="api-server"}))
{group="production", instance="0", job="api-server"} -1
{group="production", instance="1", job="api-server"} -2
# Round should accept the number to round nearest to.
eval instant at 50m round(0.0005 * http_requests{group="production",job="api-server"}, 0.1)
{group="production", instance="0", job="api-server"} 0.1
{group="production", instance="1", job="api-server"} 0.1
eval instant at 50m round(2.1 + 0.0005 * http_requests{group="production",job="api-server"}, 0.1)
{group="production", instance="0", job="api-server"} 2.2
{group="production", instance="1", job="api-server"} 2.2
eval instant at 50m round(5.2 + 0.0005 * http_requests{group="production",job="api-server"}, 0.1)
{group="production", instance="0", job="api-server"} 5.3
{group="production", instance="1", job="api-server"} 5.3
# Round should work correctly with negative numbers and multiple decimal places.
eval instant at 50m round(-1 * (5.2 + 0.0005 * http_requests{group="production",job="api-server"}), 0.1)
{group="production", instance="0", job="api-server"} -5.2
{group="production", instance="1", job="api-server"} -5.3
# Round should work correctly with big toNearests.
eval instant at 50m round(0.025 * http_requests{group="production",job="api-server"}, 5)
{group="production", instance="0", job="api-server"} 5
{group="production", instance="1", job="api-server"} 5
eval instant at 50m round(0.045 * http_requests{group="production",job="api-server"}, 5)
{group="production", instance="0", job="api-server"} 5
{group="production", instance="1", job="api-server"} 10
eval instant at 50m avg_over_time(http_requests{group="production",job="api-server"}[1h])
{group="production", instance="0", job="api-server"} 50
{group="production", instance="1", job="api-server"} 100
eval instant at 50m count_over_time(http_requests{group="production",job="api-server"}[1h])
{group="production", instance="0", job="api-server"} 11
{group="production", instance="1", job="api-server"} 11
eval instant at 50m max_over_time(http_requests{group="production",job="api-server"}[1h])
{group="production", instance="0", job="api-server"} 100
{group="production", instance="1", job="api-server"} 200
eval instant at 50m min_over_time(http_requests{group="production",job="api-server"}[1h])
{group="production", instance="0", job="api-server"} 0
{group="production", instance="1", job="api-server"} 0
eval instant at 50m sum_over_time(http_requests{group="production",job="api-server"}[1h])
{group="production", instance="0", job="api-server"} 550
{group="production", instance="1", job="api-server"} 1100
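# The *_over_time expectations above can be reproduced from the eleven raw samples in the 1h
# window (0, 10, ..., 100 for instance 0; twice that for instance 1). A rough sketch of the
# aggregations over a plain slice of values, ignoring staleness handling:

package main

import "fmt"

// overTime computes the *_over_time style aggregations for the raw
// sample values that fall into the selected range.
func overTime(values []float64) (sum, count, avg, min, max float64) {
	if len(values) == 0 {
		return
	}
	min, max = values[0], values[0]
	for _, v := range values {
		sum += v
		if v < min {
			min = v
		}
		if v > max {
			max = v
		}
	}
	count = float64(len(values))
	avg = sum / count
	return
}

func main() {
	// Samples of http_requests{group="production",instance="0",job="api-server"}
	// in the [0m, 50m] window, scraped every 5m.
	vals := []float64{0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100}
	sum, count, avg, min, max := overTime(vals)
	fmt.Println(sum, count, avg, min, max) // 550 11 50 0 100
}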
eval instant at 50m time()
3000
eval instant at 50m drop_common_labels(http_requests{group="production",job="api-server"})
http_requests{instance="0"} 100
http_requests{instance="1"} 200
eval instant at 50m {__name__=~".*"}
http_requests{group="canary", instance="0", job="api-server"} 300
http_requests{group="canary", instance="0", job="app-server"} 700
http_requests{group="canary", instance="1", job="api-server"} 400
http_requests{group="canary", instance="1", job="app-server"} 800
http_requests{group="production", instance="0", job="api-server"} 100
http_requests{group="production", instance="0", job="app-server"} 500
http_requests{group="production", instance="1", job="api-server"} 200
http_requests{group="production", instance="1", job="app-server"} 600
testcounter_reset_end 0
testcounter_reset_middle 50
x{y="testvalue"} 100
label_grouping_test{a="a", b="abb"} 200
label_grouping_test{a="aa", b="bb"} 100
vector_matching_a{l="x"} 10
vector_matching_a{l="y"} 20
vector_matching_b{l="x"} 40
cpu_count{instance="1", type="smp"} 200
cpu_count{instance="0", type="smp"} 100
cpu_count{instance="0", type="numa"} 300
eval instant at 50m {job=~"server", job!~"api"}
http_requests{group="canary", instance="0", job="app-server"} 700
http_requests{group="canary", instance="1", job="app-server"} 800
http_requests{group="production", instance="0", job="app-server"} 500
http_requests{group="production", instance="1", job="app-server"} 600
# Test alternative "by"-clause order.
eval instant at 50m sum by (group) (http_requests{job="api-server"})
{group="canary"} 700
{group="production"} 300
# Test alternative "by"-clause order with "keeping_extra".
eval instant at 50m sum by (group) keeping_extra (http_requests{job="api-server"})
{group="canary", job="api-server"} 700
{group="production", job="api-server"} 300
# Test both alternative "by"-clause orders in one expression.
# Public health warning: stick to one form within an expression (or even
# in an organization), or risk serious user confusion.
eval instant at 50m sum(sum by (group) keeping_extra (http_requests{job="api-server"})) by (job)
{job="api-server"} 1000
eval instant at 50m http_requests{group="canary"} and http_requests{instance="0"}
http_requests{group="canary", instance="0", job="api-server"} 300
http_requests{group="canary", instance="0", job="app-server"} 700
eval instant at 50m (http_requests{group="canary"} + 1) and http_requests{instance="0"}
{group="canary", instance="0", job="api-server"} 301
{group="canary", instance="0", job="app-server"} 701
eval instant at 50m (http_requests{group="canary"} + 1) and on(instance, job) http_requests{instance="0", group="production"}
{group="canary", instance="0", job="api-server"} 301
{group="canary", instance="0", job="app-server"} 701
eval instant at 50m (http_requests{group="canary"} + 1) and on(instance) http_requests{instance="0", group="production"}
{group="canary", instance="0", job="api-server"} 301
{group="canary", instance="0", job="app-server"} 701
eval instant at 50m http_requests{group="canary"} or http_requests{group="production"}
http_requests{group="canary", instance="0", job="api-server"} 300
http_requests{group="canary", instance="0", job="app-server"} 700
http_requests{group="canary", instance="1", job="api-server"} 400
http_requests{group="canary", instance="1", job="app-server"} 800
http_requests{group="production", instance="0", job="api-server"} 100
http_requests{group="production", instance="0", job="app-server"} 500
http_requests{group="production", instance="1", job="api-server"} 200
http_requests{group="production", instance="1", job="app-server"} 600
# On overlap the rhs samples must be dropped.
eval instant at 50m (http_requests{group="canary"} + 1) or http_requests{instance="1"}
{group="canary", instance="0", job="api-server"} 301
{group="canary", instance="0", job="app-server"} 701
{group="canary", instance="1", job="api-server"} 401
{group="canary", instance="1", job="app-server"} 801
http_requests{group="production", instance="1", job="api-server"} 200
http_requests{group="production", instance="1", job="app-server"} 600
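# The and/or results above behave like label-set operations that ignore the metric name when
# fingerprinting samples (which is why the rhs canary samples count as overlapping even though
# the lhs lost its __name__): `or` keeps all lhs samples plus rhs samples with an unseen label
# set, `and` keeps lhs samples whose label set also occurs on the rhs. A rough sketch of that
# set logic, ignoring on(...) modifiers:

package main

import (
	"fmt"
	"sort"
	"strings"
)

type sample struct {
	labels map[string]string
	value  float64
}

// key fingerprints a sample by all labels except the metric name,
// which is how the set operators above decide whether two samples match.
func key(s sample) string {
	var parts []string
	for n, v := range s.labels {
		if n == "__name__" {
			continue
		}
		parts = append(parts, n+"="+v)
	}
	sort.Strings(parts)
	return strings.Join(parts, ",")
}

// or returns all lhs samples plus the rhs samples whose label set is
// not already present on the lhs.
func or(lhs, rhs []sample) []sample {
	out := append([]sample{}, lhs...)
	seen := map[string]bool{}
	for _, s := range lhs {
		seen[key(s)] = true
	}
	for _, s := range rhs {
		if !seen[key(s)] {
			out = append(out, s)
		}
	}
	return out
}

// and keeps only the lhs samples whose label set also occurs on the rhs.
func and(lhs, rhs []sample) []sample {
	seen := map[string]bool{}
	for _, s := range rhs {
		seen[key(s)] = true
	}
	var out []sample
	for _, s := range lhs {
		if seen[key(s)] {
			out = append(out, s)
		}
	}
	return out
}

func main() {
	lhs := []sample{{map[string]string{"group": "canary", "instance": "1", "job": "api-server"}, 401}}
	rhs := []sample{
		{map[string]string{"__name__": "http_requests", "group": "canary", "instance": "1", "job": "api-server"}, 400},
		{map[string]string{"__name__": "http_requests", "group": "production", "instance": "1", "job": "api-server"}, 200},
	}
	fmt.Println(len(or(lhs, rhs)))  // 2: the overlapping rhs canary sample is dropped
	fmt.Println(len(and(lhs, rhs))) // 1
}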
# Matching only on "instance" drops every rhs entry whose instance value ("0" or "1") is
# already present on the lhs, but keeps entries without the instance label.
eval instant at 50m (http_requests{group="canary"} + 1) or on(instance) (http_requests or cpu_count or vector_matching_a)
{group="canary", instance="0", job="api-server"} 301
{group="canary", instance="0", job="app-server"} 701
{group="canary", instance="1", job="api-server"} 401
{group="canary", instance="1", job="app-server"} 801
vector_matching_a{l="x"} 10
vector_matching_a{l="y"} 20
eval instant at 50m http_requests{group="canary"} / on(instance,job) http_requests{group="production"}
{instance="0", job="api-server"} 3
{instance="0", job="app-server"} 1.4
{instance="1", job="api-server"} 2
{instance="1", job="app-server"} 1.3333333333333333
# Include labels must guarantee uniquely identifiable time series.
eval_fail instant at 50m http_requests{group="production"} / on(instance) group_left(group) cpu_count{type="smp"}
# Many-to-many matching is not allowed.
eval_fail instant at 50m http_requests{group="production"} / on(instance) group_left(job,type) cpu_count
# Many-to-one matching must be explicit.
eval_fail instant at 50m http_requests{group="production"} / on(instance) cpu_count{type="smp"}
eval instant at 50m http_requests{group="production"} / on(instance) group_left(job) cpu_count{type="smp"}
{instance="1", job="api-server"} 1
{instance="0", job="app-server"} 5
{instance="1", job="app-server"} 3
{instance="0", job="api-server"} 1
# Ensure that the side of the grouping modifier (group_left vs. group_right) preserves which
# operand is which: this expression is the reciprocal of the division above.
eval instant at 50m cpu_count{type="smp"} / on(instance) group_right(job) http_requests{group="production"}
{instance="1", job="app-server"} 0.3333333333333333
{instance="0", job="app-server"} 0.2
{instance="1", job="api-server"} 1
{instance="0", job="api-server"} 1
# Include labels from both sides.
eval instant at 50m http_requests{group="production"} / on(instance) group_left(job) cpu_count{type="smp"}
{instance="1", job="api-server"} 1
{instance="0", job="app-server"} 5
{instance="1", job="app-server"} 3
{instance="0", job="api-server"} 1
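# The group_left/group_right expectations reduce to a many-to-one join on the on(...) labels:
# each many-side sample must map to exactly one one-side sample, and the on() labels plus the
# labels listed in the grouping modifier are what survive on the output (and must identify each
# result uniquely, hence the eval_fail cases above). A simplified signature-based join, ignoring
# the real engine's hashing and error reporting:

package main

import (
	"fmt"
	"sort"
	"strings"
)

type labels map[string]string

// signature builds a join key from the labels named in "on".
func signature(ls labels, on []string) string {
	parts := make([]string, 0, len(on))
	for _, n := range on {
		parts = append(parts, n+"="+ls[n])
	}
	sort.Strings(parts)
	return strings.Join(parts, ",")
}

func main() {
	on := []string{"instance"}

	// One side: cpu_count{type="smp"} values at 50m, keyed by instance.
	one := map[string]float64{
		signature(labels{"instance": "0"}, on): 100,
		signature(labels{"instance": "1"}, on): 200,
	}

	// Many side: http_requests{group="production"} values at 50m.
	many := []struct {
		ls  labels
		val float64
	}{
		{labels{"instance": "0", "job": "api-server"}, 100},
		{labels{"instance": "1", "job": "api-server"}, 200},
		{labels{"instance": "0", "job": "app-server"}, 500},
		{labels{"instance": "1", "job": "app-server"}, 600},
	}

	// group_left: each many-side sample is divided by its unique match.
	for _, m := range many {
		if denom, ok := one[signature(m.ls, on)]; ok {
			fmt.Printf("{instance=%q, job=%q} %v\n", m.ls["instance"], m.ls["job"], m.val/denom)
		}
	}
}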
eval instant at 50m http_requests{group="production"} < on(instance,job) http_requests{group="canary"}
{instance="1", job="app-server"} 600
{instance="0", job="app-server"} 500
{instance="1", job="api-server"} 200
{instance="0", job="api-server"} 100
eval instant at 50m http_requests{group="production"} > on(instance,job) http_requests{group="canary"}
# no output
eval instant at 50m http_requests{group="production"} == on(instance,job) http_requests{group="canary"}
# no output
eval instant at 50m http_requests > on(instance) group_left(group,job) cpu_count{type="smp"}
{group="canary", instance="0", job="app-server"} 700
{group="canary", instance="1", job="app-server"} 800
{group="canary", instance="0", job="api-server"} 300
{group="canary", instance="1", job="api-server"} 400
{group="production", instance="0", job="app-server"} 500
{group="production", instance="1", job="app-server"} 600
eval instant at 50m {l="x"} + on(__name__) {l="y"}
vector_matching_a 30
eval instant at 50m absent(nonexistent)
{} 1
eval instant at 50m absent(nonexistent{job="testjob", instance="testinstance", method=~".x"})
{instance="testinstance", job="testjob"} 1
eval instant at 50m count_scalar(absent(http_requests))
0
eval instant at 50m count_scalar(absent(sum(http_requests)))
0
eval instant at 50m absent(sum(nonexistent{job="testjob", instance="testinstance"}))
{} 1
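# absent() returns an empty vector when its argument has samples and a single 1-valued sample
# otherwise; the labels of that sample are derived from the selector's equality matchers (the
# regex matcher on method is dropped, and a non-selector argument yields {}). A hedged sketch of
# that label derivation, using simplified types rather than the engine's own:

package main

import "fmt"

// matcher is a simplified label matcher; only equality matchers
// contribute labels to the absent() result.
type matcher struct {
	name, value string
	isEquality  bool
}

// absentLabels derives the label set attached to the 1-valued sample
// that absent() returns when the input vector is empty.
func absentLabels(matchers []matcher) map[string]string {
	ls := map[string]string{}
	for _, m := range matchers {
		// The metric name is not carried over, as the expected output above suggests.
		if m.isEquality && m.name != "__name__" {
			ls[m.name] = m.value
		}
	}
	return ls
}

func main() {
	ms := []matcher{
		{"__name__", "nonexistent", true},
		{"job", "testjob", true},
		{"instance", "testinstance", true},
		{"method", ".x", false}, // regex matcher, ignored
	}
	fmt.Println(absentLabels(ms)) // map[instance:testinstance job:testjob]
}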
eval instant at 50m http_requests{group="production",job="api-server"} offset 5m
http_requests{group="production", instance="0", job="api-server"} 90
http_requests{group="production", instance="1", job="api-server"} 180
eval instant at 50m rate(http_requests{group="production",job="api-server"}[10m] offset 5m)
{group="production", instance="0", job="api-server"} 0.03333333333333333
{group="production", instance="1", job="api-server"} 0.06666666666666667
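# The rate expectations above work out to the per-second increase over the shifted 10m window:
# instance 0 grows by 10 every 5m, so 20 over 600s. A back-of-the-envelope sketch, assuming
# evenly spaced samples that exactly span the range and no counter resets (the real rate()
# handles both):

package main

import "fmt"

// simpleRate is a naive per-second rate over samples that exactly span
// rangeSeconds.
func simpleRate(samples []float64, rangeSeconds float64) float64 {
	if len(samples) < 2 {
		return 0
	}
	return (samples[len(samples)-1] - samples[0]) / rangeSeconds
}

func main() {
	// http_requests{group="production",instance="0",job="api-server"}
	// at 35m, 40m, 45m (the [10m] window offset 5m, evaluated at t=50m).
	fmt.Println(simpleRate([]float64{70, 80, 90}, 600)) // 0.03333333333333333
	// instance="1" grows twice as fast.
	fmt.Println(simpleRate([]float64{140, 160, 180}, 600)) // 0.06666666666666667
}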
# Regression test for missing separator byte in labelsToGroupingKey.
eval instant at 50m sum(label_grouping_test) by (a, b)
{a="a", b="abb"} 200
{a="aa", b="bb"} 100
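# Without a separator byte, the label values of the two series above concatenate to the same
# grouping key ("aabb"), which is exactly the collision this regression test guards against.
# A tiny illustration (groupingKey is a hypothetical stand-in, not the engine's
# labelsToGroupingKey):

package main

import (
	"fmt"
	"strings"
)

// groupingKey joins label values into a map key; sep must be a byte that
// cannot occur in label values, otherwise distinct label sets collide.
func groupingKey(values []string, sep string) string {
	return strings.Join(values, sep)
}

func main() {
	a := []string{"a", "abb"}
	b := []string{"aa", "bb"}
	fmt.Println(groupingKey(a, "") == groupingKey(b, ""))         // true: collision without a separator
	fmt.Println(groupingKey(a, "\xff") == groupingKey(b, "\xff")) // false: the separator keeps them apart
}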
eval instant at 50m http_requests{group="canary", instance="0", job="api-server"} / 0
{group="canary", instance="0", job="api-server"} +Inf
eval instant at 50m -1 * http_requests{group="canary", instance="0", job="api-server"} / 0
{group="canary", instance="0", job="api-server"} -Inf
eval instant at 50m 0 * http_requests{group="canary", instance="0", job="api-server"} / 0
{group="canary", instance="0", job="api-server"} NaN
eval instant at 50m 0 * http_requests{group="canary", instance="0", job="api-server"} % 0
{group="canary", instance="0", job="api-server"} NaN
eval instant at 50m exp(vector_matching_a)
{l="x"} 22026.465794806718
{l="y"} 485165195.4097903
eval instant at 50m exp(vector_matching_a - 10)
{l="y"} 22026.465794806718
{l="x"} 1
eval instant at 50m exp(vector_matching_a - 20)
{l="x"} 4.5399929762484854e-05
{l="y"} 1
eval instant at 50m ln(vector_matching_a)
{l="x"} 2.302585092994046
{l="y"} 2.995732273553991
eval instant at 50m ln(vector_matching_a - 10)
{l="y"} 2.302585092994046
{l="x"} -Inf
eval instant at 50m ln(vector_matching_a - 20)
{l="y"} -Inf
{l="x"} NaN
eval instant at 50m exp(ln(vector_matching_a))
{l="y"} 20
{l="x"} 10
eval instant at 50m sqrt(vector_matching_a)
{l="x"} 3.1622776601683795
{l="y"} 4.47213595499958
eval instant at 50m log2(vector_matching_a)
{l="x"} 3.3219280948873626
{l="y"} 4.321928094887363
eval instant at 50m log2(vector_matching_a - 10)
{l="y"} 3.3219280948873626
{l="x"} -Inf
eval instant at 50m log2(vector_matching_a - 20)
{l="x"} NaN
{l="y"} -Inf
eval instant at 50m log10(vector_matching_a)
{l="x"} 1
{l="y"} 1.301029995663981
eval instant at 50m log10(vector_matching_a - 10)
{l="y"} 1
{l="x"} -Inf
eval instant at 50m log10(vector_matching_a - 20)
{l="x"} NaN
{l="y"} -Inf
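# The exp/ln/log2/log10/sqrt expectations above match Go's math package directly, including the
# -Inf and NaN results for zero and negative arguments:

package main

import (
	"fmt"
	"math"
)

func main() {
	// Where the constants in the expectations above come from.
	fmt.Println(math.Exp(10))   // 22026.465794806718
	fmt.Println(math.Log(10))   // 2.302585092994046
	fmt.Println(math.Log2(10))  // ~3.32192809488736...
	fmt.Println(math.Log10(20)) // ~1.30102999566398...
	fmt.Println(math.Sqrt(10))  // 3.1622776601683795
	fmt.Println(math.Log(0))    // -Inf
	fmt.Println(math.Log(-10))  // NaN
}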
eval instant at 50m stddev(http_requests)
{} 229.12878474779
eval instant at 50m stddev by (instance)(http_requests)
{instance="0"} 223.60679774998
{instance="1"} 223.60679774998
eval instant at 50m stdvar(http_requests)
{} 52500
eval instant at 50m stdvar by (instance)(http_requests)
{instance="0"} 50000
{instance="1"} 50000
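# The stddev/stdvar expectations are the population variance (divide by N, not N-1) and its
# square root over the eight http_requests values 100..800: mean 450, variance 52500. The
# expected values above carry fewer decimal places than a full float64, which suggests the
# harness compares numbers approximately. A quick sketch of the calculation:

package main

import (
	"fmt"
	"math"
)

// populationVariance returns the population variance (divide by N,
// not N-1), which is what the stdvar expectation above uses.
func populationVariance(values []float64) float64 {
	var mean float64
	for _, v := range values {
		mean += v
	}
	mean /= float64(len(values))

	var sq float64
	for _, v := range values {
		d := v - mean
		sq += d * d
	}
	return sq / float64(len(values))
}

func main() {
	vals := []float64{100, 200, 300, 400, 500, 600, 700, 800}
	variance := populationVariance(vals)
	fmt.Println(variance)            // 52500
	fmt.Println(math.Sqrt(variance)) // ~229.128784747792
}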
# Matrix tests.
clear
load 1h
testmetric{testlabel="1"} 1 1
testmetric{testlabel="2"} _ 2
eval instant at 0h drop_common_labels(testmetric)
testmetric 1
eval instant at 1h drop_common_labels(testmetric)
testmetric{testlabel="1"} 1
testmetric{testlabel="2"} 2
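# drop_common_labels removes every label (other than the metric name) whose value is identical
# across all series present at the evaluation time, which is why testlabel disappears at 0h
# (only one series has a sample, so all of its labels are common) but survives at 1h. A rough
# sketch of that rule:

package main

import "fmt"

// dropCommonLabels removes labels that carry the identical value on
// every series in the input.
func dropCommonLabels(series []map[string]string) []map[string]string {
	if len(series) == 0 {
		return series
	}
	// Start with the first series' labels as candidates.
	common := map[string]string{}
	for n, v := range series[0] {
		if n == "__name__" {
			continue // the metric name is kept, as the expected output above shows
		}
		common[n] = v
	}
	// Keep only candidates shared by all other series.
	for _, ls := range series[1:] {
		for n, v := range common {
			if lv, ok := ls[n]; !ok || lv != v {
				delete(common, n)
			}
		}
	}
	out := make([]map[string]string, 0, len(series))
	for _, ls := range series {
		cleaned := map[string]string{}
		for n, v := range ls {
			if _, isCommon := common[n]; !isCommon {
				cleaned[n] = v
			}
		}
		out = append(out, cleaned)
	}
	return out
}

func main() {
	// At 1h both series exist with different testlabel values, so it stays.
	fmt.Println(dropCommonLabels([]map[string]string{
		{"__name__": "testmetric", "testlabel": "1"},
		{"__name__": "testmetric", "testlabel": "2"},
	}))
	// At 0h only one series has a sample, so all of its labels are common.
	fmt.Println(dropCommonLabels([]map[string]string{
		{"__name__": "testmetric", "testlabel": "1"},
	}))
}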
clear
load 1h
testmetric{testlabel="1"} 1 1
testmetric{testlabel="2"} 2 _
eval instant at 0h sum(testmetric) keeping_extra
{} 3
eval instant at 1h sum(testmetric) keeping_extra
{testlabel="1"} 1
clear
load 1h
testmetric{aa="bb"} 1
testmetric{a="abb"} 2
eval instant at 0h testmetric
testmetric{aa="bb"} 1
testmetric{a="abb"} 2
promql/testdata/literals.test vendored Normal file
@ -0,0 +1,56 @@
eval instant at 50m 12.34e6
12340000
eval instant at 50m 12.34e+6
12340000
eval instant at 50m 12.34e-6
0.00001234
eval instant at 50m 1+1
2
eval instant at 50m 1-1
0
eval instant at 50m 1 - -1
2
eval instant at 50m .2
0.2
eval instant at 50m +0.2
0.2
eval instant at 50m -0.2e-6
-0.0000002
eval instant at 50m +Inf
+Inf
eval instant at 50m inF
+Inf
eval instant at 50m -inf
-Inf
eval instant at 50m NaN
NaN
eval instant at 50m nan
NaN
eval instant at 50m 2.
2
eval instant at 50m 1 / 0
+Inf
eval instant at 50m -1 / 0
-Inf
eval instant at 50m 0 / 0
NaN
eval instant at 50m 1 % 0
NaN
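# The literal expectations above line up with Go's own float parsing: the Inf/NaN spellings are
# case-insensitive and a bare leading or trailing dot is accepted. This is only an analogy (the
# PromQL lexer scans numbers itself), but it is an easy way to sanity-check the values:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	for _, lit := range []string{"12.34e6", "12.34e-6", ".2", "2.", "+Inf", "inF", "-inf", "NaN", "nan"} {
		v, err := strconv.ParseFloat(lit, 64)
		fmt.Println(lit, v, err)
	}
}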