make matrix selection and lookback left-open and right-closed

Signed-off-by: Zhang Zhanpeng <zhangzhanpeng.zzp@alibaba-inc.com>
Signed-off-by: beorn7 <beorn@grafana.com>
Co-authored-by: beorn7 <beorn@grafana.com>
Zhang Zhanpeng 2024-04-09 00:46:52 +08:00
parent 0c76210e83
commit debbdb8608
15 changed files with 421 additions and 359 deletions

View file

@ -89,11 +89,11 @@ tests:
# Ensure lookback delta is respected when a value is missing.
- expr: timestamp(test_missing)
eval_time: 5m
eval_time: 4m59s
exp_samples:
- value: 0
- expr: timestamp(test_missing)
eval_time: 5m1s
eval_time: 5m
exp_samples: []
# Minimal test case to check edge case of a single sample.
@ -113,7 +113,7 @@ tests:
- expr: count_over_time(fixed_data[1h])
eval_time: 1h
exp_samples:
- value: 61
- value: 60
- expr: timestamp(fixed_data)
eval_time: 1h
exp_samples:
@ -183,7 +183,7 @@ tests:
- expr: job:test:count_over_time1m
eval_time: 1m
exp_samples:
- value: 61
- value: 60
labels: 'job:test:count_over_time1m{job="test"}'
- expr: timestamp(job:test:count_over_time1m)
eval_time: 1m10s
@ -194,7 +194,7 @@ tests:
- expr: job:test:count_over_time1m
eval_time: 2m
exp_samples:
- value: 61
- value: 60
labels: 'job:test:count_over_time1m{job="test"}'
- expr: timestamp(job:test:count_over_time1m)
eval_time: 2m59s999ms

View file

@ -189,12 +189,12 @@ Range vector literals work like instant vector literals, except that they
select a range of samples back from the current instant. Syntactically, a [time
duration](#time-durations) is appended in square brackets (`[]`) at the end of
a vector selector to specify how far back in time values should be fetched for
each resulting range vector element. The range is a closed interval,
i.e. samples with timestamps coinciding with either boundary of the range are
still included in the selection.
each resulting range vector element. The range is a left-open and right-closed interval,
i.e. samples with timestamps coinciding with the left boundary of the range are excluded from the selection,
while samples coinciding with the right boundary of the range are included in the selection.
In this example, we select all the values we have recorded within the last 5
minutes for all time series that have the metric name `http_requests_total` and
In this example, we select all the values recorded less than 5m ago for all time series
that have the metric name `http_requests_total` and
a `job` label set to `prometheus`:
http_requests_total{job="prometheus"}[5m]
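As a rough illustration of the new semantics (a minimal sketch with invented names, not code from this commit), a sample at timestamp t belongs to the range ending at the evaluation timestamp only if it is strictly newer than the left boundary:

package main

import "fmt"

// inRange reports whether a sample timestamp (in ms) falls into the
// left-open, right-closed window (evalTs-rangeMs, evalTs].
func inRange(t, evalTs, rangeMs int64) bool {
	return t > evalTs-rangeMs && t <= evalTs
}

func main() {
	evalTs, rangeMs := int64(300_000), int64(300_000) // eval at 5m, [5m] range
	fmt.Println(inRange(0, evalTs, rangeMs))       // false: exactly on the left boundary
	fmt.Println(inRange(1, evalTs, rangeMs))       // true
	fmt.Println(inRange(300_000, evalTs, rangeMs)) // true: right boundary is included
}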
@ -335,7 +335,7 @@ independently of the actual present time series data. This is mainly to support
cases like aggregation (`sum`, `avg`, and so on), where multiple aggregated
time series do not precisely align in time. Because of their independence,
Prometheus needs to assign a value at those timestamps for each relevant time
series. It does so by taking the newest sample before this timestamp within the lookback period.
series. It does so by taking the newest sample that is less than the lookback period ago.
The lookback period is 5 minutes by default.
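To make the boundary concrete (a hedged sketch mirroring the promtool unit tests earlier in this commit; the helper is invented), with the default 5m lookback a sample written at t=0 is still selected at 4m59s but no longer at exactly 5m:

package main

import (
	"fmt"
	"time"
)

// visible reports whether a sample at sampleTs is still selected for an
// instant query at evalTs under a left-open lookback window.
func visible(sampleTs, evalTs time.Time, lookback time.Duration) bool {
	return sampleTs.After(evalTs.Add(-lookback)) && !sampleTs.After(evalTs)
}

func main() {
	base := time.Unix(0, 0)
	lookback := 5 * time.Minute
	fmt.Println(visible(base, base.Add(4*time.Minute+59*time.Second), lookback)) // true
	fmt.Println(visible(base, base.Add(5*time.Minute), lookback))                // false: exactly one lookback period old
}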
If a target scrape or rule evaluation no longer returns a sample for a time

View file

@ -887,11 +887,17 @@ func getTimeRangesForSelector(s *parser.EvalStmt, n *parser.VectorSelector, path
}
if evalRange == 0 {
start -= durationMilliseconds(s.LookbackDelta)
// Reduce the start by one fewer ms than the lookback delta
// because we want to exclude samples that are precisely the
// lookback delta before the eval time.
start -= durationMilliseconds(s.LookbackDelta) - 1
} else {
// For all matrix queries we want to ensure that we have (end-start) + range selected
// this way we have `range` data before the start time
start -= durationMilliseconds(evalRange)
// For all matrix queries we want to ensure that we have
// (end-start) + range selected; this way we have `range` data
// before the start time. We subtract one ms from the range to
// exclude samples positioned directly at the lower boundary of
// the range.
start -= durationMilliseconds(evalRange) - 1
}
offsetMilliseconds := durationMilliseconds(n.OriginalOffset)
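To see what the -1 buys with concrete numbers (plain arithmetic with example values, not a call into the engine): a sample sitting exactly one lookback delta, or exactly one range, before the evaluation time is no longer fetched.

package main

import "fmt"

func main() {
	const evalTs = int64(300_000)    // eval time in ms
	const lookback = int64(300_000)  // 5m lookback delta in ms
	const evalRange = int64(120_000) // a [2m] range selector in ms

	// Instant selector: shrink the lookback window by 1ms on the left.
	instantStart := evalTs - (lookback - 1)
	// Range selector: same idea for the selector's own range.
	rangeStart := evalTs - (evalRange - 1)

	fmt.Println(instantStart) // 1      -> a sample at exactly t=0 is excluded
	fmt.Println(rangeStart)   // 180001 -> a sample at exactly t=180000 is excluded
}

This is the same shift visible in the SelectHints expectations below (Start values moving from 5000 to 5001, 180000 to 180001, and so on).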
@ -2021,7 +2027,7 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.Vec
seriesIterators := make([]*storage.MemoizedSeriesIterator, len(vs.Series))
for i, s := range vs.Series {
it := s.Iterator(nil)
seriesIterators[i] = storage.NewMemoizedIterator(it, durationMilliseconds(ev.lookbackDelta))
seriesIterators[i] = storage.NewMemoizedIterator(it, durationMilliseconds(ev.lookbackDelta)-1)
}
return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
@ -2083,7 +2089,7 @@ func (ev *evaluator) vectorSelectorSingle(it *storage.MemoizedSeriesIterator, no
if valueType == chunkenc.ValNone || t > refTime {
var ok bool
t, v, h, ok = it.PeekPrev()
if !ok || t < refTime-durationMilliseconds(ev.lookbackDelta) {
if !ok || t <= refTime-durationMilliseconds(ev.lookbackDelta) {
return 0, 0, nil, false
}
}
@ -2217,20 +2223,20 @@ func (ev *evaluator) matrixIterSlice(
mintFloats, mintHistograms := mint, mint
// First floats...
if len(floats) > 0 && floats[len(floats)-1].T >= mint {
if len(floats) > 0 && floats[len(floats)-1].T > mint {
// There is an overlap between previous and current ranges, retain common
// points. In most such cases:
// (a) the overlap is significantly larger than the eval step; and/or
// (b) the number of samples is relatively small.
// so a linear search will be as fast as a binary search.
var drop int
for drop = 0; floats[drop].T < mint; drop++ {
for drop = 0; floats[drop].T <= mint; drop++ {
}
ev.currentSamples -= drop
copy(floats, floats[drop:])
floats = floats[:len(floats)-drop]
// Only append points with timestamps after the last timestamp we have.
mintFloats = floats[len(floats)-1].T + 1
mintFloats = floats[len(floats)-1].T
} else {
ev.currentSamples -= len(floats)
if floats != nil {
@ -2239,14 +2245,14 @@ func (ev *evaluator) matrixIterSlice(
}
// ...then the same for histograms. TODO(beorn7): Use generics?
if len(histograms) > 0 && histograms[len(histograms)-1].T >= mint {
if len(histograms) > 0 && histograms[len(histograms)-1].T > mint {
// There is an overlap between previous and current ranges, retain common
// points. In most such cases:
// (a) the overlap is significantly larger than the eval step; and/or
// (b) the number of samples is relatively small.
// so a linear search will be as fast as a binary search.
var drop int
for drop = 0; histograms[drop].T < mint; drop++ {
for drop = 0; histograms[drop].T <= mint; drop++ {
}
// Rotate the buffer around the drop index so that points before mint can be
// reused to store new histograms.
@ -2257,7 +2263,7 @@ func (ev *evaluator) matrixIterSlice(
histograms = histograms[:len(histograms)-drop]
ev.currentSamples -= totalHPointSize(histograms)
// Only append points with timestamps after the last timestamp we have.
mintHistograms = histograms[len(histograms)-1].T + 1
mintHistograms = histograms[len(histograms)-1].T
} else {
ev.currentSamples -= totalHPointSize(histograms)
if histograms != nil {
@ -2281,7 +2287,7 @@ loop:
case chunkenc.ValFloatHistogram, chunkenc.ValHistogram:
t := buf.AtT()
// Values in the buffer are guaranteed to be smaller than maxt.
if t >= mintHistograms {
if t > mintHistograms {
if histograms == nil {
histograms = getMatrixSelectorHPoints()
}
@ -2307,7 +2313,7 @@ loop:
continue loop
}
// Values in the buffer are guaranteed to be smaller than maxt.
if t >= mintFloats {
if t > mintFloats {
ev.currentSamples++
if ev.currentSamples > ev.maxSamples {
ev.error(ErrTooManySamples(env))
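A rough sketch of the buffer-reuse rule above (invented types, not the evaluator's actual buffers): with the strict comparisons, points at or before the new left boundary are dropped, and new points are appended only strictly after the newest retained timestamp.

package main

import "fmt"

type fpoint struct {
	T int64
	F float64
}

// advance drops points at or before mint (the window is left-open) and returns
// the retained points plus the timestamp after which new points may be appended.
func advance(points []fpoint, mint int64) ([]fpoint, int64) {
	drop := 0
	for drop < len(points) && points[drop].T <= mint {
		drop++
	}
	points = append(points[:0], points[drop:]...)
	if len(points) == 0 {
		return points, mint
	}
	return points, points[len(points)-1].T
}

func main() {
	pts := []fpoint{{T: 10, F: 1}, {T: 20, F: 2}, {T: 30, F: 3}}
	pts, from := advance(pts, 20) // new window is (20, maxt]
	fmt.Println(pts, from)        // [{30 3}] 30 -> only points with T > 30 get appended next
}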

View file

@ -327,271 +327,271 @@ func TestSelectHintsSetCorrectly(t *testing.T) {
{
query: "foo", start: 10000,
expected: []*storage.SelectHints{
{Start: 5000, End: 10000},
{Start: 5001, End: 10000},
},
}, {
query: "foo @ 15", start: 10000,
expected: []*storage.SelectHints{
{Start: 10000, End: 15000},
{Start: 10001, End: 15000},
},
}, {
query: "foo @ 1", start: 10000,
expected: []*storage.SelectHints{
{Start: -4000, End: 1000},
{Start: -3999, End: 1000},
},
}, {
query: "foo[2m]", start: 200000,
expected: []*storage.SelectHints{
{Start: 80000, End: 200000, Range: 120000},
{Start: 80001, End: 200000, Range: 120000},
},
}, {
query: "foo[2m] @ 180", start: 200000,
expected: []*storage.SelectHints{
{Start: 60000, End: 180000, Range: 120000},
{Start: 60001, End: 180000, Range: 120000},
},
}, {
query: "foo[2m] @ 300", start: 200000,
expected: []*storage.SelectHints{
{Start: 180000, End: 300000, Range: 120000},
{Start: 180001, End: 300000, Range: 120000},
},
}, {
query: "foo[2m] @ 60", start: 200000,
expected: []*storage.SelectHints{
{Start: -60000, End: 60000, Range: 120000},
{Start: -59999, End: 60000, Range: 120000},
},
}, {
query: "foo[2m] offset 2m", start: 300000,
expected: []*storage.SelectHints{
{Start: 60000, End: 180000, Range: 120000},
{Start: 60001, End: 180000, Range: 120000},
},
}, {
query: "foo[2m] @ 200 offset 2m", start: 300000,
expected: []*storage.SelectHints{
{Start: -40000, End: 80000, Range: 120000},
{Start: -39999, End: 80000, Range: 120000},
},
}, {
query: "foo[2m:1s]", start: 300000,
expected: []*storage.SelectHints{
{Start: 175000, End: 300000, Step: 1000},
{Start: 175001, End: 300000, Step: 1000},
},
}, {
query: "count_over_time(foo[2m:1s])", start: 300000,
expected: []*storage.SelectHints{
{Start: 175000, End: 300000, Func: "count_over_time", Step: 1000},
{Start: 175001, End: 300000, Func: "count_over_time", Step: 1000},
},
}, {
query: "count_over_time(foo[2m:1s] @ 300)", start: 200000,
expected: []*storage.SelectHints{
{Start: 175000, End: 300000, Func: "count_over_time", Step: 1000},
{Start: 175001, End: 300000, Func: "count_over_time", Step: 1000},
},
}, {
query: "count_over_time(foo[2m:1s] @ 200)", start: 200000,
expected: []*storage.SelectHints{
{Start: 75000, End: 200000, Func: "count_over_time", Step: 1000},
{Start: 75001, End: 200000, Func: "count_over_time", Step: 1000},
},
}, {
query: "count_over_time(foo[2m:1s] @ 100)", start: 200000,
expected: []*storage.SelectHints{
{Start: -25000, End: 100000, Func: "count_over_time", Step: 1000},
{Start: -24999, End: 100000, Func: "count_over_time", Step: 1000},
},
}, {
query: "count_over_time(foo[2m:1s] offset 10s)", start: 300000,
expected: []*storage.SelectHints{
{Start: 165000, End: 290000, Func: "count_over_time", Step: 1000},
{Start: 165001, End: 290000, Func: "count_over_time", Step: 1000},
},
}, {
query: "count_over_time((foo offset 10s)[2m:1s] offset 10s)", start: 300000,
expected: []*storage.SelectHints{
{Start: 155000, End: 280000, Func: "count_over_time", Step: 1000},
{Start: 155001, End: 280000, Func: "count_over_time", Step: 1000},
},
}, {
// When the @ is on the vector selector, the enclosing subquery parameters
// don't affect the hint ranges.
query: "count_over_time((foo @ 200 offset 10s)[2m:1s] offset 10s)", start: 300000,
expected: []*storage.SelectHints{
{Start: 185000, End: 190000, Func: "count_over_time", Step: 1000},
{Start: 185001, End: 190000, Func: "count_over_time", Step: 1000},
},
}, {
// When the @ is on the vector selector, the enclosing subquery parameters
// don't affect the hint ranges.
query: "count_over_time((foo @ 200 offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000,
expected: []*storage.SelectHints{
{Start: 185000, End: 190000, Func: "count_over_time", Step: 1000},
{Start: 185001, End: 190000, Func: "count_over_time", Step: 1000},
},
}, {
query: "count_over_time((foo offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000,
expected: []*storage.SelectHints{
{Start: -45000, End: 80000, Func: "count_over_time", Step: 1000},
{Start: -44999, End: 80000, Func: "count_over_time", Step: 1000},
},
}, {
query: "foo", start: 10000, end: 20000,
expected: []*storage.SelectHints{
{Start: 5000, End: 20000, Step: 1000},
{Start: 5001, End: 20000, Step: 1000},
},
}, {
query: "foo @ 15", start: 10000, end: 20000,
expected: []*storage.SelectHints{
{Start: 10000, End: 15000, Step: 1000},
{Start: 10001, End: 15000, Step: 1000},
},
}, {
query: "foo @ 1", start: 10000, end: 20000,
expected: []*storage.SelectHints{
{Start: -4000, End: 1000, Step: 1000},
{Start: -3999, End: 1000, Step: 1000},
},
}, {
query: "rate(foo[2m] @ 180)", start: 200000, end: 500000,
expected: []*storage.SelectHints{
{Start: 60000, End: 180000, Range: 120000, Func: "rate", Step: 1000},
{Start: 60001, End: 180000, Range: 120000, Func: "rate", Step: 1000},
},
}, {
query: "rate(foo[2m] @ 300)", start: 200000, end: 500000,
expected: []*storage.SelectHints{
{Start: 180000, End: 300000, Range: 120000, Func: "rate", Step: 1000},
{Start: 180001, End: 300000, Range: 120000, Func: "rate", Step: 1000},
},
}, {
query: "rate(foo[2m] @ 60)", start: 200000, end: 500000,
expected: []*storage.SelectHints{
{Start: -60000, End: 60000, Range: 120000, Func: "rate", Step: 1000},
{Start: -59999, End: 60000, Range: 120000, Func: "rate", Step: 1000},
},
}, {
query: "rate(foo[2m])", start: 200000, end: 500000,
expected: []*storage.SelectHints{
{Start: 80000, End: 500000, Range: 120000, Func: "rate", Step: 1000},
{Start: 80001, End: 500000, Range: 120000, Func: "rate", Step: 1000},
},
}, {
query: "rate(foo[2m] offset 2m)", start: 300000, end: 500000,
expected: []*storage.SelectHints{
{Start: 60000, End: 380000, Range: 120000, Func: "rate", Step: 1000},
{Start: 60001, End: 380000, Range: 120000, Func: "rate", Step: 1000},
},
}, {
query: "rate(foo[2m:1s])", start: 300000, end: 500000,
expected: []*storage.SelectHints{
{Start: 175000, End: 500000, Func: "rate", Step: 1000},
{Start: 175001, End: 500000, Func: "rate", Step: 1000},
},
}, {
query: "count_over_time(foo[2m:1s])", start: 300000, end: 500000,
expected: []*storage.SelectHints{
{Start: 175000, End: 500000, Func: "count_over_time", Step: 1000},
{Start: 175001, End: 500000, Func: "count_over_time", Step: 1000},
},
}, {
query: "count_over_time(foo[2m:1s] offset 10s)", start: 300000, end: 500000,
expected: []*storage.SelectHints{
{Start: 165000, End: 490000, Func: "count_over_time", Step: 1000},
{Start: 165001, End: 490000, Func: "count_over_time", Step: 1000},
},
}, {
query: "count_over_time(foo[2m:1s] @ 300)", start: 200000, end: 500000,
expected: []*storage.SelectHints{
{Start: 175000, End: 300000, Func: "count_over_time", Step: 1000},
{Start: 175001, End: 300000, Func: "count_over_time", Step: 1000},
},
}, {
query: "count_over_time(foo[2m:1s] @ 200)", start: 200000, end: 500000,
expected: []*storage.SelectHints{
{Start: 75000, End: 200000, Func: "count_over_time", Step: 1000},
{Start: 75001, End: 200000, Func: "count_over_time", Step: 1000},
},
}, {
query: "count_over_time(foo[2m:1s] @ 100)", start: 200000, end: 500000,
expected: []*storage.SelectHints{
{Start: -25000, End: 100000, Func: "count_over_time", Step: 1000},
{Start: -24999, End: 100000, Func: "count_over_time", Step: 1000},
},
}, {
query: "count_over_time((foo offset 10s)[2m:1s] offset 10s)", start: 300000, end: 500000,
expected: []*storage.SelectHints{
{Start: 155000, End: 480000, Func: "count_over_time", Step: 1000},
{Start: 155001, End: 480000, Func: "count_over_time", Step: 1000},
},
}, {
// When the @ is on the vector selector, the enclosing subquery parameters
// don't affect the hint ranges.
query: "count_over_time((foo @ 200 offset 10s)[2m:1s] offset 10s)", start: 300000, end: 500000,
expected: []*storage.SelectHints{
{Start: 185000, End: 190000, Func: "count_over_time", Step: 1000},
{Start: 185001, End: 190000, Func: "count_over_time", Step: 1000},
},
}, {
// When the @ is on the vector selector, the enclosing subquery parameters
// don't affect the hint ranges.
query: "count_over_time((foo @ 200 offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, end: 500000,
expected: []*storage.SelectHints{
{Start: 185000, End: 190000, Func: "count_over_time", Step: 1000},
{Start: 185001, End: 190000, Func: "count_over_time", Step: 1000},
},
}, {
query: "count_over_time((foo offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, end: 500000,
expected: []*storage.SelectHints{
{Start: -45000, End: 80000, Func: "count_over_time", Step: 1000},
{Start: -44999, End: 80000, Func: "count_over_time", Step: 1000},
},
}, {
query: "sum by (dim1) (foo)", start: 10000,
expected: []*storage.SelectHints{
{Start: 5000, End: 10000, Func: "sum", By: true, Grouping: []string{"dim1"}},
{Start: 5001, End: 10000, Func: "sum", By: true, Grouping: []string{"dim1"}},
},
}, {
query: "sum without (dim1) (foo)", start: 10000,
expected: []*storage.SelectHints{
{Start: 5000, End: 10000, Func: "sum", Grouping: []string{"dim1"}},
{Start: 5001, End: 10000, Func: "sum", Grouping: []string{"dim1"}},
},
}, {
query: "sum by (dim1) (avg_over_time(foo[1s]))", start: 10000,
expected: []*storage.SelectHints{
{Start: 9000, End: 10000, Func: "avg_over_time", Range: 1000},
{Start: 9001, End: 10000, Func: "avg_over_time", Range: 1000},
},
}, {
query: "sum by (dim1) (max by (dim2) (foo))", start: 10000,
expected: []*storage.SelectHints{
{Start: 5000, End: 10000, Func: "max", By: true, Grouping: []string{"dim2"}},
{Start: 5001, End: 10000, Func: "max", By: true, Grouping: []string{"dim2"}},
},
}, {
query: "(max by (dim1) (foo))[5s:1s]", start: 10000,
expected: []*storage.SelectHints{
{Start: 0, End: 10000, Func: "max", By: true, Grouping: []string{"dim1"}, Step: 1000},
{Start: 1, End: 10000, Func: "max", By: true, Grouping: []string{"dim1"}, Step: 1000},
},
}, {
query: "(sum(http_requests{group=~\"p.*\"})+max(http_requests{group=~\"c.*\"}))[20s:5s]", start: 120000,
expected: []*storage.SelectHints{
{Start: 95000, End: 120000, Func: "sum", By: true, Step: 5000},
{Start: 95000, End: 120000, Func: "max", By: true, Step: 5000},
{Start: 95001, End: 120000, Func: "sum", By: true, Step: 5000},
{Start: 95001, End: 120000, Func: "max", By: true, Step: 5000},
},
}, {
query: "foo @ 50 + bar @ 250 + baz @ 900", start: 100000, end: 500000,
expected: []*storage.SelectHints{
{Start: 45000, End: 50000, Step: 1000},
{Start: 245000, End: 250000, Step: 1000},
{Start: 895000, End: 900000, Step: 1000},
{Start: 45001, End: 50000, Step: 1000},
{Start: 245001, End: 250000, Step: 1000},
{Start: 895001, End: 900000, Step: 1000},
},
}, {
query: "foo @ 50 + bar + baz @ 900", start: 100000, end: 500000,
expected: []*storage.SelectHints{
{Start: 45000, End: 50000, Step: 1000},
{Start: 95000, End: 500000, Step: 1000},
{Start: 895000, End: 900000, Step: 1000},
{Start: 45001, End: 50000, Step: 1000},
{Start: 95001, End: 500000, Step: 1000},
{Start: 895001, End: 900000, Step: 1000},
},
}, {
query: "rate(foo[2s] @ 50) + bar @ 250 + baz @ 900", start: 100000, end: 500000,
expected: []*storage.SelectHints{
{Start: 48000, End: 50000, Step: 1000, Func: "rate", Range: 2000},
{Start: 245000, End: 250000, Step: 1000},
{Start: 895000, End: 900000, Step: 1000},
{Start: 48001, End: 50000, Step: 1000, Func: "rate", Range: 2000},
{Start: 245001, End: 250000, Step: 1000},
{Start: 895001, End: 900000, Step: 1000},
},
}, {
query: "rate(foo[2s:1s] @ 50) + bar + baz", start: 100000, end: 500000,
expected: []*storage.SelectHints{
{Start: 43000, End: 50000, Step: 1000, Func: "rate"},
{Start: 95000, End: 500000, Step: 1000},
{Start: 95000, End: 500000, Step: 1000},
{Start: 43001, End: 50000, Step: 1000, Func: "rate"},
{Start: 95001, End: 500000, Step: 1000},
{Start: 95001, End: 500000, Step: 1000},
},
}, {
query: "rate(foo[2s:1s] @ 50) + bar + rate(baz[2m:1s] @ 900 offset 2m) ", start: 100000, end: 500000,
expected: []*storage.SelectHints{
{Start: 43000, End: 50000, Step: 1000, Func: "rate"},
{Start: 95000, End: 500000, Step: 1000},
{Start: 655000, End: 780000, Step: 1000, Func: "rate"},
{Start: 43001, End: 50000, Step: 1000, Func: "rate"},
{Start: 95001, End: 500000, Step: 1000},
{Start: 655001, End: 780000, Step: 1000, Func: "rate"},
},
}, { // Hints are based on the inner most subquery timestamp.
query: `sum_over_time(sum_over_time(metric{job="1"}[100s])[100s:25s] @ 50)[3s:1s] @ 3000`, start: 100000,
expected: []*storage.SelectHints{
{Start: -150000, End: 50000, Range: 100000, Func: "sum_over_time", Step: 25000},
{Start: -149999, End: 50000, Range: 100000, Func: "sum_over_time", Step: 25000},
},
}, { // Hints are based on the inner most subquery timestamp.
query: `sum_over_time(sum_over_time(metric{job="1"}[100s])[100s:25s] @ 3000)[3s:1s] @ 50`,
expected: []*storage.SelectHints{
{Start: 2800000, End: 3000000, Range: 100000, Func: "sum_over_time", Step: 25000},
{Start: 2800001, End: 3000000, Range: 100000, Func: "sum_over_time", Step: 25000},
},
},
} {
@ -941,22 +941,20 @@ load 10s
},
},
{
Query: "max_over_time(metricWith1SampleEvery10Seconds[59s])[20s:5s]",
Query: "max_over_time(metricWith1SampleEvery10Seconds[60s])[20s:5s]",
Start: time.Unix(201, 0),
PeakSamples: 10,
TotalSamples: 24, // (1 sample / 10 seconds * 60 seconds) * 20/5 (using 59s so we always return 6 samples
// as if we run a query on 00 looking back 60 seconds we will return 7 samples;
// see next test).
TotalSamples: 24, // (1 sample / 10 seconds * 60 seconds) * 4
TotalSamplesPerStep: stats.TotalSamplesPerStep{
201000: 24,
},
},
{
Query: "max_over_time(metricWith1SampleEvery10Seconds[60s])[20s:5s]",
Query: "max_over_time(metricWith1SampleEvery10Seconds[61s])[20s:5s]",
Start: time.Unix(201, 0),
PeakSamples: 11,
TotalSamples: 26, // (1 sample / 10 seconds * 60 seconds) * 4 + 2 as
// max_over_time(metricWith1SampleEvery10Seconds[60s]) @ 190 and 200 will return 7 samples.
// max_over_time(metricWith1SampleEvery10Seconds[61s]) @ 190 and 200 will return 7 samples.
TotalSamplesPerStep: stats.TotalSamplesPerStep{
201000: 26,
},
@ -965,10 +963,9 @@ load 10s
Query: "max_over_time(metricWith1HistogramEvery10Seconds[60s])[20s:5s]",
Start: time.Unix(201, 0),
PeakSamples: 72,
TotalSamples: 312, // (1 histogram (size 12) / 10 seconds * 60 seconds) * 4 + 2 * 12 as
// max_over_time(metricWith1SampleEvery10Seconds[60s]) @ 190 and 200 will return 7 samples.
TotalSamples: 288, // (1 histogram (size 12) / 10 seconds * 60 seconds) * 4
TotalSamplesPerStep: stats.TotalSamplesPerStep{
201000: 312,
201000: 288,
},
},
{
@ -1433,23 +1430,23 @@ load 10s
},
{
// The peak samples in memory is during the first evaluation:
// - Subquery takes 22 samples, 11 for each bigmetric,
// - Result is calculated per series where the series samples is buffered, hence 11 more here.
// - Subquery takes 22 samples, 11 for each bigmetric, but samples exactly on the left boundary are not evaluated.
// - Result is calculated per series where the series' samples are buffered, hence 10 more here.
// - The result of two series is added before the last series buffer is discarded, so 2 more here.
// Hence at peak it is 22 (subquery) + 11 (buffer of a series) + 2 (result from 2 series).
// Hence at peak it is 22 (subquery) + 10 (buffer of a series) + 2 (result from 2 series).
// The subquery samples and the buffer is discarded before duplicating.
Query: `rate(bigmetric[10s:1s] @ 10)`,
MaxSamples: 35,
MaxSamples: 34,
Start: time.Unix(0, 0),
End: time.Unix(10, 0),
Interval: 5 * time.Second,
},
{
// Here the reasoning is the same as above. But LHS and RHS are done one after another.
// So while one of them takes 35 samples at peak, we need to hold the 2 sample
// So while one of them takes 34 samples at peak, we need to hold the 2 sample
// result of the other till then.
Query: `rate(bigmetric[10s:1s] @ 10) + rate(bigmetric[10s:1s] @ 30)`,
MaxSamples: 37,
MaxSamples: 36,
Start: time.Unix(0, 0),
End: time.Unix(10, 0),
Interval: 5 * time.Second,
@ -1458,20 +1455,20 @@ load 10s
// promql.Sample as above but with only 1 part as step invariant.
// Here the peak is caused by the non-step invariant part as it touches more time range.
// Hence at peak it is 2*21 (subquery from 0s to 20s)
// + 11 (buffer of a series per evaluation)
// + 10 (buffer of a series per evaluation)
// + 6 (result from 2 series at 3 eval times).
Query: `rate(bigmetric[10s:1s]) + rate(bigmetric[10s:1s] @ 30)`,
MaxSamples: 59,
MaxSamples: 58,
Start: time.Unix(10, 0),
End: time.Unix(20, 0),
Interval: 5 * time.Second,
},
{
// Nested subquery.
// We saw that innermost rate takes 35 samples which is still the peak
// We saw that innermost rate takes 34 samples which is still the peak
// since the other two subqueries just duplicate the result.
Query: `rate(rate(bigmetric[10s:1s] @ 10)[100s:25s] @ 1000)[100s:20s] @ 2000`,
MaxSamples: 35,
MaxSamples: 34,
Start: time.Unix(10, 0),
},
{
@ -1585,11 +1582,11 @@ load 1ms
start: 10,
result: promql.Matrix{
promql.Series{
Floats: []promql.FPoint{{F: 28, T: 280000}, {F: 29, T: 290000}, {F: 30, T: 300000}},
Floats: []promql.FPoint{{F: 29, T: 290000}, {F: 30, T: 300000}},
Metric: lbls1,
},
promql.Series{
Floats: []promql.FPoint{{F: 56, T: 280000}, {F: 58, T: 290000}, {F: 60, T: 300000}},
Floats: []promql.FPoint{{F: 58, T: 290000}, {F: 60, T: 300000}},
Metric: lbls2,
},
},
@ -1598,7 +1595,7 @@ load 1ms
start: 100,
result: promql.Matrix{
promql.Series{
Floats: []promql.FPoint{{F: 3, T: -2000}, {F: 2, T: -1000}, {F: 1, T: 0}},
Floats: []promql.FPoint{{F: 2, T: -1000}, {F: 1, T: 0}},
Metric: lblsneg,
},
},
@ -1607,7 +1604,7 @@ load 1ms
start: 100,
result: promql.Matrix{
promql.Series{
Floats: []promql.FPoint{{F: 504, T: -503000}, {F: 503, T: -502000}, {F: 502, T: -501000}, {F: 501, T: -500000}},
Floats: []promql.FPoint{{F: 503, T: -502000}, {F: 502, T: -501000}, {F: 501, T: -500000}},
Metric: lblsneg,
},
},
@ -1616,7 +1613,7 @@ load 1ms
start: 100,
result: promql.Matrix{
promql.Series{
Floats: []promql.FPoint{{F: 2342, T: 2342}, {F: 2343, T: 2343}, {F: 2344, T: 2344}, {F: 2345, T: 2345}},
Floats: []promql.FPoint{{F: 2343, T: 2343}, {F: 2344, T: 2344}, {F: 2345, T: 2345}},
Metric: lblsms,
},
},
@ -3038,7 +3035,7 @@ func TestInstantQueryWithRangeVectorSelector(t *testing.T) {
ts time.Time
}{
"matches series with points in range": {
expr: "some_metric[1m]",
expr: "some_metric[2m]",
ts: baseT.Add(2 * time.Minute),
expected: promql.Matrix{
{
@ -3074,7 +3071,6 @@ func TestInstantQueryWithRangeVectorSelector(t *testing.T) {
{
Metric: labels.FromStrings("__name__", "some_metric_with_stale_marker"),
Floats: []promql.FPoint{
{T: timestamp.FromTime(baseT), F: 0},
{T: timestamp.FromTime(baseT.Add(time.Minute)), F: 1},
{T: timestamp.FromTime(baseT.Add(3 * time.Minute)), F: 3},
},
@ -3295,11 +3291,11 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) {
newTs := ts + offset*int64(time.Minute/time.Millisecond)
// sum_over_time().
queryString = fmt.Sprintf("sum_over_time(%s[%dm:1m])", seriesNameOverTime, offset)
queryString = fmt.Sprintf("sum_over_time(%s[%dm:1m])", seriesNameOverTime, offset+1)
queryAndCheck(queryString, newTs, []promql.Sample{{T: newTs, H: &c.expected, Metric: labels.EmptyLabels()}})
// avg_over_time().
queryString = fmt.Sprintf("avg_over_time(%s[%dm:1m])", seriesNameOverTime, offset)
queryString = fmt.Sprintf("avg_over_time(%s[%dm:1m])", seriesNameOverTime, offset+1)
queryAndCheck(queryString, newTs, []promql.Sample{{T: newTs, H: &c.expectedAvg, Metric: labels.EmptyLabels()}})
})
idx0++
@ -3724,43 +3720,43 @@ metric 0 1 2
}{
{
name: "default lookback delta",
ts: lastDatapointTs.Add(defaultLookbackDelta),
ts: lastDatapointTs.Add(defaultLookbackDelta - time.Millisecond),
expectSamples: true,
},
{
name: "outside default lookback delta",
ts: lastDatapointTs.Add(defaultLookbackDelta + time.Millisecond),
ts: lastDatapointTs.Add(defaultLookbackDelta),
expectSamples: false,
},
{
name: "custom engine lookback delta",
ts: lastDatapointTs.Add(10 * time.Minute),
ts: lastDatapointTs.Add(10*time.Minute - time.Millisecond),
engineLookback: 10 * time.Minute,
expectSamples: true,
},
{
name: "outside custom engine lookback delta",
ts: lastDatapointTs.Add(10*time.Minute + time.Millisecond),
ts: lastDatapointTs.Add(10 * time.Minute),
engineLookback: 10 * time.Minute,
expectSamples: false,
},
{
name: "custom query lookback delta",
ts: lastDatapointTs.Add(20 * time.Minute),
ts: lastDatapointTs.Add(20*time.Minute - time.Millisecond),
engineLookback: 10 * time.Minute,
queryLookback: 20 * time.Minute,
expectSamples: true,
},
{
name: "outside custom query lookback delta",
ts: lastDatapointTs.Add(20*time.Minute + time.Millisecond),
ts: lastDatapointTs.Add(20 * time.Minute),
engineLookback: 10 * time.Minute,
queryLookback: 20 * time.Minute,
expectSamples: false,
},
{
name: "negative custom query lookback delta",
ts: lastDatapointTs.Add(20 * time.Minute),
ts: lastDatapointTs.Add(20*time.Minute - time.Millisecond),
engineLookback: -10 * time.Minute,
queryLookback: 20 * time.Minute,
expectSamples: true,

View file

@ -237,7 +237,7 @@ eval instant at 5m sum by (group) (http_requests)
load 5m
testmetric {{}}
eval instant at 5m testmetric
eval instant at 0m testmetric
`,
expectedError: `error in eval testmetric (line 5): unexpected metric {__name__="testmetric"} in result, has value {count:0, sum:0}`,
},

View file

@ -250,7 +250,7 @@ clear
load 5m
http_requests{job="api-server", instance="0", group="production"} 0+10x10
http_requests{job="api-server", instance="1", group="production"} 0+20x10
http_requests{job="api-server", instance="2", group="production"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
http_requests{job="api-server", instance="2", group="production"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
http_requests{job="api-server", instance="0", group="canary"} 0+30x10
http_requests{job="api-server", instance="1", group="canary"} 0+40x10
http_requests{job="app-server", instance="0", group="production"} 0+50x10
@ -337,32 +337,32 @@ load 5m
version{job="app-server", instance="0", group="canary"} 7
version{job="app-server", instance="1", group="canary"} 7
eval instant at 5m count_values("version", version)
eval instant at 1m count_values("version", version)
{version="6"} 5
{version="7"} 2
{version="8"} 2
eval instant at 5m count_values(((("version"))), version)
eval instant at 1m count_values(((("version"))), version)
{version="6"} 5
{version="7"} 2
{version="8"} 2
eval instant at 5m count_values without (instance)("version", version)
eval instant at 1m count_values without (instance)("version", version)
{job="api-server", group="production", version="6"} 3
{job="api-server", group="canary", version="8"} 2
{job="app-server", group="production", version="6"} 2
{job="app-server", group="canary", version="7"} 2
# Overwrite label with output. Don't do this.
eval instant at 5m count_values without (instance)("job", version)
eval instant at 1m count_values without (instance)("job", version)
{job="6", group="production"} 5
{job="8", group="canary"} 2
{job="7", group="canary"} 2
# Overwrite label with output. Don't do this.
eval instant at 5m count_values by (job, group)("job", version)
eval instant at 1m count_values by (job, group)("job", version)
{job="6", group="production"} 5
{job="8", group="canary"} 2
{job="7", group="canary"} 2

View file

@ -76,45 +76,43 @@ eval instant at 25s sum_over_time(metric{job="1"}[100s:1s] offset 20s @ 100)
# Since vector selector has timestamp, the result value does not depend on the timestamp of subqueries.
# Inner most sum=1+2+...+10=55.
# With [100s:25s] subquery, it's 55*5.
# With [100s:25s] subquery, it's 55*4.
eval instant at 100s sum_over_time(sum_over_time(metric{job="1"}[100s] @ 100)[100s:25s] @ 50)
{job="1"} 275
{job="1"} 220
# Nested subqueries with different timestamps on both.
# Since vector selector has timestamp, the result value does not depend on the timestamp of subqueries.
# Sum of innermost subquery is 275 as above. The outer subquery repeats it 4 times.
# Sum of innermost subquery is 220 as above. The outer subquery repeats it 3 times.
eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[100s] @ 100)[100s:25s] @ 50)[3s:1s] @ 3000)
{job="1"} 1100
{job="1"} 660
# Testing the inner subquery timestamp since vector selector does not have @.
# Inner sum for subquery [100s:25s] @ 50 are
# at -50 nothing, at -25 nothing, at 0=0, at 25=2, at 50=4+5=9.
# This sum of 11 is repeated 4 times by outer subquery.
# at -50 nothing, at -25 nothing, at 0=0, at 25=2, at 50=5.
# This sum of 7 is repeated 3 times by outer subquery.
eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[10s])[100s:25s] @ 50)[3s:1s] @ 200)
{job="1"} 44
{job="1"} 21
# Inner sum for subquery [100s:25s] @ 200 are
# at 100=9+10, at 125=12, at 150=14+15, at 175=17, at 200=19+20.
# This sum of 116 is repeated 4 times by outer subquery.
# at 125=12, at 150=15, at 175=17, at 200=20.
# This sum of 64 is repeated 3 times by outer subquery.
eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[10s])[100s:25s] @ 200)[3s:1s] @ 50)
{job="1"} 464
{job="1"} 192
# Nested subqueries with timestamp only on outer subquery.
# Outer most subquery:
# at 900=783
# inner subquery: at 870=87+86+85, at 880=88+87+86, at 890=89+88+87
# at 925=537
# inner subquery: at 895=89+88, at 905=90+89, at 915=90+91
# at 950=828
# inner subquery: at 920=92+91+90, at 930=93+92+91, at 940=94+93+92
# at 975=567
# inner subquery: at 945=94+93, at 955=95+94, at 965=96+95
# at 1000=873
# inner subquery: at 970=97+96+95, at 980=98+97+96, at 990=99+98+97
# at 925=360
# inner subquery: at 905=90+89, at 915=91+90
# at 950=372
# inner subquery: at 930=93+92, at 940=94+93
# at 975=380
# inner subquery: at 955=95+94, at 965=96+95
# at 1000=392
# inner subquery: at 980=98+97, at 990=99+98
eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[20s])[20s:10s] offset 10s)[100s:25s] @ 1000)
{job="1"} 3588
{job="1"} 1504
# minute is counted on the value of the sample.
eval instant at 10s minute(metric @ 1500)
@ -137,32 +135,32 @@ eval instant at 15m timestamp(timestamp(metric{job="1"} @ 10))
# minute is counted on the value of the sample.
eval instant at 0s sum_over_time(minute(metric @ 1500)[100s:10s])
{job="1"} 22
{job="2"} 55
{job="1"} 20
{job="2"} 50
# If nothing passed, minute() takes eval time.
# Here the eval time is determined by the subquery.
# [50m:1m] at 6000, i.e. 100m, is 50m to 100m.
# sum=50+51+52+...+59+0+1+2+...+40.
# sum=51+52+...+59+0+1+2+...+40.
eval instant at 0s sum_over_time(minute()[50m:1m] @ 6000)
{} 1315
# sum=46+47+...+59+0+1+2+...+35.
eval instant at 0s sum_over_time(minute()[50m:1m] @ 6000 offset 5m)
{} 1365
# sum=45+46+47+...+59+0+1+2+...+35.
eval instant at 0s sum_over_time(minute()[50m:1m] @ 6000 offset 5m)
{} 1410
# time() is the eval time which is determined by subquery here.
# 2900+2901+...+3000 = (3000*3001 - 2899*2900)/2.
# 2901+...+3000 = (3000*3001 - 2900*2901)/2.
eval instant at 0s sum_over_time(vector(time())[100s:1s] @ 3000)
{} 297950
{} 295050
# 2300+2301+...+2400 = (2400*2401 - 2299*2300)/2.
# 2301+...+2400 = (2400*2401 - 2300*2301)/2.
eval instant at 0s sum_over_time(vector(time())[100s:1s] @ 3000 offset 600s)
{} 237350
{} 235050
# timestamp() takes the time of the sample and not the evaluation time.
eval instant at 0s sum_over_time(timestamp(metric{job="1"} @ 10)[100s:10s] @ 3000)
{job="1"} 110
{job="1"} 100
# The result of inner timestamp() will have the timestamp as the
# eval time, hence entire expression is not step invariant and depends on eval time.

View file

@ -6,6 +6,8 @@ load 5m
# Tests for resets().
eval instant at 50m resets(http_requests[5m])
eval instant at 50m resets(http_requests[10m])
{path="/foo"} 0
{path="/bar"} 0
{path="/biz"} 0
@ -16,6 +18,11 @@ eval instant at 50m resets(http_requests[20m])
{path="/biz"} 0
eval instant at 50m resets(http_requests[30m])
{path="/foo"} 1
{path="/bar"} 0
{path="/biz"} 0
eval instant at 50m resets(http_requests[32m])
{path="/foo"} 2
{path="/bar"} 1
{path="/biz"} 0
@ -29,28 +36,30 @@ eval instant at 50m resets(nonexistent_metric[50m])
# Tests for changes().
eval instant at 50m changes(http_requests[5m])
eval instant at 50m changes(http_requests[6m])
{path="/foo"} 0
{path="/bar"} 0
{path="/biz"} 0
eval instant at 50m changes(http_requests[20m])
{path="/foo"} 3
{path="/bar"} 3
{path="/foo"} 2
{path="/bar"} 2
{path="/biz"} 0
eval instant at 50m changes(http_requests[30m])
{path="/foo"} 4
{path="/bar"} 5
{path="/biz"} 1
{path="/foo"} 3
{path="/bar"} 4
{path="/biz"} 0
eval instant at 50m changes(http_requests[50m])
{path="/foo"} 8
{path="/bar"} 9
{path="/foo"} 7
{path="/bar"} 8
{path="/biz"} 1
eval instant at 50m changes((http_requests[50m]))
{path="/foo"} 8
{path="/bar"} 9
{path="/foo"} 7
{path="/bar"} 8
{path="/biz"} 1
eval instant at 50m changes(nonexistent_metric[50m])
@ -61,7 +70,7 @@ load 5m
x{a="b"} NaN NaN NaN
x{a="c"} 0 NaN 0
eval instant at 15m changes(x[15m])
eval instant at 15m changes(x[20m])
{a="b"} 0
{a="c"} 2
@ -70,14 +79,14 @@ clear
# Tests for increase().
load 5m
http_requests{path="/foo"} 0+10x10
http_requests{path="/bar"} 0+10x5 0+10x5
http_requests{path="/bar"} 0+18x5 0+18x5
http_requests{path="/dings"} 10+10x10
http_requests{path="/bumms"} 1+10x10
# Tests for increase().
eval instant at 50m increase(http_requests[50m])
{path="/foo"} 100
{path="/bar"} 90
{path="/bar"} 160
{path="/dings"} 100
{path="/bumms"} 100
@ -90,7 +99,7 @@ eval instant at 50m increase(http_requests[50m])
# value, and therefore the extrapolation happens only by 30s.
eval instant at 50m increase(http_requests[100m])
{path="/foo"} 100
{path="/bar"} 90
{path="/bar"} 162
{path="/dings"} 105
{path="/bumms"} 101
@ -110,15 +119,17 @@ clear
# Tests for rate().
load 5m
testcounter_reset_middle 0+10x4 0+10x5
testcounter_reset_middle 0+27x4 0+27x5
testcounter_reset_end 0+10x9 0 10
# Counter resets in the middle of the range are handled correctly by rate().
eval instant at 50m rate(testcounter_reset_middle[50m])
{} 0.03
{} 0.08
# Counter resets at end of range are ignored by rate().
eval instant at 50m rate(testcounter_reset_end[5m])
eval instant at 50m rate(testcounter_reset_end[6m])
{} 0
clear
@ -237,18 +248,18 @@ eval instant at 50m deriv(testcounter_reset_middle[100m])
# intercept at t=3000: 38.63636363636364
# intercept at t=3000+3600: 76.81818181818181
eval instant at 50m predict_linear(testcounter_reset_middle[50m], 3600)
{} 76.81818181818181
{} 70
# intercept at t = 3000+3600 = 6600
eval instant at 50m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600)
eval instant at 50m predict_linear(testcounter_reset_middle[55m] @ 3000, 3600)
{} 76.81818181818181
# intercept at t = 600+3600 = 4200
eval instant at 10m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600)
eval instant at 10m predict_linear(testcounter_reset_middle[55m] @ 3000, 3600)
{} 51.36363636363637
# intercept at t = 4200+3600 = 7800
eval instant at 70m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600)
eval instant at 70m predict_linear(testcounter_reset_middle[55m] @ 3000, 3600)
{} 89.54545454545455
# With http_requests, there is a sample value exactly at the end of
@ -456,7 +467,7 @@ load 5m
http_requests{job="api-server", instance="1", group="production"} 0+20x10
http_requests{job="api-server", instance="0", group="canary"} 0+30x10
http_requests{job="api-server", instance="1", group="canary"} 0+40x10
http_requests{job="api-server", instance="2", group="canary"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
http_requests{job="api-server", instance="2", group="canary"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
http_requests{job="app-server", instance="0", group="production"} 0+50x10
http_requests{job="app-server", instance="1", group="production"} 0+60x10
http_requests{job="app-server", instance="0", group="canary"} 0+70x10
@ -491,7 +502,7 @@ load 5m
http_requests{job="api-server", instance="1", group="production"} 0+20x10
http_requests{job="api-server", instance="0", group="canary"} 0+30x10
http_requests{job="api-server", instance="1", group="canary"} 0+40x10
http_requests{job="api-server", instance="2", group="canary"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
http_requests{job="api-server", instance="2", group="canary"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
http_requests{job="app-server", instance="0", group="production"} 0+50x10
http_requests{job="app-server", instance="1", group="production"} 0+60x10
http_requests{job="app-server", instance="0", group="canary"} 0+70x10
@ -677,10 +688,10 @@ load 10s
metric9 -9.988465674311579e+307 -9.988465674311579e+307 -9.988465674311579e+307
metric10 -9.988465674311579e+307 9.988465674311579e+307
eval instant at 1m avg_over_time(metric[1m])
eval instant at 55s avg_over_time(metric[1m])
{} 3
eval instant at 1m sum_over_time(metric[1m])/count_over_time(metric[1m])
eval instant at 55s sum_over_time(metric[1m])/count_over_time(metric[1m])
{} 3
eval instant at 1m avg_over_time(metric2[1m])
@ -748,8 +759,8 @@ eval instant at 1m avg_over_time(metric8[1m])
{} 9.988465674311579e+307
# This overflows float64.
eval instant at 1m sum_over_time(metric8[1m])/count_over_time(metric8[1m])
{} Inf
eval instant at 1m sum_over_time(metric8[2m])/count_over_time(metric8[2m])
{} +Inf
eval instant at 1m avg_over_time(metric9[1m])
{} -9.988465674311579e+307
@ -758,10 +769,16 @@ eval instant at 1m avg_over_time(metric9[1m])
eval instant at 1m sum_over_time(metric9[1m])/count_over_time(metric9[1m])
{} -Inf
eval instant at 1m avg_over_time(metric10[1m])
eval instant at 45s avg_over_time(metric10[1m])
{} 0
eval instant at 1m sum_over_time(metric10[1m])/count_over_time(metric10[1m])
eval instant at 1m avg_over_time(metric10[2m])
{} 0
eval instant at 45s sum_over_time(metric10[1m])/count_over_time(metric10[1m])
{} 0
eval instant at 1m sum_over_time(metric10[2m])/count_over_time(metric10[2m])
{} 0
# Test if very big intermediate values cause loss of detail.
@ -769,7 +786,7 @@ clear
load 10s
metric 1 1e100 1 -1e100
eval instant at 1m sum_over_time(metric[1m])
eval instant at 1m sum_over_time(metric[2m])
{} 2
# Tests for stddev_over_time and stdvar_over_time.
@ -777,13 +794,13 @@ clear
load 10s
metric 0 8 8 2 3
eval instant at 1m stdvar_over_time(metric[1m])
eval instant at 1m stdvar_over_time(metric[2m])
{} 10.56
eval instant at 1m stddev_over_time(metric[1m])
eval instant at 1m stddev_over_time(metric[2m])
{} 3.249615
eval instant at 1m stddev_over_time((metric[1m]))
eval instant at 1m stddev_over_time((metric[2m]))
{} 3.249615
# Tests for stddev_over_time and stdvar_over_time #4927.
@ -813,42 +830,42 @@ load 10s
data{test="three samples"} 0 1 2
data{test="uneven samples"} 0 1 4
eval instant at 1m quantile_over_time(0, data[1m])
eval instant at 1m quantile_over_time(0, data[2m])
{test="two samples"} 0
{test="three samples"} 0
{test="uneven samples"} 0
eval instant at 1m quantile_over_time(0.5, data[1m])
eval instant at 1m quantile_over_time(0.5, data[2m])
{test="two samples"} 0.5
{test="three samples"} 1
{test="uneven samples"} 1
eval instant at 1m quantile_over_time(0.75, data[1m])
eval instant at 1m quantile_over_time(0.75, data[2m])
{test="two samples"} 0.75
{test="three samples"} 1.5
{test="uneven samples"} 2.5
eval instant at 1m quantile_over_time(0.8, data[1m])
eval instant at 1m quantile_over_time(0.8, data[2m])
{test="two samples"} 0.8
{test="three samples"} 1.6
{test="uneven samples"} 2.8
eval instant at 1m quantile_over_time(1, data[1m])
eval instant at 1m quantile_over_time(1, data[2m])
{test="two samples"} 1
{test="three samples"} 2
{test="uneven samples"} 4
eval instant at 1m quantile_over_time(-1, data[1m])
eval instant at 1m quantile_over_time(-1, data[2m])
{test="two samples"} -Inf
{test="three samples"} -Inf
{test="uneven samples"} -Inf
eval instant at 1m quantile_over_time(2, data[1m])
eval instant at 1m quantile_over_time(2, data[2m])
{test="two samples"} +Inf
{test="three samples"} +Inf
{test="uneven samples"} +Inf
eval instant at 1m (quantile_over_time(2, (data[1m])))
eval instant at 1m (quantile_over_time(2, (data[2m])))
{test="two samples"} +Inf
{test="three samples"} +Inf
{test="uneven samples"} +Inf
@ -956,21 +973,21 @@ load 10s
data{type="some_nan3"} NaN 0 1
data{type="only_nan"} NaN NaN NaN
eval instant at 1m min_over_time(data[1m])
eval instant at 1m min_over_time(data[2m])
{type="numbers"} 0
{type="some_nan"} 0
{type="some_nan2"} 1
{type="some_nan3"} 0
{type="only_nan"} NaN
eval instant at 1m max_over_time(data[1m])
eval instant at 1m max_over_time(data[2m])
{type="numbers"} 3
{type="some_nan"} 2
{type="some_nan2"} 2
{type="some_nan3"} 1
{type="only_nan"} NaN
eval instant at 1m last_over_time(data[1m])
eval instant at 1m last_over_time(data[2m])
data{type="numbers"} 3
data{type="some_nan"} NaN
data{type="some_nan2"} 1
@ -1063,13 +1080,19 @@ eval instant at 1m absent_over_time(httpd_log_lines_total[30s])
{} 1
eval instant at 15m absent_over_time(http_requests[5m])
eval instant at 16m absent_over_time(http_requests[5m])
{} 1
eval instant at 15m absent_over_time(http_requests[10m])
eval instant at 16m absent_over_time(http_requests[6m])
{} 1
eval instant at 16m absent_over_time(http_requests[16m])
eval instant at 16m absent_over_time(httpd_handshake_failures_total[1m])
{} 1
eval instant at 16m absent_over_time(httpd_handshake_failures_total[2m])
eval instant at 16m absent_over_time({instance="127.0.0.1"}[5m])
@ -1125,17 +1148,18 @@ eval instant at 0m present_over_time(httpd_log_lines_total[30s])
eval instant at 1m present_over_time(httpd_log_lines_total[30s])
eval instant at 15m present_over_time(http_requests[5m])
eval instant at 15m present_over_time(http_requests[10m])
{instance="127.0.0.1", job="httpd", path="/bar"} 1
{instance="127.0.0.1", job="httpd", path="/foo"} 1
eval instant at 16m present_over_time(http_requests[5m])
eval instant at 16m present_over_time(http_requests[6m])
eval instant at 16m present_over_time(http_requests[16m])
{instance="127.0.0.1", job="httpd", path="/bar"} 1
{instance="127.0.0.1", job="httpd", path="/foo"} 1
eval instant at 16m present_over_time(httpd_handshake_failures_total[1m])
{instance="127.0.0.1", job="node"} 1
eval instant at 16m present_over_time({instance="127.0.0.1"}[5m])
{instance="127.0.0.1",job="node"} 1
@ -1156,59 +1180,59 @@ load 5m
exp_root_log{l="x"} 10
exp_root_log{l="y"} 20
eval instant at 5m exp(exp_root_log)
eval instant at 1m exp(exp_root_log)
{l="x"} 22026.465794806718
{l="y"} 485165195.4097903
eval instant at 5m exp(exp_root_log - 10)
eval instant at 1m exp(exp_root_log - 10)
{l="y"} 22026.465794806718
{l="x"} 1
eval instant at 5m exp(exp_root_log - 20)
eval instant at 1m exp(exp_root_log - 20)
{l="x"} 4.5399929762484854e-05
{l="y"} 1
eval instant at 5m ln(exp_root_log)
eval instant at 1m ln(exp_root_log)
{l="x"} 2.302585092994046
{l="y"} 2.995732273553991
eval instant at 5m ln(exp_root_log - 10)
eval instant at 1m ln(exp_root_log - 10)
{l="y"} 2.302585092994046
{l="x"} -Inf
eval instant at 5m ln(exp_root_log - 20)
eval instant at 1m ln(exp_root_log - 20)
{l="y"} -Inf
{l="x"} NaN
eval instant at 5m exp(ln(exp_root_log))
eval instant at 1m exp(ln(exp_root_log))
{l="y"} 20
{l="x"} 10
eval instant at 5m sqrt(exp_root_log)
eval instant at 1m sqrt(exp_root_log)
{l="x"} 3.1622776601683795
{l="y"} 4.47213595499958
eval instant at 5m log2(exp_root_log)
eval instant at 1m log2(exp_root_log)
{l="x"} 3.3219280948873626
{l="y"} 4.321928094887363
eval instant at 5m log2(exp_root_log - 10)
eval instant at 1m log2(exp_root_log - 10)
{l="y"} 3.3219280948873626
{l="x"} -Inf
eval instant at 5m log2(exp_root_log - 20)
eval instant at 1m log2(exp_root_log - 20)
{l="x"} NaN
{l="y"} -Inf
eval instant at 5m log10(exp_root_log)
eval instant at 1m log10(exp_root_log)
{l="x"} 1
{l="y"} 1.301029995663981
eval instant at 5m log10(exp_root_log - 10)
eval instant at 1m log10(exp_root_log - 10)
{l="y"} 1
{l="x"} -Inf
eval instant at 5m log10(exp_root_log - 20)
eval instant at 1m log10(exp_root_log - 20)
{l="x"} NaN
{l="y"} -Inf

View file

@ -93,15 +93,15 @@ eval instant at 50m histogram_quantile(0.8, testhistogram_bucket)
{start="negative"} 0.3
# More realistic with rates.
eval instant at 50m histogram_quantile(0.2, rate(testhistogram_bucket[5m]))
eval instant at 50m histogram_quantile(0.2, rate(testhistogram_bucket[10m]))
{start="positive"} 0.048
{start="negative"} -0.2
eval instant at 50m histogram_quantile(0.5, rate(testhistogram_bucket[5m]))
eval instant at 50m histogram_quantile(0.5, rate(testhistogram_bucket[10m]))
{start="positive"} 0.15
{start="negative"} -0.15
eval instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[5m]))
eval instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[10m]))
{start="positive"} 0.72
{start="negative"} 0.3
@ -125,58 +125,58 @@ eval instant at 47m histogram_quantile(5./6., rate(testhistogram2_bucket[15m]))
{} 5
# Aggregated histogram: Everything in one.
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le))
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le))
{} 0.075
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le))
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le))
{} 0.1277777777777778
# Aggregated histogram: Everything in one. Now with avg, which does not change anything.
eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds_bucket[5m])) by (le))
eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds_bucket[10m])) by (le))
{} 0.075
eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds_bucket[5m])) by (le))
eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds_bucket[10m])) by (le))
{} 0.12777777777777778
# Aggregated histogram: By instance.
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance))
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, instance))
{instance="ins1"} 0.075
{instance="ins2"} 0.075
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance))
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, instance))
{instance="ins1"} 0.1333333333
{instance="ins2"} 0.125
# Aggregated histogram: By job.
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job))
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, job))
{job="job1"} 0.1
{job="job2"} 0.0642857142857143
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job))
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, job))
{job="job1"} 0.14
{job="job2"} 0.1125
# Aggregated histogram: By job and instance.
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance))
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, job, instance))
{instance="ins1", job="job1"} 0.11
{instance="ins2", job="job1"} 0.09
{instance="ins1", job="job2"} 0.06
{instance="ins2", job="job2"} 0.0675
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance))
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, job, instance))
{instance="ins1", job="job1"} 0.15
{instance="ins2", job="job1"} 0.1333333333333333
{instance="ins1", job="job2"} 0.1
{instance="ins2", job="job2"} 0.1166666666666667
# The unaggregated histogram for comparison. Same result as the previous one.
eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds_bucket[5m]))
eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds_bucket[10m]))
{instance="ins1", job="job1"} 0.11
{instance="ins2", job="job1"} 0.09
{instance="ins1", job="job2"} 0.06
{instance="ins2", job="job2"} 0.0675
eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[5m]))
eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[10m]))
{instance="ins1", job="job1"} 0.15
{instance="ins2", job="job1"} 0.13333333333333333
{instance="ins1", job="job2"} 0.1
@ -205,15 +205,15 @@ eval instant at 50m histogram_quantile(0.99, nonmonotonic_bucket)
{} 979.75
# Buckets with different representations of the same upper bound.
eval instant at 50m histogram_quantile(0.5, rate(mixed_bucket[5m]))
eval instant at 50m histogram_quantile(0.5, rate(mixed_bucket[10m]))
{instance="ins1", job="job1"} 0.15
{instance="ins2", job="job1"} NaN
eval instant at 50m histogram_quantile(0.75, rate(mixed_bucket[5m]))
eval instant at 50m histogram_quantile(0.75, rate(mixed_bucket[10m]))
{instance="ins1", job="job1"} 0.2
{instance="ins2", job="job1"} NaN
eval instant at 50m histogram_quantile(1, rate(mixed_bucket[5m]))
eval instant at 50m histogram_quantile(1, rate(mixed_bucket[10m]))
{instance="ins1", job="job1"} 0.2
{instance="ins2", job="job1"} NaN
@ -222,7 +222,7 @@ load 5m
empty_bucket{le="0.2", job="job1", instance="ins1"} 0x10
empty_bucket{le="+Inf", job="job1", instance="ins1"} 0x10
eval instant at 50m histogram_quantile(0.2, rate(empty_bucket[5m]))
eval instant at 50m histogram_quantile(0.2, rate(empty_bucket[10m]))
{instance="ins1", job="job1"} NaN
# Load a duplicate histogram with a different name to test failure scenario on multiple histograms with the same label set

View file

@ -2,55 +2,55 @@
load 5m
empty_histogram {{}}
eval instant at 5m empty_histogram
eval instant at 1m empty_histogram
{__name__="empty_histogram"} {{}}
eval instant at 5m histogram_count(empty_histogram)
eval instant at 1m histogram_count(empty_histogram)
{} 0
eval instant at 5m histogram_sum(empty_histogram)
eval instant at 1m histogram_sum(empty_histogram)
{} 0
eval instant at 5m histogram_avg(empty_histogram)
eval instant at 1m histogram_avg(empty_histogram)
{} NaN
eval instant at 5m histogram_fraction(-Inf, +Inf, empty_histogram)
eval instant at 1m histogram_fraction(-Inf, +Inf, empty_histogram)
{} NaN
eval instant at 5m histogram_fraction(0, 8, empty_histogram)
eval instant at 1m histogram_fraction(0, 8, empty_histogram)
{} NaN
clear
# buckets:[1 2 1] means 1 observation in the 1st bucket, 2 observations in the 2nd and 1 observation in the 3rd (total 4).
load 5m
single_histogram {{schema:0 sum:5 count:4 buckets:[1 2 1]}}
# histogram_count extracts the count property from the histogram.
eval instant at 5m histogram_count(single_histogram)
eval instant at 1m histogram_count(single_histogram)
{} 4
# histogram_sum extracts the sum property from the histogram.
eval instant at 5m histogram_sum(single_histogram)
eval instant at 1m histogram_sum(single_histogram)
{} 5
# histogram_avg calculates the average from sum and count properties.
eval instant at 5m histogram_avg(single_histogram)
eval instant at 1m histogram_avg(single_histogram)
{} 1.25
# We expect half of the values to fall in the range 1 < x <= 2.
eval instant at 5m histogram_fraction(1, 2, single_histogram)
eval instant at 1m histogram_fraction(1, 2, single_histogram)
{} 0.5
# We expect all values to fall in the range 0 < x <= 8.
eval instant at 5m histogram_fraction(0, 8, single_histogram)
eval instant at 1m histogram_fraction(0, 8, single_histogram)
{} 1
# Median is 1.5 due to linear estimation of the midpoint of the middle bucket, whose values are within range 1 < x <= 2.
eval instant at 5m histogram_quantile(0.5, single_histogram)
eval instant at 1m histogram_quantile(0.5, single_histogram)
{} 1.5
clear
# Repeat the same histogram 10 times.
load 5m
@ -88,7 +88,7 @@ eval instant at 50m histogram_fraction(1, 2, multi_histogram)
eval instant at 50m histogram_quantile(0.5, multi_histogram)
{} 1.5
clear
# Accumulate the histogram addition for 10 iterations, offset is a bucket position where offset:0 is always the bucket
# with an upper limit of 1 and offset:1 is the bucket which follows to the right. Negative offsets represent bucket
@ -133,14 +133,14 @@ eval instant at 50m histogram_quantile(0.5, incr_histogram)
{} 1.5
# Per-second average rate of increase should be 1/(5*60) for count and buckets, then 2/(5*60) for sum.
eval instant at 50m rate(incr_histogram[5m])
eval instant at 50m rate(incr_histogram[10m])
{} {{count:0.0033333333333333335 sum:0.006666666666666667 offset:1 buckets:[0.0033333333333333335]}}
# Calculate the 50th percentile of observations over the last 10m.
eval instant at 50m histogram_quantile(0.5, rate(incr_histogram[10m]))
{} 1.5
clear
# Schema represents the histogram resolution, different schema have compatible bucket boundaries, e.g.:
# 0: 1 2 4 8 16 32 64 (higher resolution)
@ -166,77 +166,77 @@ eval instant at 5m histogram_avg(low_res_histogram)
eval instant at 5m histogram_fraction(1, 4, low_res_histogram)
{} 1
clear
# z_bucket:1 means there is one observation in the zero bucket and z_bucket_w:0.5 means the zero bucket has the range
# 0 < x <= 0.5. Sum and count are expected to represent all observations in the histogram, including those in the zero bucket.
load 5m
single_zero_histogram {{schema:0 z_bucket:1 z_bucket_w:0.5 sum:0.25 count:1}}
eval instant at 5m histogram_count(single_zero_histogram)
eval instant at 1m histogram_count(single_zero_histogram)
{} 1
eval instant at 5m histogram_sum(single_zero_histogram)
eval instant at 1m histogram_sum(single_zero_histogram)
{} 0.25
eval instant at 5m histogram_avg(single_zero_histogram)
eval instant at 1m histogram_avg(single_zero_histogram)
{} 0.25
# When only the zero bucket is populated, or there are negative buckets, the distribution is assumed to be equally
# distributed around zero; i.e. there is an equal number of positive and negative observations. Therefore the
# entire distribution must lie within the full range of the zero bucket, in this case: -0.5 < x <= +0.5.
eval instant at 5m histogram_fraction(-0.5, 0.5, single_zero_histogram)
eval instant at 1m histogram_fraction(-0.5, 0.5, single_zero_histogram)
{} 1
# Half of the observations are estimated to be zero, as this is the midpoint between -0.5 and +0.5.
eval instant at 5m histogram_quantile(0.5, single_zero_histogram)
eval instant at 1m histogram_quantile(0.5, single_zero_histogram)
{} 0
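A tiny Go sketch of the symmetry assumption described above (illustrative only; lower/upper just name the zero bucket's range): with a single observation in a zero bucket of width 0.5, the whole distribution lies in (-0.5, 0.5] and the median estimate is the midpoint of that range:

    package main

    import "fmt"

    func main() {
        zBucketW := 0.5 // z_bucket_w of single_zero_histogram
        count := 1.0    // all observations sit in the zero bucket

        lower, upper := -zBucketW, zBucketW
        fmt.Println("fraction(-0.5, 0.5):", count/count) // 1
        fmt.Println("median:", (lower+upper)/2)          // 0
    }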
clear
# Let's turn single_histogram upside-down.
load 5m
negative_histogram {{schema:0 sum:-5 count:4 n_buckets:[1 2 1]}}
eval instant at 5m histogram_count(negative_histogram)
eval instant at 1m histogram_count(negative_histogram)
{} 4
eval instant at 5m histogram_sum(negative_histogram)
eval instant at 1m histogram_sum(negative_histogram)
{} -5
eval instant at 5m histogram_avg(negative_histogram)
eval instant at 1m histogram_avg(negative_histogram)
{} -1.25
# We expect half of the values to fall in the range -2 < x <= -1.
eval instant at 5m histogram_fraction(-2, -1, negative_histogram)
eval instant at 1m histogram_fraction(-2, -1, negative_histogram)
{} 0.5
eval instant at 5m histogram_quantile(0.5, negative_histogram)
eval instant at 1m histogram_quantile(0.5, negative_histogram)
{} -1.5
clear
# Two histogram samples.
load 5m
two_samples_histogram {{schema:0 sum:4 count:4 buckets:[1 2 1]}} {{schema:0 sum:-4 count:4 n_buckets:[1 2 1]}}
# We expect to see the newest sample.
eval instant at 10m histogram_count(two_samples_histogram)
eval instant at 5m histogram_count(two_samples_histogram)
{} 4
eval instant at 10m histogram_sum(two_samples_histogram)
eval instant at 5m histogram_sum(two_samples_histogram)
{} -4
eval instant at 10m histogram_avg(two_samples_histogram)
eval instant at 5m histogram_avg(two_samples_histogram)
{} -1
eval instant at 10m histogram_fraction(-2, -1, two_samples_histogram)
eval instant at 5m histogram_fraction(-2, -1, two_samples_histogram)
{} 0.5
eval instant at 10m histogram_quantile(0.5, two_samples_histogram)
eval instant at 5m histogram_quantile(0.5, two_samples_histogram)
{} -1.5
clear
# Add two histograms with negated data.
load 5m
@@ -259,6 +259,8 @@ eval instant at 5m histogram_fraction(0, 4, balanced_histogram)
eval instant at 5m histogram_quantile(0.5, balanced_histogram)
{} 0.5
clear
# Add histogram to test sum(last_over_time) regression
load 5m
incr_sum_histogram{number="1"} {{schema:0 sum:0 count:0 buckets:[1]}}+{{schema:0 sum:1 count:1 buckets:[1]}}x10
@@ -270,6 +272,8 @@ eval instant at 50m histogram_sum(sum(incr_sum_histogram))
eval instant at 50m histogram_sum(sum(last_over_time(incr_sum_histogram[5m])))
{} 30
clear
# Apply rate function to histogram.
load 15s
histogram_rate {{schema:1 count:12 sum:18.4 z_bucket:2 z_bucket_w:0.001 buckets:[1 2 0 1 1] n_buckets:[1 2 0 1 1]}}+{{schema:1 count:9 sum:18.4 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 0 1 1] n_buckets:[1 1 0 1 1]}}x100
@@ -280,6 +284,8 @@ eval instant at 5m rate(histogram_rate[45s])
eval range from 5m to 5m30s step 30s rate(histogram_rate[45s])
{} {{schema:1 count:0.6 sum:1.2266666666666652 z_bucket:0.06666666666666667 z_bucket_w:0.001 buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667] n_buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667]}}x1
clear
# Apply count and sum function to histogram.
load 10m
histogram_count_sum_2 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1
@@ -290,6 +296,8 @@ eval instant at 10m histogram_count(histogram_count_sum_2)
eval instant at 10m histogram_sum(histogram_count_sum_2)
{} 100
clear
# Apply stddev and stdvar function to histogram with {1, 2, 3, 4} (low res).
load 10m
histogram_stddev_stdvar_1 {{schema:2 count:4 sum:10 buckets:[1 0 0 0 1 0 0 1 1]}}x1
@@ -300,6 +308,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_1)
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_1)
{} 1.163807968526718
clear
# Apply stddev and stdvar function to histogram with {1, 1, 1, 1} (high res).
load 10m
histogram_stddev_stdvar_2 {{schema:8 count:10 sum:10 buckets:[1 2 3 4]}}x1
@@ -310,6 +320,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_2)
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_2)
{} 2.3971123370139447e-05
clear
# Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9}.
load 10m
histogram_stddev_stdvar_3 {{schema:3 count:7 sum:62 z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1
@@ -320,6 +332,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_3)
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_3)
{} 1844.4651144196398
clear
# Apply stddev and stdvar function to histogram with {-100000, -10000, -1000, -888, -888, -100, -50, -9, -8, -3}.
load 10m
histogram_stddev_stdvar_4 {{schema:0 count:10 sum:-112946 z_bucket:0 n_buckets:[0 0 1 1 1 0 1 1 0 0 3 0 0 0 1 0 0 1]}}x1
@@ -330,6 +344,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_4)
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_4)
{} 759352122.1939945
clear
# Apply stddev and stdvar function to histogram with {-10x10}.
load 10m
histogram_stddev_stdvar_5 {{schema:0 count:10 sum:-100 z_bucket:0 n_buckets:[0 0 0 0 10]}}x1
@@ -340,6 +356,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_5)
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_5)
{} 1.725830020304794
clear
# Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9, NaN}.
load 10m
histogram_stddev_stdvar_6 {{schema:3 count:7 sum:NaN z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1
@@ -350,6 +368,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_6)
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_6)
{} NaN
clear
# Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9, Inf}.
load 10m
histogram_stddev_stdvar_7 {{schema:3 count:7 sum:Inf z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1
@@ -360,6 +380,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_7)
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_7)
{} NaN
clear
# Apply quantile function to histogram with all positive buckets with zero bucket.
load 10m
histogram_quantile_1 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 buckets:[2 3 0 1 4]}}x1
@@ -391,6 +413,8 @@ eval instant at 10m histogram_quantile(0, histogram_quantile_1)
eval instant at 10m histogram_quantile(-1, histogram_quantile_1)
{} -Inf
clear
# Apply quantile function to histogram with all negative buckets with zero bucket.
load 10m
histogram_quantile_2 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 n_buckets:[2 3 0 1 4]}}x1
@@ -419,6 +443,8 @@ eval instant at 10m histogram_quantile(0, histogram_quantile_2)
eval instant at 10m histogram_quantile(-1, histogram_quantile_2)
{} -Inf
clear
# Apply quantile function to histogram with both positive and negative buckets with zero bucket.
load 10m
histogram_quantile_3 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1
@@ -462,6 +488,8 @@ eval instant at 10m histogram_quantile(0, histogram_quantile_3)
eval instant at 10m histogram_quantile(-1, histogram_quantile_3)
{} -Inf
clear
# Apply fraction function to empty histogram.
load 10m
histogram_fraction_1 {{}}x1
@@ -469,6 +497,8 @@ load 10m
eval instant at 10m histogram_fraction(3.1415, 42, histogram_fraction_1)
{} NaN
clear
# Apply fraction function to histogram with positive and zero buckets.
load 10m
histogram_fraction_2 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 buckets:[2 3 0 1 4]}}x1
@@ -633,6 +663,8 @@ eval instant at 10m histogram_fraction(NaN, NaN, histogram_fraction_3)
eval instant at 10m histogram_fraction(-Inf, +Inf, histogram_fraction_3)
{} 1
clear
# Apply fraction function to histogram with positive, negative, and zero buckets.
load 10m
histogram_fraction_4 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1

View file

@@ -113,7 +113,7 @@ eval instant at 50m http_requests{job="api-server", group="canary"}
http_requests{group="canary", instance="0", job="api-server"} 300
http_requests{group="canary", instance="1", job="api-server"} 400
eval instant at 50m http_requests{job="api-server", group="canary"} + rate(http_requests{job="api-server"}[5m]) * 5 * 60
eval instant at 50m http_requests{job="api-server", group="canary"} + rate(http_requests{job="api-server"}[10m]) * 5 * 60
{group="canary", instance="0", job="api-server"} 330
{group="canary", instance="1", job="api-server"} 440
@@ -308,65 +308,65 @@ load 5m
threshold{instance="abc",job="node",target="a@b.com"} 0
# Copy machine role to node variable.
eval instant at 5m node_role * on (instance) group_right (role) node_var
eval instant at 1m node_role * on (instance) group_right (role) node_var
{instance="abc",job="node",role="prometheus"} 2
eval instant at 5m node_var * on (instance) group_left (role) node_role
eval instant at 1m node_var * on (instance) group_left (role) node_role
{instance="abc",job="node",role="prometheus"} 2
eval instant at 5m node_var * ignoring (role) group_left (role) node_role
eval instant at 1m node_var * ignoring (role) group_left (role) node_role
{instance="abc",job="node",role="prometheus"} 2
eval instant at 5m node_role * ignoring (role) group_right (role) node_var
eval instant at 1m node_role * ignoring (role) group_right (role) node_var
{instance="abc",job="node",role="prometheus"} 2
# Copy machine role to node variable with instrumentation labels.
eval instant at 5m node_cpu * ignoring (role, mode) group_left (role) node_role
eval instant at 1m node_cpu * ignoring (role, mode) group_left (role) node_role
{instance="abc",job="node",mode="idle",role="prometheus"} 3
{instance="abc",job="node",mode="user",role="prometheus"} 1
eval instant at 5m node_cpu * on (instance) group_left (role) node_role
eval instant at 1m node_cpu * on (instance) group_left (role) node_role
{instance="abc",job="node",mode="idle",role="prometheus"} 3
{instance="abc",job="node",mode="user",role="prometheus"} 1
# Ratio of total.
eval instant at 5m node_cpu / on (instance) group_left sum by (instance,job)(node_cpu)
eval instant at 1m node_cpu / on (instance) group_left sum by (instance,job)(node_cpu)
{instance="abc",job="node",mode="idle"} .75
{instance="abc",job="node",mode="user"} .25
{instance="def",job="node",mode="idle"} .80
{instance="def",job="node",mode="user"} .20
eval instant at 5m sum by (mode, job)(node_cpu) / on (job) group_left sum by (job)(node_cpu)
eval instant at 1m sum by (mode, job)(node_cpu) / on (job) group_left sum by (job)(node_cpu)
{job="node",mode="idle"} 0.7857142857142857
{job="node",mode="user"} 0.21428571428571427
eval instant at 5m sum(sum by (mode, job)(node_cpu) / on (job) group_left sum by (job)(node_cpu))
eval instant at 1m sum(sum by (mode, job)(node_cpu) / on (job) group_left sum by (job)(node_cpu))
{} 1.0
eval instant at 5m node_cpu / ignoring (mode) group_left sum without (mode)(node_cpu)
eval instant at 1m node_cpu / ignoring (mode) group_left sum without (mode)(node_cpu)
{instance="abc",job="node",mode="idle"} .75
{instance="abc",job="node",mode="user"} .25
{instance="def",job="node",mode="idle"} .80
{instance="def",job="node",mode="user"} .20
eval instant at 5m node_cpu / ignoring (mode) group_left(dummy) sum without (mode)(node_cpu)
eval instant at 1m node_cpu / ignoring (mode) group_left(dummy) sum without (mode)(node_cpu)
{instance="abc",job="node",mode="idle"} .75
{instance="abc",job="node",mode="user"} .25
{instance="def",job="node",mode="idle"} .80
{instance="def",job="node",mode="user"} .20
eval instant at 5m sum without (instance)(node_cpu) / ignoring (mode) group_left sum without (instance, mode)(node_cpu)
eval instant at 1m sum without (instance)(node_cpu) / ignoring (mode) group_left sum without (instance, mode)(node_cpu)
{job="node",mode="idle"} 0.7857142857142857
{job="node",mode="user"} 0.21428571428571427
eval instant at 5m sum(sum without (instance)(node_cpu) / ignoring (mode) group_left sum without (instance, mode)(node_cpu))
eval instant at 1m sum(sum without (instance)(node_cpu) / ignoring (mode) group_left sum without (instance, mode)(node_cpu))
{} 1.0
# Copy over label from metric with no matching labels, without having to list cross-job target labels ('job' here).
eval instant at 5m node_cpu + on(dummy) group_left(foo) random*0
eval instant at 1m node_cpu + on(dummy) group_left(foo) random*0
{instance="abc",job="node",mode="idle",foo="bar"} 3
{instance="abc",job="node",mode="user",foo="bar"} 1
{instance="def",job="node",mode="idle",foo="bar"} 8
@@ -374,12 +374,12 @@ eval instant at 5m node_cpu + on(dummy) group_left(foo) random*0
# Use threshold from metric, and copy over target.
eval instant at 5m node_cpu > on(job, instance) group_left(target) threshold
eval instant at 1m node_cpu > on(job, instance) group_left(target) threshold
node_cpu{instance="abc",job="node",mode="idle",target="a@b.com"} 3
node_cpu{instance="abc",job="node",mode="user",target="a@b.com"} 1
# Use threshold from metric, and a default (1) if it's not present.
eval instant at 5m node_cpu > on(job, instance) group_left(target) (threshold or on (job, instance) (sum by (job, instance)(node_cpu) * 0 + 1))
eval instant at 1m node_cpu > on(job, instance) group_left(target) (threshold or on (job, instance) (sum by (job, instance)(node_cpu) * 0 + 1))
node_cpu{instance="abc",job="node",mode="idle",target="a@b.com"} 3
node_cpu{instance="abc",job="node",mode="user",target="a@b.com"} 1
node_cpu{instance="def",job="node",mode="idle"} 8
@@ -387,37 +387,37 @@ eval instant at 5m node_cpu > on(job, instance) group_left(target) (threshold or
# Check that binops drop the metric name.
eval instant at 5m node_cpu + 2
eval instant at 1m node_cpu + 2
{instance="abc",job="node",mode="idle"} 5
{instance="abc",job="node",mode="user"} 3
{instance="def",job="node",mode="idle"} 10
{instance="def",job="node",mode="user"} 4
eval instant at 5m node_cpu - 2
eval instant at 1m node_cpu - 2
{instance="abc",job="node",mode="idle"} 1
{instance="abc",job="node",mode="user"} -1
{instance="def",job="node",mode="idle"} 6
{instance="def",job="node",mode="user"} 0
eval instant at 5m node_cpu / 2
eval instant at 1m node_cpu / 2
{instance="abc",job="node",mode="idle"} 1.5
{instance="abc",job="node",mode="user"} 0.5
{instance="def",job="node",mode="idle"} 4
{instance="def",job="node",mode="user"} 1
eval instant at 5m node_cpu * 2
eval instant at 1m node_cpu * 2
{instance="abc",job="node",mode="idle"} 6
{instance="abc",job="node",mode="user"} 2
{instance="def",job="node",mode="idle"} 16
{instance="def",job="node",mode="user"} 4
eval instant at 5m node_cpu ^ 2
eval instant at 1m node_cpu ^ 2
{instance="abc",job="node",mode="idle"} 9
{instance="abc",job="node",mode="user"} 1
{instance="def",job="node",mode="idle"} 64
{instance="def",job="node",mode="user"} 4
eval instant at 5m node_cpu % 2
eval instant at 1m node_cpu % 2
{instance="abc",job="node",mode="idle"} 1
{instance="abc",job="node",mode="user"} 1
{instance="def",job="node",mode="idle"} 0
@@ -432,14 +432,14 @@ load 5m
metricB{baz="meh"} 4
# On with no labels, for metrics with no common labels.
eval instant at 5m random + on() metricA
eval instant at 1m random + on() metricA
{} 5
# Ignoring with no labels is the same as no ignoring.
eval instant at 5m metricA + ignoring() metricB
eval instant at 1m metricA + ignoring() metricB
{baz="meh"} 7
eval instant at 5m metricA + metricB
eval instant at 1m metricA + metricB
{baz="meh"} 7
clear
@@ -457,16 +457,16 @@ load 5m
test_total{instance="localhost"} 50
test_smaller{instance="localhost"} 10
eval instant at 5m test_total > bool test_smaller
eval instant at 1m test_total > bool test_smaller
{instance="localhost"} 1
eval instant at 5m test_total > test_smaller
eval instant at 1m test_total > test_smaller
test_total{instance="localhost"} 50
eval instant at 5m test_total < bool test_smaller
eval instant at 1m test_total < bool test_smaller
{instance="localhost"} 0
eval instant at 5m test_total < test_smaller
eval instant at 1m test_total < test_smaller
clear
@@ -476,14 +476,14 @@ load 5m
trigx{} 20
trigNaN{} NaN
eval instant at 5m trigy atan2 trigx
eval instant at 1m trigy atan2 trigx
{} 0.4636476090008061
eval instant at 5m trigy atan2 trigNaN
eval instant at 1m trigy atan2 trigNaN
{} NaN
eval instant at 5m 10 atan2 20
eval instant at 1m 10 atan2 20
0.4636476090008061
eval instant at 5m 10 atan2 NaN
eval instant at 1m 10 atan2 NaN
NaN

View file

@@ -1,18 +1,18 @@
# sum_over_time with all values
load 30s
load 15s
bar 0 1 10 100 1000
eval range from 0 to 2m step 1m sum_over_time(bar[30s])
eval range from 0 to 1m step 30s sum_over_time(bar[30s])
{} 0 11 1100
clear
# sum_over_time with trailing values
load 30s
load 15s
bar 0 1 10 100 1000 0 0 0 0
eval range from 0 to 2m step 1m sum_over_time(bar[30s])
{} 0 11 1100
{} 0 1100 0
clear
@@ -21,15 +21,15 @@ load 30s
bar 0 1 10 100 1000 10000 100000 1000000 10000000
eval range from 0 to 4m step 1m sum_over_time(bar[30s])
{} 0 11 1100 110000 11000000
{} 0 10 1000 100000 10000000
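The changed expectations follow directly from the new window rule: a sample that is exactly one range length old is no longer selected. A minimal Go sketch of the membership test for the value at t=120s (timestamps in seconds; illustrative only):

    package main

    import "fmt"

    func main() {
        // bar, scraped every 30s: 0 1 10 100 1000 ...
        ts := []float64{0, 30, 60, 90, 120}
        val := []float64{0, 1, 10, 100, 1000}

        // sum_over_time(bar[30s]) at t=120s: the window is now (90s, 120s], so the
        // sample at exactly 90s is excluded.
        t, rng := 120.0, 30.0
        sum := 0.0
        for i := range ts {
            if ts[i] > t-rng && ts[i] <= t { // left-open, right-closed
                sum += val[i]
            }
        }
        fmt.Println(sum) // 1000 (the closed window [90s,120s] used to give 100+1000=1100)
    }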
clear
# sum_over_time with all values random
load 30s
load 15s
bar 5 17 42 2 7 905 51
eval range from 0 to 3m step 1m sum_over_time(bar[30s])
eval range from 0 to 90s step 30s sum_over_time(bar[30s])
{} 5 59 9 956
clear

View file

@@ -14,10 +14,10 @@ eval instant at 40s metric
{__name__="metric"} 2
# It goes stale 5 minutes after the last sample.
eval instant at 330s metric
eval instant at 329s metric
{__name__="metric"} 2
eval instant at 331s metric
eval instant at 330s metric
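The boundary moved from 330s/331s to 329s/330s because the lookback window is now (t-5m, t]: a sample that is exactly 5m old no longer counts. A small Go sketch (assuming the last sample of metric is at 30s, which the 329s/330s boundary implies; the visible closure is illustrative):

    package main

    import "fmt"

    func main() {
        const lookback = 300.0 // seconds
        lastSample := 30.0     // timestamp of the last ingested sample of metric

        visible := func(t float64) bool { return lastSample > t-lookback && lastSample <= t }

        fmt.Println(visible(329)) // true  -> the instant query at 329s still sees the sample
        fmt.Println(visible(330)) // false -> at 330s the sample is exactly 5m old and dropped
    }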
# Range vector ignores stale sample.
@@ -30,6 +30,8 @@ eval instant at 10s count_over_time(metric[1s])
eval instant at 20s count_over_time(metric[1s])
eval instant at 20s count_over_time(metric[10s])
eval instant at 20s count_over_time(metric[20s])
{} 1
@@ -45,7 +47,7 @@ eval instant at 0s metric
eval instant at 150s metric
{__name__="metric"} 0
eval instant at 300s metric
eval instant at 299s metric
{__name__="metric"} 0
eval instant at 301s metric
eval instant at 300s metric

View file

@@ -10,18 +10,18 @@ eval instant at 10s sum_over_time(metric[50s:5s])
# Every evaluation yields the last value, i.e. 2
eval instant at 5m sum_over_time(metric[50s:10s])
{} 12
{} 10
# Series becomes stale at 5m10s (5m after last sample)
# Hence subquery gets a single sample at 6m-50s=5m10s.
eval instant at 6m sum_over_time(metric[50s:10s])
# Hence subquery gets a single sample at 5m10s.
eval instant at 5m59s sum_over_time(metric[60s:10s])
{} 2
eval instant at 10s rate(metric[20s:10s])
{} 0.1
eval instant at 20s rate(metric[20s:5s])
{} 0.05
{} 0.06666666666666667
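Both changed results come from the subquery steps that fall into the now left-open range. A Go sketch of the step selection (the steps helper is a simplification; its 0-based loop coincides with promql's epoch-aligned steps in these cases):

    package main

    import "fmt"

    // steps lists the evaluation timestamps inside the left-open, right-closed
    // window (t-rng, t], stepping from 0 (all values in seconds).
    func steps(t, rng, step float64) []float64 {
        var out []float64
        for ts := 0.0; ts <= t; ts += step {
            if ts > t-rng && ts <= t {
                out = append(out, ts)
            }
        }
        return out
    }

    func main() {
        // metric has samples 1@0s and 2@10s, so every later step evaluates to 2.
        fmt.Println(steps(300, 50, 10)) // [260 270 280 290 300] -> 5 steps * 2 = 10 (was 12)
        fmt.Println(steps(20, 20, 5))   // [5 10 15 20] -> the 0s step is no longer included
    }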
clear
@@ -49,16 +49,16 @@ load 10s
metric3 0+3x1000
eval instant at 1000s sum_over_time(metric1[30s:10s])
{} 394
{} 297
# This is (394*2 - 100), because other than the last 100 at 1000s,
# This is (97 + 98*2 + 99*2 + 100), because other than 97@975s and 100@1000s,
# everything else is repeated with the 5s step.
eval instant at 1000s sum_over_time(metric1[30s:5s])
{} 688
{} 591
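A Go sketch of the arithmetic in the comment above, assuming metric1 gains 1 per 10s scrape as the 97/98/99/100 values imply (the value helper is illustrative):

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        // Newest sample of metric1 at or before time t (it increases by 1 every 10s).
        value := func(t float64) float64 { return math.Floor(t / 10) }

        // 10s steps inside (970s, 1000s]: 980, 990, 1000.
        fmt.Println(value(980) + value(990) + value(1000)) // 297 (the closed window also had 97@970s -> 394)

        // 5s steps inside (970s, 1000s]: 975, 980, ..., 1000.
        sum := 0.0
        for step := 975.0; step <= 1000; step += 5 {
            sum += value(step)
        }
        fmt.Println(sum) // 591 = 97 + 98*2 + 99*2 + 100
    }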
# Offset is aligned with the step.
# Offset is aligned with the step, so this is from [98@980s, 99@990s, 100@1000s].
eval instant at 1010s sum_over_time(metric1[30s:10s] offset 10s)
{} 394
{} 297
# Same result for different offsets due to step alignment.
eval instant at 1010s sum_over_time(metric1[30s:10s] offset 9s)
@@ -78,16 +78,16 @@ eval instant at 1010s sum_over_time((metric1)[30s:10s] offset 3s)
# Nested subqueries
eval instant at 1000s rate(sum_over_time(metric1[30s:10s])[50s:10s])
{} 0.4
{} 0.30000000000000004
eval instant at 1000s rate(sum_over_time(metric2[30s:10s])[50s:10s])
{} 0.8
{} 0.6000000000000001
eval instant at 1000s rate(sum_over_time(metric3[30s:10s])[50s:10s])
{} 1.2
{} 0.9
eval instant at 1000s rate(sum_over_time((metric1+metric2+metric3)[30s:10s])[30s:10s])
{} 2.4
{} 1.8
clear
@@ -100,16 +100,20 @@ load 7s
eval instant at 80s rate(metric[1m])
{} 2.517857143
# No extrapolation, [2@20, 144@80]: (144 - 2) / 60
eval instant at 80s rate(metric[1m:10s])
{} 2.366666667
# Extrapolated to range start for counter, [2@20, 144@80]: (144 - 2) / (80 - 20)
eval instant at 80s rate(metric[1m500ms:10s])
{} 2.3666666666666667
# Extrapolated to zero value for counter, [2@20, 144@80]: (144 - 0) / 61
eval instant at 80s rate(metric[1m1s:10s])
{} 2.360655737704918
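The three expected values restate the arithmetic already spelled out in the comments; a small Go sketch that reproduces it (only the 2@20 and 144@80 points quoted in the comments are assumed):

    package main

    import "fmt"

    func main() {
        first, last := 2.0, 144.0 // the [2@20, 144@80] points named in the comments

        // "No extrapolation": just the increase over the 1m range.
        fmt.Println((last - first) / 60) // matches 2.366666667 above
        // "Extrapolated to range start for counter": the same increase over the 20s..80s span.
        fmt.Println((last - first) / (80 - 20)) // matches 2.3666666666666667 above
        // "Extrapolated to zero value for counter": the counter is taken back to 0 over the 61s range.
        fmt.Println((last - 0) / 61) // matches 2.360655737704918 above
    }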
# Only one value between 10s and 20s, 2@14
eval instant at 20s min_over_time(metric[10s])
{} 2
# min(1@10, 2@20)
eval instant at 20s min_over_time(metric[10s:10s])
# min(2@20)
eval instant at 20s min_over_time(metric[15s:10s])
{} 1
eval instant at 20m min_over_time(rate(metric[5m])[20m:1m])

View file

@@ -5,92 +5,92 @@ load 5m
trig{l="y"} 20
trig{l="NaN"} NaN
eval instant at 5m sin(trig)
eval instant at 1m sin(trig)
{l="x"} -0.5440211108893699
{l="y"} 0.9129452507276277
{l="NaN"} NaN
eval instant at 5m cos(trig)
eval instant at 1m cos(trig)
{l="x"} -0.8390715290764524
{l="y"} 0.40808206181339196
{l="NaN"} NaN
eval instant at 5m tan(trig)
eval instant at 1m tan(trig)
{l="x"} 0.6483608274590867
{l="y"} 2.2371609442247427
{l="NaN"} NaN
eval instant at 5m asin(trig - 10.1)
eval instant at 1m asin(trig - 10.1)
{l="x"} -0.10016742116155944
{l="y"} NaN
{l="NaN"} NaN
eval instant at 5m acos(trig - 10.1)
eval instant at 1m acos(trig - 10.1)
{l="x"} 1.670963747956456
{l="y"} NaN
{l="NaN"} NaN
eval instant at 5m atan(trig)
eval instant at 1m atan(trig)
{l="x"} 1.4711276743037345
{l="y"} 1.5208379310729538
{l="NaN"} NaN
eval instant at 5m sinh(trig)
eval instant at 1m sinh(trig)
{l="x"} 11013.232920103324
{l="y"} 2.4258259770489514e+08
{l="NaN"} NaN
eval instant at 5m cosh(trig)
eval instant at 1m cosh(trig)
{l="x"} 11013.232920103324
{l="y"} 2.4258259770489514e+08
{l="NaN"} NaN
eval instant at 5m tanh(trig)
eval instant at 1m tanh(trig)
{l="x"} 0.9999999958776927
{l="y"} 1
{l="NaN"} NaN
eval instant at 5m asinh(trig)
eval instant at 1m asinh(trig)
{l="x"} 2.99822295029797
{l="y"} 3.6895038689889055
{l="NaN"} NaN
eval instant at 5m acosh(trig)
eval instant at 1m acosh(trig)
{l="x"} 2.993222846126381
{l="y"} 3.6882538673612966
{l="NaN"} NaN
eval instant at 5m atanh(trig - 10.1)
eval instant at 1m atanh(trig - 10.1)
{l="x"} -0.10033534773107522
{l="y"} NaN
{l="NaN"} NaN
eval instant at 5m rad(trig)
eval instant at 1m rad(trig)
{l="x"} 0.17453292519943295
{l="y"} 0.3490658503988659
{l="NaN"} NaN
eval instant at 5m rad(trig - 10)
eval instant at 1m rad(trig - 10)
{l="x"} 0
{l="y"} 0.17453292519943295
{l="NaN"} NaN
eval instant at 5m rad(trig - 20)
eval instant at 1m rad(trig - 20)
{l="x"} -0.17453292519943295
{l="y"} 0
{l="NaN"} NaN
eval instant at 5m deg(trig)
eval instant at 1m deg(trig)
{l="x"} 572.9577951308232
{l="y"} 1145.9155902616465
{l="NaN"} NaN
eval instant at 5m deg(trig - 10)
eval instant at 1m deg(trig - 10)
{l="x"} 0
{l="y"} 572.9577951308232
{l="NaN"} NaN
eval instant at 5m deg(trig - 20)
eval instant at 1m deg(trig - 20)
{l="x"} -572.9577951308232
{l="y"} 0
{l="NaN"} NaN