matrixIterSlice shall drop floats and histograms at the left bound

Signed-off-by: Zhang Zhanpeng <zhangzhanpeng.zzp@alibaba-inc.com>
Zhang Zhanpeng 2024-04-09 00:46:52 +08:00
parent 1081e336a0
commit 381f8d52e0
10 changed files with 1248 additions and 174 deletions
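
In short: PromQL range selection becomes left-open. A range selector like metric[1h] evaluated at time t now covers the interval (t-1h, t] instead of [t-1h, t], so a sample sitting exactly on the left bound is dropped. A minimal sketch of the new boundary rule (inRange and the fixture below are illustrative names, not code from this commit):

package main

import "fmt"

// inRange reports whether a sample at timestamp t (milliseconds) falls
// inside a range selector (mint, maxt] under the new left-open semantics.
func inRange(t, mint, maxt int64) bool {
	return t > mint && t <= maxt // previously: t >= mint && t <= maxt
}

func main() {
	// With one sample per minute and a [1h] range evaluated at t=1h, the
	// sample at t=0 is no longer included: 60 samples instead of 61,
	// matching the count_over_time changes in the tests below.
	count := 0
	for t := int64(0); t <= 3_600_000; t += 60_000 {
		if inRange(t, 0, 3_600_000) {
			count++
		}
	}
	fmt.Println(count) // 60
}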


@@ -113,7 +113,7 @@ tests:
- expr: count_over_time(fixed_data[1h])
eval_time: 1h
exp_samples:
- value: 61
- value: 60
- expr: timestamp(fixed_data)
eval_time: 1h
exp_samples:
@@ -183,7 +183,7 @@ tests:
- expr: job:test:count_over_time1m
eval_time: 1m
exp_samples:
- value: 61
- value: 60
labels: 'job:test:count_over_time1m{job="test"}'
- expr: timestamp(job:test:count_over_time1m)
eval_time: 1m10s
@@ -194,7 +194,7 @@ tests:
- expr: job:test:count_over_time1m
eval_time: 2m
exp_samples:
- value: 61
- value: 60
labels: 'job:test:count_over_time1m{job="test"}'
- expr: timestamp(job:test:count_over_time1m)
eval_time: 2m59s999ms


@@ -2207,20 +2207,20 @@ func (ev *evaluator) matrixIterSlice(
mintFloats, mintHistograms := mint, mint
// First floats...
if len(floats) > 0 && floats[len(floats)-1].T >= mint {
if len(floats) > 0 && floats[len(floats)-1].T > mint {
// There is an overlap between previous and current ranges, retain common
// points. In most such cases:
// (a) the overlap is significantly larger than the eval step; and/or
// (b) the number of samples is relatively small.
// so a linear search will be as fast as a binary search.
var drop int
for drop = 0; floats[drop].T < mint; drop++ {
for drop = 0; floats[drop].T <= mint; drop++ {
}
ev.currentSamples -= drop
copy(floats, floats[drop:])
floats = floats[:len(floats)-drop]
// Only append points with timestamps after the last timestamp we have.
mintFloats = floats[len(floats)-1].T + 1
mintFloats = floats[len(floats)-1].T
} else {
ev.currentSamples -= len(floats)
if floats != nil {
@@ -2229,14 +2229,14 @@ func (ev *evaluator) matrixIterSlice(
}
// ...then the same for histograms. TODO(beorn7): Use generics?
if len(histograms) > 0 && histograms[len(histograms)-1].T >= mint {
if len(histograms) > 0 && histograms[len(histograms)-1].T > mint {
// There is an overlap between previous and current ranges, retain common
// points. In most such cases:
// (a) the overlap is significantly larger than the eval step; and/or
// (b) the number of samples is relatively small.
// so a linear search will be as fast as a binary search.
var drop int
for drop = 0; histograms[drop].T < mint; drop++ {
for drop = 0; histograms[drop].T <= mint; drop++ {
}
// Rotate the buffer around the drop index so that points before mint can be
// reused to store new histograms.
@@ -2247,7 +2247,7 @@ func (ev *evaluator) matrixIterSlice(
histograms = histograms[:len(histograms)-drop]
ev.currentSamples -= totalHPointSize(histograms)
// Only append points with timestamps after the last timestamp we have.
mintHistograms = histograms[len(histograms)-1].T + 1
mintHistograms = histograms[len(histograms)-1].T
} else {
ev.currentSamples -= totalHPointSize(histograms)
if histograms != nil {
@@ -2271,7 +2271,7 @@ loop:
case chunkenc.ValFloatHistogram, chunkenc.ValHistogram:
t := buf.AtT()
// Values in the buffer are guaranteed to be smaller than maxt.
if t >= mintHistograms {
if t > mintHistograms {
if histograms == nil {
histograms = getMatrixSelectorHPoints()
}
@@ -2297,7 +2297,7 @@ loop:
continue loop
}
// Values in the buffer are guaranteed to be smaller than maxt.
if t >= mintFloats {
if t > mintFloats {
ev.currentSamples++
if ev.currentSamples > ev.maxSamples {
ev.error(ErrTooManySamples(env))
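
The trimming loops above reuse buffered points from the previous window; the comparisons all shift by one: a buffered point at exactly mint is now discarded (<= instead of <), the append checks use strict inequality (> instead of >=), and mintFloats/mintHistograms no longer need the +1 offset. A standalone sketch of the float case (trimLeft is an illustrative helper, with an added bounds check that the real loop can omit because the preceding length check guarantees a point beyond mint):

package main

import "fmt"

// FPoint mirrors promql's float sample point.
type FPoint struct {
	T int64
	F float64
}

// trimLeft drops every buffered point at or before mint, mirroring the
// updated drop loop in matrixIterSlice.
func trimLeft(points []FPoint, mint int64) []FPoint {
	var drop int
	for drop = 0; drop < len(points) && points[drop].T <= mint; drop++ {
	}
	copy(points, points[drop:])
	return points[:len(points)-drop]
}

func main() {
	buf := []FPoint{{T: 10, F: 1}, {T: 20, F: 2}, {T: 30, F: 3}}
	// mint=20: the point at exactly 20 is now dropped as well.
	fmt.Println(trimLeft(buf, 20)) // [{30 3}]
}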

File diff suppressed because it is too large.


@@ -76,45 +76,43 @@ eval instant at 25s sum_over_time(metric{job="1"}[100s:1s] offset 20s @ 100)
# Since vector selector has timestamp, the result value does not depend on the timestamp of subqueries.
# Innermost sum=1+2+...+10=55.
# With [100s:25s] subquery, it's 55*5.
# With [100s:25s] subquery, it's 55*4.
eval instant at 100s sum_over_time(sum_over_time(metric{job="1"}[100s] @ 100)[100s:25s] @ 50)
{job="1"} 275
{job="1"} 220
# Nested subqueries with different timestamps on both.
# Since vector selector has timestamp, the result value does not depend on the timestamp of subqueries.
# Sum of innermost subquery is 275 as above. The outer subquery repeats it 4 times.
# Sum of innermost subquery is 220 as above. The outer subquery repeats it 3 times.
eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[100s] @ 100)[100s:25s] @ 50)[3s:1s] @ 3000)
{job="1"} 1100
{job="1"} 660
# Testing the inner subquery timestamp since vector selector does not have @.
# Inner sum for subquery [100s:25s] @ 50 are
# at -50 nothing, at -25 nothing, at 0=0, at 25=2, at 50=4+5=9.
# This sum of 11 is repeated 4 times by outer subquery.
# at -50 nothing, at -25 nothing, at 0=0, at 25=2, at 50=5.
# This sum of 7 is repeated 3 times by outer subquery.
eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[10s])[100s:25s] @ 50)[3s:1s] @ 200)
{job="1"} 44
{job="1"} 21
# Inner sum for subquery [100s:25s] @ 200 are
# at 100=9+10, at 125=12, at 150=14+15, at 175=17, at 200=19+20.
# This sum of 116 is repeated 4 times by outer subquery.
# at 125=12, at 150=15, at 175=17, at 200=20.
# This sum of 64 is repeated 3 times by outer subquery.
eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[10s])[100s:25s] @ 200)[3s:1s] @ 50)
{job="1"} 464
{job="1"} 192
# Nested subqueries with timestamp only on outer subquery.
# Outer most subquery:
# at 900=783
# inner subquery: at 870=87+86+85, at 880=88+87+86, at 890=89+88+87
# at 925=537
# inner subquery: at 895=89+88, at 905=90+89, at 915=90+91
# at 950=828
# inner subquery: at 920=92+91+90, at 930=93+92+91, at 940=94+93+92
# at 975=567
# inner subquery: at 945=94+93, at 955=95+94, at 965=96+95
# at 1000=873
# inner subquery: at 970=97+96+95, at 980=98+97+96, at 990=99+98+97
# at 925=360
# inner subquery: at 905=90+89, at 915=91+90
# at 950=372
# inner subquery: at 930=93+92, at 940=94+93
# at 975=380
# inner subquery: at 955=95+94, at 965=96+95
# at 1000=392
# inner subquery: at 980=98+97, at 990=99+98
eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[20s])[20s:10s] offset 10s)[100s:25s] @ 1000)
{job="1"} 3588
{job="1"} 1504
# minute is counted on the value of the sample.
eval instant at 10s minute(metric @ 1500)
@@ -137,32 +135,32 @@ eval instant at 15m timestamp(timestamp(metric{job="1"} @ 10))
# minute is counted on the value of the sample.
eval instant at 0s sum_over_time(minute(metric @ 1500)[100s:10s])
{job="1"} 22
{job="2"} 55
{job="1"} 20
{job="2"} 50
# If nothing passed, minute() takes eval time.
# Here the eval time is determined by the subquery.
# [50m:1m] at 6000, i.e. 100m, is 50m to 100m.
# sum=50+51+52+...+59+0+1+2+...+40.
# sum=51+52+...+59+0+1+2+...+40.
eval instant at 0s sum_over_time(minute()[50m:1m] @ 6000)
{} 1315
# sum=46+47+...+59+0+1+2+...+35.
eval instant at 0s sum_over_time(minute()[50m:1m] @ 6000 offset 5m)
{} 1365
# sum=45+46+47+...+59+0+1+2+...+35.
eval instant at 0s sum_over_time(minute()[50m:1m] @ 6000 offset 5m)
{} 1410
# time() is the eval time which is determined by subquery here.
# 2900+2901+...+3000 = (3000*3001 - 2899*2900)/2.
# 2901+...+3000 = (3000*3001 - 2900*2901)/2.
eval instant at 0s sum_over_time(vector(time())[100s:1s] @ 3000)
{} 297950
{} 295050
# 2300+2301+...+2400 = (2400*2401 - 2299*2300)/2.
# 2301+...+2400 = (2400*2401 - 2300*2301)/2.
eval instant at 0s sum_over_time(vector(time())[100s:1s] @ 3000 offset 600s)
{} 237350
{} 235050
# timestamp() takes the time of the sample and not the evaluation time.
eval instant at 0s sum_over_time(timestamp(metric{job="1"} @ 10)[100s:10s] @ 3000)
{job="1"} 110
{job="1"} 100
# The result of inner timestamp() will have the timestamp as the
# eval time, hence entire expression is not step invariant and depends on eval time.
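
The recurring pattern in the updates above: a subquery [range:step] yields one fewer evaluation step whenever a step used to land exactly on the left bound. A small sketch of the step enumeration under the new convention (subquerySteps is an illustrative helper that assumes step-aligned bounds; the real logic lives in the evaluator):

package main

import "fmt"

// subquerySteps lists the timestamps (in seconds) at which a subquery
// expr[rangeDur:step] is evaluated for a given evaluation time, under the
// left-open convention: evalTime-rangeDur < t <= evalTime.
func subquerySteps(evalTime, rangeDur, step int64) []int64 {
	var ts []int64
	for t := evalTime - rangeDur + step; t <= evalTime; t += step {
		ts = append(ts, t)
	}
	return ts
}

func main() {
	// [100s:25s] @ 50 used to evaluate at -50, -25, 0, 25, 50 (5 steps);
	// the step on the left bound (-50) is now excluded, leaving 4. With a
	// step-invariant inner value of 55, the sum drops from 275 to 220.
	fmt.Println(subquerySteps(50, 100, 25)) // [-25 0 25 50]
}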


@@ -6,9 +6,6 @@ load 5m
# Tests for resets().
eval instant at 50m resets(http_requests[5m])
{path="/foo"} 0
{path="/bar"} 0
{path="/biz"} 0
eval instant at 50m resets(http_requests[20m])
{path="/foo"} 1
@@ -16,8 +13,8 @@ eval instant at 50m resets(http_requests[20m])
{path="/biz"} 0
eval instant at 50m resets(http_requests[30m])
{path="/foo"} 2
{path="/bar"} 1
{path="/foo"} 1
{path="/bar"} 0
{path="/biz"} 0
eval instant at 50m resets(http_requests[50m])
@@ -29,28 +26,25 @@ eval instant at 50m resets(nonexistent_metric[50m])
# Tests for changes().
eval instant at 50m changes(http_requests[5m])
{path="/foo"} 0
{path="/bar"} 0
{path="/biz"} 0
eval instant at 50m changes(http_requests[20m])
{path="/foo"} 3
{path="/bar"} 3
{path="/foo"} 2
{path="/bar"} 2
{path="/biz"} 0
eval instant at 50m changes(http_requests[30m])
{path="/foo"} 4
{path="/bar"} 5
{path="/biz"} 1
{path="/foo"} 3
{path="/bar"} 4
{path="/biz"} 0
eval instant at 50m changes(http_requests[50m])
{path="/foo"} 8
{path="/bar"} 9
{path="/foo"} 7
{path="/bar"} 8
{path="/biz"} 1
eval instant at 50m changes((http_requests[50m]))
{path="/foo"} 8
{path="/bar"} 9
{path="/foo"} 7
{path="/bar"} 8
{path="/biz"} 1
eval instant at 50m changes(nonexistent_metric[50m])
@@ -63,7 +57,7 @@ load 5m
eval instant at 15m changes(x[15m])
{a="b"} 0
{a="c"} 2
{a="c"} 1
clear
@@ -77,7 +71,7 @@ load 5m
# Tests for increase().
eval instant at 50m increase(http_requests[50m])
{path="/foo"} 100
{path="/bar"} 90
{path="/bar"} 88.88888888888889
{path="/dings"} 100
{path="/bumms"} 100
@@ -115,11 +109,10 @@ load 5m
# Counter resets in the middle of the range are handled correctly by rate().
eval instant at 50m rate(testcounter_reset_middle[50m])
{} 0.03
{} 0.02962962962962963
# Counter resets at end of range are ignored by rate().
eval instant at 50m rate(testcounter_reset_end[5m])
{} 0
clear
@@ -237,19 +230,19 @@ eval instant at 50m deriv(testcounter_reset_middle[100m])
# intercept at t=3000: 38.63636363636364
# intercept at t=3000+3600: 76.81818181818181
eval instant at 50m predict_linear(testcounter_reset_middle[50m], 3600)
{} 76.81818181818181
{} 70
# intercept at t = 3000+3600 = 6600
eval instant at 50m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600)
{} 76.81818181818181
{} 70
# intercept at t = 600+3600 = 4200
eval instant at 10m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600)
{} 51.36363636363637
{} 48.18181818181818
# intercept at t = 4200+3600 = 7800
eval instant at 70m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600)
{} 89.54545454545455
{} 80.9090909090909
# With http_requests, there is a sample value exactly at the end of
# the range, and it has exactly the predicted value, so predict_linear
@@ -678,10 +671,10 @@ load 10s
metric10 -9.988465674311579e+307 9.988465674311579e+307
eval instant at 1m avg_over_time(metric[1m])
{} 3
{} 3.5
eval instant at 1m sum_over_time(metric[1m])/count_over_time(metric[1m])
{} 3
{} 3.5
eval instant at 1m avg_over_time(metric2[1m])
{} Inf
@@ -748,8 +741,8 @@ eval instant at 1m avg_over_time(metric8[1m])
{} 9.988465674311579e+307
# This overflows float64.
eval instant at 1m sum_over_time(metric8[1m])/count_over_time(metric8[1m])
{} Inf
eval instant at 1m sum_over_time(metric8[2m])/count_over_time(metric8[2m])
{} +Inf
eval instant at 1m avg_over_time(metric9[1m])
{} -9.988465674311579e+307
@@ -758,10 +751,16 @@ eval instant at 1m avg_over_time(metric9[1m])
eval instant at 1m sum_over_time(metric9[1m])/count_over_time(metric9[1m])
{} -Inf
eval instant at 1m avg_over_time(metric10[1m])
eval instant at 45s avg_over_time(metric10[1m])
{} 0
eval instant at 1m sum_over_time(metric10[1m])/count_over_time(metric10[1m])
eval instant at 1m avg_over_time(metric10[2m])
{} 0
eval instant at 45s sum_over_time(metric10[1m])/count_over_time(metric10[1m])
{} 0
eval instant at 1m sum_over_time(metric10[2m])/count_over_time(metric10[2m])
{} 0
# Test if very big intermediate values cause loss of detail.
@@ -770,7 +769,7 @@ load 10s
metric 1 1e100 1 -1e100
eval instant at 1m sum_over_time(metric[1m])
{} 2
{} 1
# Tests for stddev_over_time and stdvar_over_time.
clear
@@ -778,13 +777,13 @@ load 10s
metric 0 8 8 2 3
eval instant at 1m stdvar_over_time(metric[1m])
{} 10.56
{} 7.6875
eval instant at 1m stddev_over_time(metric[1m])
{} 3.249615
{} 2.7726341266023544
eval instant at 1m stddev_over_time((metric[1m]))
{} 3.249615
{} 2.7726341266023544
# Tests for stddev_over_time and stdvar_over_time #4927.
clear
@@ -814,24 +813,24 @@ load 10s
data{test="uneven samples"} 0 1 4
eval instant at 1m quantile_over_time(0, data[1m])
{test="two samples"} 0
{test="three samples"} 0
{test="uneven samples"} 0
eval instant at 1m quantile_over_time(0.5, data[1m])
{test="two samples"} 0.5
{test="two samples"} 1
{test="three samples"} 1
{test="uneven samples"} 1
eval instant at 1m quantile_over_time(0.75, data[1m])
{test="two samples"} 0.75
eval instant at 1m quantile_over_time(0.5, data[1m])
{test="two samples"} 1
{test="three samples"} 1.5
{test="uneven samples"} 2.5
eval instant at 1m quantile_over_time(0.75, data[1m])
{test="two samples"} 1
{test="three samples"} 1.75
{test="uneven samples"} 3.25
eval instant at 1m quantile_over_time(0.8, data[1m])
{test="two samples"} 0.8
{test="three samples"} 1.6
{test="uneven samples"} 2.8
{test="two samples"} 1
{test="three samples"} 1.8
{test="uneven samples"} 3.4000000000000004
eval instant at 1m quantile_over_time(1, data[1m])
{test="two samples"} 1
@@ -965,8 +964,8 @@ eval instant at 1m min_over_time(data[1m])
eval instant at 1m max_over_time(data[1m])
{type="numbers"} 3
{type="some_nan"} 2
{type="some_nan2"} 2
{type="some_nan"} 0
{type="some_nan2"} 1
{type="some_nan3"} 1
{type="only_nan"} NaN
@@ -1063,13 +1062,19 @@ eval instant at 1m absent_over_time(httpd_log_lines_total[30s])
{} 1
eval instant at 15m absent_over_time(http_requests[5m])
eval instant at 16m absent_over_time(http_requests[5m])
{} 1
eval instant at 15m absent_over_time(http_requests[10m])
eval instant at 16m absent_over_time(http_requests[6m])
{} 1
eval instant at 16m absent_over_time(http_requests[16m])
eval instant at 16m absent_over_time(httpd_handshake_failures_total[1m])
{} 1
eval instant at 16m absent_over_time(httpd_handshake_failures_total[2m])
eval instant at 16m absent_over_time({instance="127.0.0.1"}[5m])
@@ -1125,17 +1130,18 @@ eval instant at 0m present_over_time(httpd_log_lines_total[30s])
eval instant at 1m present_over_time(httpd_log_lines_total[30s])
eval instant at 15m present_over_time(http_requests[5m])
eval instant at 15m present_over_time(http_requests[10m])
{instance="127.0.0.1", job="httpd", path="/bar"} 1
{instance="127.0.0.1", job="httpd", path="/foo"} 1
eval instant at 16m present_over_time(http_requests[5m])
eval instant at 16m present_over_time(http_requests[6m])
eval instant at 16m present_over_time(http_requests[16m])
{instance="127.0.0.1", job="httpd", path="/bar"} 1
{instance="127.0.0.1", job="httpd", path="/foo"} 1
eval instant at 16m present_over_time(httpd_handshake_failures_total[1m])
{instance="127.0.0.1", job="node"} 1
eval instant at 16m present_over_time({instance="127.0.0.1"}[5m])
{instance="127.0.0.1",job="node"} 1


@@ -93,15 +93,15 @@ eval instant at 50m histogram_quantile(0.8, testhistogram_bucket)
{start="negative"} 0.3
# More realistic with rates.
eval instant at 50m histogram_quantile(0.2, rate(testhistogram_bucket[5m]))
eval instant at 50m histogram_quantile(0.2, rate(testhistogram_bucket[10m]))
{start="positive"} 0.048
{start="negative"} -0.2
eval instant at 50m histogram_quantile(0.5, rate(testhistogram_bucket[5m]))
eval instant at 50m histogram_quantile(0.5, rate(testhistogram_bucket[10m]))
{start="positive"} 0.15
{start="negative"} -0.15
eval instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[5m]))
eval instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[10m]))
{start="positive"} 0.72
{start="negative"} 0.3
@@ -125,58 +125,58 @@ eval instant at 47m histogram_quantile(5./6., rate(testhistogram2_bucket[15m]))
{} 5
# Aggregated histogram: Everything in one.
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le))
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le))
{} 0.075
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le))
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le))
{} 0.1277777777777778
# Aggregated histogram: Everything in one. Now with avg, which does not change anything.
eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds_bucket[5m])) by (le))
eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds_bucket[10m])) by (le))
{} 0.075
eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds_bucket[5m])) by (le))
eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds_bucket[10m])) by (le))
{} 0.12777777777777778
# Aggregated histogram: By instance.
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance))
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, instance))
{instance="ins1"} 0.075
{instance="ins2"} 0.075
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance))
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, instance))
{instance="ins1"} 0.1333333333
{instance="ins2"} 0.125
# Aggregated histogram: By job.
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job))
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, job))
{job="job1"} 0.1
{job="job2"} 0.0642857142857143
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job))
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, job))
{job="job1"} 0.14
{job="job2"} 0.1125
# Aggregated histogram: By job and instance.
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance))
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, job, instance))
{instance="ins1", job="job1"} 0.11
{instance="ins2", job="job1"} 0.09
{instance="ins1", job="job2"} 0.06
{instance="ins2", job="job2"} 0.0675
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance))
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, job, instance))
{instance="ins1", job="job1"} 0.15
{instance="ins2", job="job1"} 0.1333333333333333
{instance="ins1", job="job2"} 0.1
{instance="ins2", job="job2"} 0.1166666666666667
# The unaggregated histogram for comparison. Same result as the previous one.
eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds_bucket[5m]))
eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds_bucket[10m]))
{instance="ins1", job="job1"} 0.11
{instance="ins2", job="job1"} 0.09
{instance="ins1", job="job2"} 0.06
{instance="ins2", job="job2"} 0.0675
eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[5m]))
eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[10m]))
{instance="ins1", job="job1"} 0.15
{instance="ins2", job="job1"} 0.13333333333333333
{instance="ins1", job="job2"} 0.1
@@ -205,15 +205,15 @@ eval instant at 50m histogram_quantile(0.99, nonmonotonic_bucket)
{} 979.75
# Buckets with different representations of the same upper bound.
eval instant at 50m histogram_quantile(0.5, rate(mixed_bucket[5m]))
eval instant at 50m histogram_quantile(0.5, rate(mixed_bucket[10m]))
{instance="ins1", job="job1"} 0.15
{instance="ins2", job="job1"} NaN
eval instant at 50m histogram_quantile(0.75, rate(mixed_bucket[5m]))
eval instant at 50m histogram_quantile(0.75, rate(mixed_bucket[10m]))
{instance="ins1", job="job1"} 0.2
{instance="ins2", job="job1"} NaN
eval instant at 50m histogram_quantile(1, rate(mixed_bucket[5m]))
eval instant at 50m histogram_quantile(1, rate(mixed_bucket[10m]))
{instance="ins1", job="job1"} 0.2
{instance="ins2", job="job1"} NaN
@@ -222,7 +222,7 @@ load 5m
empty_bucket{le="0.2", job="job1", instance="ins1"} 0x10
empty_bucket{le="+Inf", job="job1", instance="ins1"} 0x10
eval instant at 50m histogram_quantile(0.2, rate(empty_bucket[5m]))
eval instant at 50m histogram_quantile(0.2, rate(empty_bucket[10m]))
{instance="ins1", job="job1"} NaN
# Load a duplicate histogram with a different name to test failure scenario on multiple histograms with the same label set
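
Many ranges in this file grow from [5m] to [10m] for a related reason: these series are scraped every 5m, so a left-open 5m window such as (45m, 50m] now holds a single sample, and rate() needs at least two points to return anything. Doubling the range keeps two samples in the window, and since the fixtures evidently increase linearly, the per-second rates, and hence the expected quantiles, are unchanged.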


@@ -133,8 +133,8 @@ eval instant at 50m histogram_quantile(0.5, incr_histogram)
{} 1.5
# Per-second average rate of increase should be 1/(5*60) for count and buckets, then 2/(5*60) for sum.
eval instant at 50m rate(incr_histogram[5m])
{} {{count:0.0033333333333333335 sum:0.006666666666666667 offset:1 buckets:[0.0033333333333333335]}}
eval instant at 50m rate(incr_histogram[10m])
{} {{count:0.0033333333333333335 sum:0.006666666666666667 offset:1 buckets:[0.0033333333333333335]}}
# Calculate the 50th percentile of observations over the last 10m.
eval instant at 50m histogram_quantile(0.5, rate(incr_histogram[10m]))
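
Same pattern for the native-histogram test: with one histogram sample every 5m, the [5m] window now contains a single sample, so the range doubles to [10m]; the constant per-scrape increment keeps the expected per-second rate (1/300 for count and buckets, 2/300 for sum) the same.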


@@ -113,8 +113,8 @@ eval instant at 50m http_requests{job="api-server", group="canary"}
http_requests{group="canary", instance="0", job="api-server"} 300
http_requests{group="canary", instance="1", job="api-server"} 400
eval instant at 50m http_requests{job="api-server", group="canary"} + rate(http_requests{job="api-server"}[5m]) * 5 * 60
{group="canary", instance="0", job="api-server"} 330
eval instant at 50m http_requests{job="api-server", group="canary"} + rate(http_requests{job="api-server"}[10m]) * 5 * 60
{group="canary", instance="0", job="api-server"} 330
{group="canary", instance="1", job="api-server"} 440
eval instant at 50m rate(http_requests[25m]) * 25 * 60


@@ -30,6 +30,8 @@ eval instant at 10s count_over_time(metric[1s])
eval instant at 20s count_over_time(metric[1s])
eval instant at 20s count_over_time(metric[10s])
eval instant at 20s count_over_time(metric[20s])
{} 1
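
The added case documents the boundary directly: presumably with a sample at 10s, metric[10s] at 20s now covers (10s, 20s] and comes back empty, while the new metric[20s] case covers (0s, 20s] and still counts one sample.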


@@ -10,18 +10,18 @@ eval instant at 10s sum_over_time(metric[50s:5s])
# Every evaluation yields the last value, i.e. 2
eval instant at 5m sum_over_time(metric[50s:10s])
{} 12
{} 10
# Series becomes stale at 5m10s (5m after last sample)
# Hence subquery gets a single sample at 6m-50s=5m10s.
eval instant at 6m sum_over_time(metric[50s:10s])
# Hence subquery gets a single sample at 5m10s.
eval instant at 6m sum_over_time(metric[60s:10s])
{} 2
eval instant at 10s rate(metric[20s:10s])
{} 0.1
eval instant at 20s rate(metric[20s:5s])
{} 0.05
{} 0.06666666666666667
clear
@@ -49,16 +49,16 @@ load 10s
metric3 0+3x1000
eval instant at 1000s sum_over_time(metric1[30s:10s])
{} 394
{} 297
# This is (394*2 - 100), because other than the last 100 at 1000s,
# everything else is repeated with the 5s step.
eval instant at 1000s sum_over_time(metric1[30s:5s])
{} 688
{} 591
# Offset is aligned with the step.
eval instant at 1010s sum_over_time(metric1[30s:10s] offset 10s)
{} 394
{} 297
# Same result for different offsets due to step alignment.
eval instant at 1010s sum_over_time(metric1[30s:10s] offset 9s)
@@ -78,16 +78,16 @@ eval instant at 1010s sum_over_time((metric1)[30s:10s] offset 3s)
# Nested subqueries
eval instant at 1000s rate(sum_over_time(metric1[30s:10s])[50s:10s])
{} 0.4
{} 0.30000000000000004
eval instant at 1000s rate(sum_over_time(metric2[30s:10s])[50s:10s])
{} 0.8
{} 0.6000000000000001
eval instant at 1000s rate(sum_over_time(metric3[30s:10s])[50s:10s])
{} 1.2
{} 0.9
eval instant at 1000s rate(sum_over_time((metric1+metric2+metric3)[30s:10s])[30s:10s])
{} 2.4
{} 1.8
clear
@@ -102,15 +102,15 @@ eval instant at 80s rate(metric[1m])
# No extrapolation, [2@20, 144@80]: (144 - 2) / 60
eval instant at 80s rate(metric[1m:10s])
{} 2.366666667
{} 2.4
# Only one value between 10s and 20s, 2@14
eval instant at 20s min_over_time(metric[10s])
{} 2
# min(1@10, 2@20)
# min(2@20)
eval instant at 20s min_over_time(metric[10s:10s])
{} 1
{} 2
eval instant at 20m min_over_time(rate(metric[5m])[20m:1m])
{} 0.12119047619047618
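
The min_over_time(metric[10s:10s]) case above shows the same exclusion at the subquery level: at eval time 20s, the subquery used to take min(1@10s, 2@20s) = 1, but the step on the left bound (10s) is now dropped, leaving min(2@20s) = 2.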