# Testdata for resets() and changes().
load 5m
  http_requests{path="/foo"} 1 2 3 0 1 0 0 1 2 0
  http_requests{path="/bar"} 1 2 3 4 5 1 2 3 4 5
  http_requests{path="/biz"} 0 0 0 0 0 1 1 1 1 1

# Tests for resets().
eval instant at 50m resets(http_requests[5m])

eval instant at 50m resets(http_requests[10m])
  {path="/foo"} 0
  {path="/bar"} 0
  {path="/biz"} 0

eval instant at 50m resets(http_requests[600])
  {path="/foo"} 0
  {path="/bar"} 0
  {path="/biz"} 0

eval instant at 50m resets(http_requests[20m])
  {path="/foo"} 1
  {path="/bar"} 0
  {path="/biz"} 0

eval instant at 50m resets(http_requests[30m])
  {path="/foo"} 1
  {path="/bar"} 0
  {path="/biz"} 0

eval instant at 50m resets(http_requests[32m])
  {path="/foo"} 2
  {path="/bar"} 1
  {path="/biz"} 0

eval instant at 50m resets(http_requests[50m])
  {path="/foo"} 3
  {path="/bar"} 1
  {path="/biz"} 0

eval instant at 50m resets(nonexistent_metric[50m])

# Tests for changes().
eval instant at 50m changes(http_requests[5m])

eval instant at 50m changes(http_requests[6m])
  {path="/foo"} 0
  {path="/bar"} 0
  {path="/biz"} 0

eval instant at 50m changes(http_requests[20m])
  {path="/foo"} 2
  {path="/bar"} 2
  {path="/biz"} 0

eval instant at 50m changes(http_requests[30m])
  {path="/foo"} 3
  {path="/bar"} 4
  {path="/biz"} 0

eval instant at 50m changes(http_requests[50m])
  {path="/foo"} 7
  {path="/bar"} 8
  {path="/biz"} 1

clear

load 5m
  x{a="b"} NaN NaN NaN
  x{a="c"} 0 NaN 0

eval instant at 15m changes(x[20m])
  {a="b"} 0
  {a="c"} 2

clear

# Tests for increase().
load 5m
  http_requests_total{path="/foo"} 0+10x10
  http_requests_total{path="/bar"} 0+18x5 0+18x5
  http_requests_total{path="/dings"} 10+10x10
  http_requests_total{path="/bumms"} 1+10x10

eval instant at 50m increase(http_requests_total[50m])
  {path="/foo"} 100
  {path="/bar"} 160
  {path="/dings"} 100
  {path="/bumms"} 100

# "foo" and "bar" are already at value 0 at t=0, so no extrapolation
# happens. "dings" has value 10 at t=0 and would reach 0 at t=-5m. The
# normal extrapolation by half a sample interval only goes to
# t=-2m30s, so that's not yet reaching a negative value and therefore
# chosen. However, "bumms" has value 1 at t=0 and would reach 0 at
# t=-30s. Here the extrapolation to t=-2m30s would reach a negative
# value, and therefore the extrapolation happens only by 30s.
eval instant at 50m increase(http_requests_total[100m])
  {path="/foo"} 100
  {path="/bar"} 162
  {path="/dings"} 105
  {path="/bumms"} 101

clear

# Test for increase() with counter reset.
# When the counter is reset, it always starts at 0.
# So the sequence 3 2 (decreasing counter = reset) is interpreted the same as 3 0 1 2.
# Prometheus assumes it missed the intermediate values 0 and 1.
load 5m
  http_requests_total{path="/foo"} 0 1 2 3 2 3 4

eval instant at 30m increase(http_requests_total[30m])
  {path="/foo"} 7

clear

# Tests for rate().
load 5m
  testcounter_reset_middle_total 0+27x4 0+27x5
  testcounter_reset_end_total 0+10x9 0 10

# Counter resets in the middle of the range are handled correctly by rate().
eval instant at 50m rate(testcounter_reset_middle_total[50m])
  {} 0.08
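
# Worked numbers: over (0m, 50m] the samples run 27..108, reset, then 0..135,
# a raw increase of 81 + 135 = 216 over the 2700s between samples.
# Extrapolating the remaining 300s back to the window start gives
# 216 * 3000/2700 = 240, i.e. a per-second rate of 240/3000 = 0.08.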

# Counter resets at end of range are ignored by rate().
eval instant at 50m rate(testcounter_reset_end_total[5m])

eval instant at 50m rate(testcounter_reset_end_total[6m])
  {} 0

clear

load 5m
  calculate_rate_offset_total{x="a"} 0+10x10
  calculate_rate_offset_total{x="b"} 0+20x10
  calculate_rate_window_total 0+80x10

# Rates should calculate per-second rates.
eval instant at 50m rate(calculate_rate_window_total[50m])
  {} 0.26666666666666666

eval instant at 50m rate(calculate_rate_offset_total[10m] offset 5m)
  {x="a"} 0.03333333333333333
  {x="b"} 0.06666666666666667

clear

load 4m
  testcounter_zero_cutoff_total{start="0m"} 0+240x10
  testcounter_zero_cutoff_total{start="1m"} 60+240x10
  testcounter_zero_cutoff_total{start="2m"} 120+240x10
  testcounter_zero_cutoff_total{start="3m"} 180+240x10
  testcounter_zero_cutoff_total{start="4m"} 240+240x10
  testcounter_zero_cutoff_total{start="5m"} 300+240x10

# Zero cutoff for left-side extrapolation happens until we
# reach half a sampling interval (2m). Beyond that, we only
# extrapolate by half a sampling interval.
eval instant at 10m rate(testcounter_zero_cutoff_total[20m])
  {start="0m"} 0.5
  {start="1m"} 0.55
  {start="2m"} 0.6
  {start="3m"} 0.6
  {start="4m"} 0.6
  {start="5m"} 0.6

# Normal half-interval cutoff for left-side extrapolation.
eval instant at 50m rate(testcounter_zero_cutoff_total[20m])
  {start="0m"} 0.6
  {start="1m"} 0.6
  {start="2m"} 0.6
  {start="3m"} 0.6
  {start="4m"} 0.6
  {start="5m"} 0.6

clear

# Tests for irate().
load 5m
  http_requests_total{path="/foo"} 0+10x10
  http_requests_total{path="/bar"} 0+10x5 0+10x5

eval instant at 50m irate(http_requests_total[50m])
  {path="/foo"} .03333333333333333333
  {path="/bar"} .03333333333333333333

# Counter reset.
eval instant at 30m irate(http_requests_total[50m])
  {path="/foo"} .03333333333333333333
  {path="/bar"} 0

clear

# Tests for delta().
load 5m
  http_requests{path="/foo"} 0 50 100 150 200
  http_requests{path="/bar"} 200 150 100 50 0

eval instant at 20m delta(http_requests[20m])
  {path="/foo"} 200
  {path="/bar"} -200

clear

# Tests for idelta().
load 5m
  http_requests{path="/foo"} 0 50 100 150
  http_requests{path="/bar"} 0 50 100 50

eval instant at 20m idelta(http_requests[20m])
  {path="/foo"} 50
  {path="/bar"} -50

clear

# Tests for deriv() and predict_linear().
load 5m
  testcounter_reset_middle_total 0+10x4 0+10x5
  http_requests_total{job="app-server", instance="1", group="canary"} 0+80x10

# deriv should return the same as rate in simple cases.
eval instant at 50m rate(http_requests_total{group="canary", instance="1", job="app-server"}[50m])
  {group="canary", instance="1", job="app-server"} 0.26666666666666666

eval instant at 50m deriv(http_requests_total{group="canary", instance="1", job="app-server"}[50m])
  {group="canary", instance="1", job="app-server"} 0.26666666666666666

# deriv should return correct result.
eval instant at 50m deriv(testcounter_reset_middle_total[100m])
  {} 0.010606060606060607

# predict_linear should return correct result.
# X/s = [ 0, 300, 600, 900,1200,1500,1800,2100,2400,2700,3000]
# Y   = [ 0,  10,  20,  30,  40,   0,  10,  20,  30,  40,  50]
# sumX  = 16500
# sumY  = 250
# sumXY = 480000
# sumX2 = 34650000
# n     = 11
# covXY = 105000
# varX  = 9900000
# slope = 0.010606060606060607
# intercept at t=0: 6.818181818181818
# intercept at t=3000: 38.63636363636364
# intercept at t=3000+3600: 76.81818181818181
eval instant at 50m predict_linear(testcounter_reset_middle_total[50m], 3600)
  {} 70

eval instant at 50m predict_linear(testcounter_reset_middle_total[50m], 1h)
  {} 70
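
# Note: the derivation above uses all 11 samples, as the @ 3000 queries
# below do. A 50m range at t=50m is left-open, (0m, 50m], so the t=0 sample
# is dropped: n=10, the slope becomes 1/110 per second with mean 25 at
# t=1650, and the prediction for t=3000+3600 is 25 + 4950/110 = 70.
# "1h" is simply 3600s written as a duration.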

# intercept at t = 3000+3600 = 6600
eval instant at 50m predict_linear(testcounter_reset_middle_total[55m] @ 3000, 3600)
  {} 76.81818181818181

eval instant at 50m predict_linear(testcounter_reset_middle_total[55m] @ 3000, 1h)
  {} 76.81818181818181

# intercept at t = 600+3600 = 4200
eval instant at 10m predict_linear(testcounter_reset_middle_total[55m] @ 3000, 3600)
  {} 51.36363636363637

# intercept at t = 4200+3600 = 7800
eval instant at 70m predict_linear(testcounter_reset_middle_total[55m] @ 3000, 3600)
  {} 89.54545454545455

# With http_requests_total, there is a sample value exactly at the end of
# the range, and it has exactly the predicted value, so predict_linear
# can be emulated with deriv.
eval instant at 50m predict_linear(http_requests_total[50m], 3600) - (http_requests_total + deriv(http_requests_total[50m]) * 3600)
  {group="canary", instance="1", job="app-server"} 0

clear

# Tests for label_replace.
load 5m
  testmetric{src="source-value-10",dst="original-destination-value"} 0
  testmetric{src="source-value-20",dst="original-destination-value"} 1

# label_replace does a full-string match and replace.
eval instant at 0m label_replace(testmetric, "dst", "destination-value-$1", "src", "source-value-(.*)")
  testmetric{src="source-value-10",dst="destination-value-10"} 0
  testmetric{src="source-value-20",dst="destination-value-20"} 1

# label_replace does not do a sub-string match.
eval instant at 0m label_replace(testmetric, "dst", "destination-value-$1", "src", "value-(.*)")
  testmetric{src="source-value-10",dst="original-destination-value"} 0
  testmetric{src="source-value-20",dst="original-destination-value"} 1

# label_replace works with multiple capture groups.
eval instant at 0m label_replace(testmetric, "dst", "$1-value-$2", "src", "(.*)-value-(.*)")
  testmetric{src="source-value-10",dst="source-value-10"} 0
  testmetric{src="source-value-20",dst="source-value-20"} 1

# label_replace does not overwrite the destination label if the source label
# does not exist.
eval instant at 0m label_replace(testmetric, "dst", "value-$1", "nonexistent-src", "source-value-(.*)")
  testmetric{src="source-value-10",dst="original-destination-value"} 0
  testmetric{src="source-value-20",dst="original-destination-value"} 1

# label_replace overwrites the destination label if the source label is empty,
# but matched.
eval instant at 0m label_replace(testmetric, "dst", "value-$1", "nonexistent-src", "(.*)")
  testmetric{src="source-value-10",dst="value-"} 0
  testmetric{src="source-value-20",dst="value-"} 1

# label_replace does not overwrite the destination label if the source label
# is not matched.
eval instant at 0m label_replace(testmetric, "dst", "value-$1", "src", "non-matching-regex")
  testmetric{src="source-value-10",dst="original-destination-value"} 0
  testmetric{src="source-value-20",dst="original-destination-value"} 1

eval instant at 0m label_replace((((testmetric))), (("dst")), (("value-$1")), (("src")), (("non-matching-regex")))
  testmetric{src="source-value-10",dst="original-destination-value"} 0
  testmetric{src="source-value-20",dst="original-destination-value"} 1

# label_replace drops labels that are set to empty values.
eval instant at 0m label_replace(testmetric, "dst", "", "dst", ".*")
  testmetric{src="source-value-10"} 0
  testmetric{src="source-value-20"} 1

# label_replace fails when the regex is invalid.
eval_fail instant at 0m label_replace(testmetric, "dst", "value-$1", "src", "(.*")

# label_replace fails when the destination label name is not a valid Prometheus label name.
eval_fail instant at 0m label_replace(testmetric, "invalid-label-name", "", "src", "(.*)")

# label_replace fails when there would be duplicated identical output label sets.
eval_fail instant at 0m label_replace(testmetric, "src", "", "", "")

clear

# Tests for vector, time and timestamp.
load 10s
  metric 1 1

eval instant at 0s timestamp(metric)
  {} 0

eval instant at 5s timestamp(metric)
  {} 0

eval instant at 5s timestamp(((metric)))
  {} 0

eval instant at 10s timestamp(metric)
  {} 10

eval instant at 10s timestamp(((metric)))
  {} 10

# Tests for label_join.
load 5m
  testmetric{src="a",src1="b",src2="c",dst="original-destination-value"} 0
  testmetric{src="d",src1="e",src2="f",dst="original-destination-value"} 1

# label_join joins all src values in order.
eval instant at 0m label_join(testmetric, "dst", "-", "src", "src1", "src2")
  testmetric{src="a",src1="b",src2="c",dst="a-b-c"} 0
  testmetric{src="d",src1="e",src2="f",dst="d-e-f"} 1

# label_join treats non-existent src labels as empty strings.
eval instant at 0m label_join(testmetric, "dst", "-", "src", "src3", "src1")
  testmetric{src="a",src1="b",src2="c",dst="a--b"} 0
  testmetric{src="d",src1="e",src2="f",dst="d--e"} 1

# label_join overwrites the destination label even if the resulting dst label is an empty string.
eval instant at 0m label_join(testmetric, "dst", "", "emptysrc", "emptysrc1", "emptysrc2")
  testmetric{src="a",src1="b",src2="c"} 0
  testmetric{src="d",src1="e",src2="f"} 1

# Test label_join without any src labels.
eval instant at 0m label_join(testmetric, "dst", ", ")
  testmetric{src="a",src1="b",src2="c"} 0
  testmetric{src="d",src1="e",src2="f"} 1

# Test label_join when the dst label is not present.
load 5m
  testmetric1{src="foo",src1="bar",src2="foobar"} 0
  testmetric1{src="fizz",src1="buzz",src2="fizzbuzz"} 1

# label_join creates dst label if not present.
eval instant at 0m label_join(testmetric1, "dst", ", ", "src", "src1", "src2")
  testmetric1{src="foo",src1="bar",src2="foobar",dst="foo, bar, foobar"} 0
  testmetric1{src="fizz",src1="buzz",src2="fizzbuzz",dst="fizz, buzz, fizzbuzz"} 1

clear

# Tests for vector.
eval instant at 0m vector(1)
  {} 1

eval instant at 0s vector(time())
  {} 0

eval instant at 5s vector(time())
  {} 5

eval instant at 60m vector(time())
  {} 3600

# Tests for clamp_max(), clamp_min(), and clamp().
load 5m
  test_clamp{src="clamp-a"} -50
  test_clamp{src="clamp-b"} 0
  test_clamp{src="clamp-c"} 100

eval instant at 0m clamp_max(test_clamp, 75)
  {src="clamp-a"} -50
  {src="clamp-b"} 0
  {src="clamp-c"} 75

eval instant at 0m clamp_min(test_clamp, -25)
  {src="clamp-a"} -25
  {src="clamp-b"} 0
  {src="clamp-c"} 100

eval instant at 0m clamp(test_clamp, -25, 75)
  {src="clamp-a"} -25
  {src="clamp-b"} 0
  {src="clamp-c"} 75

eval instant at 0m clamp_max(clamp_min(test_clamp, -20), 70)
  {src="clamp-a"} -20
  {src="clamp-b"} 0
  {src="clamp-c"} 70

eval instant at 0m clamp_max((clamp_min(test_clamp, (-20))), (70))
  {src="clamp-a"} -20
  {src="clamp-b"} 0
  {src="clamp-c"} 70

eval instant at 0m clamp(test_clamp, 0, NaN)
  {src="clamp-a"} NaN
  {src="clamp-b"} NaN
  {src="clamp-c"} NaN

eval instant at 0m clamp(test_clamp, NaN, 0)
  {src="clamp-a"} NaN
  {src="clamp-b"} NaN
  {src="clamp-c"} NaN

eval instant at 0m clamp(test_clamp, 5, -5)
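
# When min > max, clamp() drops the series entirely, so the query above is
# expected to return nothing.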

clear

load 1m
  mixed_metric {{schema:0 sum:5 count:4 buckets:[1 2 1]}} 1 2 3 {{schema:0 sum:5 count:4 buckets:[1 2 1]}} {{schema:0 sum:8 count:6 buckets:[1 4 1]}}

# clamp ignores any histograms.
eval range from 0 to 5m step 1m clamp(mixed_metric, 2, 5)
  {} _ 2 2 3

eval range from 0 to 5m step 1m clamp_min(mixed_metric, 2)
  {} _ 2 2 3

eval range from 0 to 5m step 1m clamp_max(mixed_metric, 2)
  {} _ 1 2 2

# Test cases for sgn.
clear
load 5m
  test_sgn{src="sgn-a"} -Inf
  test_sgn{src="sgn-b"} Inf
  test_sgn{src="sgn-c"} NaN
  test_sgn{src="sgn-d"} -50
  test_sgn{src="sgn-e"} 0
  test_sgn{src="sgn-f"} 100

eval instant at 0m sgn(test_sgn)
  {src="sgn-a"} -1
  {src="sgn-b"} 1
  {src="sgn-c"} NaN
  {src="sgn-d"} -1
  {src="sgn-e"} 0
  {src="sgn-f"} 1

# Tests for sort/sort_desc.
clear
load 5m
  http_requests{job="api-server", instance="0", group="production"} 0+10x10
  http_requests{job="api-server", instance="1", group="production"} 0+20x10
  http_requests{job="api-server", instance="0", group="canary"} 0+30x10
  http_requests{job="api-server", instance="1", group="canary"} 0+40x10
  http_requests{job="api-server", instance="2", group="canary"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
  http_requests{job="app-server", instance="0", group="production"} 0+50x10
  http_requests{job="app-server", instance="1", group="production"} 0+60x10
  http_requests{job="app-server", instance="0", group="canary"} 0+70x10
  http_requests{job="app-server", instance="1", group="canary"} 0+80x10

eval_ordered instant at 50m sort(http_requests)
  http_requests{group="production", instance="0", job="api-server"} 100
  http_requests{group="production", instance="1", job="api-server"} 200
  http_requests{group="canary", instance="0", job="api-server"} 300
  http_requests{group="canary", instance="1", job="api-server"} 400
  http_requests{group="production", instance="0", job="app-server"} 500
  http_requests{group="production", instance="1", job="app-server"} 600
  http_requests{group="canary", instance="0", job="app-server"} 700
  http_requests{group="canary", instance="1", job="app-server"} 800
  http_requests{group="canary", instance="2", job="api-server"} NaN

eval_ordered instant at 50m sort_desc(http_requests)
  http_requests{group="canary", instance="1", job="app-server"} 800
  http_requests{group="canary", instance="0", job="app-server"} 700
  http_requests{group="production", instance="1", job="app-server"} 600
  http_requests{group="production", instance="0", job="app-server"} 500
  http_requests{group="canary", instance="1", job="api-server"} 400
  http_requests{group="canary", instance="0", job="api-server"} 300
  http_requests{group="production", instance="1", job="api-server"} 200
  http_requests{group="production", instance="0", job="api-server"} 100
  http_requests{group="canary", instance="2", job="api-server"} NaN

# Tests for sort_by_label/sort_by_label_desc.
clear
load 5m
  http_requests{job="api-server", instance="0", group="production"} 0+10x10
  http_requests{job="api-server", instance="1", group="production"} 0+20x10
  http_requests{job="api-server", instance="0", group="canary"} 0+30x10
  http_requests{job="api-server", instance="1", group="canary"} 0+40x10
  http_requests{job="api-server", instance="2", group="canary"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
  http_requests{job="app-server", instance="0", group="production"} 0+50x10
  http_requests{job="app-server", instance="1", group="production"} 0+60x10
  http_requests{job="app-server", instance="0", group="canary"} 0+70x10
  http_requests{job="app-server", instance="1", group="canary"} 0+80x10
  http_requests{job="api-server", instance="2", group="production"} 0+10x10
  cpu_time_total{job="cpu", cpu="0"} 0+10x10
  cpu_time_total{job="cpu", cpu="1"} 0+10x10
  cpu_time_total{job="cpu", cpu="2"} 0+10x10
  cpu_time_total{job="cpu", cpu="3"} 0+10x10
  cpu_time_total{job="cpu", cpu="10"} 0+10x10
  cpu_time_total{job="cpu", cpu="11"} 0+10x10
  cpu_time_total{job="cpu", cpu="12"} 0+10x10
  cpu_time_total{job="cpu", cpu="20"} 0+10x10
  cpu_time_total{job="cpu", cpu="21"} 0+10x10
  cpu_time_total{job="cpu", cpu="100"} 0+10x10
  node_uname_info{job="node_exporter", instance="4m600", release="1.2.3"} 0+10x10
  node_uname_info{job="node_exporter", instance="4m5", release="1.11.3"} 0+10x10
  node_uname_info{job="node_exporter", instance="4m1000", release="1.111.3"} 0+10x10

eval_ordered instant at 50m sort_by_label(http_requests, "instance")
  http_requests{group="canary", instance="0", job="api-server"} 300
  http_requests{group="canary", instance="0", job="app-server"} 700
  http_requests{group="production", instance="0", job="api-server"} 100
  http_requests{group="production", instance="0", job="app-server"} 500
  http_requests{group="canary", instance="1", job="api-server"} 400
  http_requests{group="canary", instance="1", job="app-server"} 800
  http_requests{group="production", instance="1", job="api-server"} 200
  http_requests{group="production", instance="1", job="app-server"} 600
  http_requests{group="canary", instance="2", job="api-server"} NaN
  http_requests{group="production", instance="2", job="api-server"} 100

eval_ordered instant at 50m sort_by_label(http_requests, "instance", "group")
  http_requests{group="canary", instance="0", job="api-server"} 300
  http_requests{group="canary", instance="0", job="app-server"} 700
  http_requests{group="production", instance="0", job="api-server"} 100
  http_requests{group="production", instance="0", job="app-server"} 500
  http_requests{group="canary", instance="1", job="api-server"} 400
  http_requests{group="canary", instance="1", job="app-server"} 800
  http_requests{group="production", instance="1", job="api-server"} 200
  http_requests{group="production", instance="1", job="app-server"} 600
  http_requests{group="canary", instance="2", job="api-server"} NaN
  http_requests{group="production", instance="2", job="api-server"} 100
eval_ordered instant at 50m sort_by_label(http_requests, "group", "instance", "job")
|
|
|
|
http_requests{group="canary", instance="0", job="api-server"} 300
|
|
|
|
http_requests{group="canary", instance="0", job="app-server"} 700
|
|
|
|
http_requests{group="canary", instance="1", job="api-server"} 400
|
|
|
|
http_requests{group="canary", instance="1", job="app-server"} 800
|
|
|
|
http_requests{group="canary", instance="2", job="api-server"} NaN
|
|
|
|
http_requests{group="production", instance="0", job="api-server"} 100
|
|
|
|
http_requests{group="production", instance="0", job="app-server"} 500
|
|
|
|
http_requests{group="production", instance="1", job="api-server"} 200
|
|
|
|
http_requests{group="production", instance="1", job="app-server"} 600
|
|
|
|
http_requests{group="production", instance="2", job="api-server"} 100
|
|
|
|
|
|
|
|
eval_ordered instant at 50m sort_by_label(http_requests, "job", "instance", "group")
|
|
|
|
http_requests{group="canary", instance="0", job="api-server"} 300
|
|
|
|
http_requests{group="production", instance="0", job="api-server"} 100
|
|
|
|
http_requests{group="canary", instance="1", job="api-server"} 400
|
|
|
|
http_requests{group="production", instance="1", job="api-server"} 200
|
|
|
|
http_requests{group="canary", instance="2", job="api-server"} NaN
|
|
|
|
http_requests{group="production", instance="2", job="api-server"} 100
|
|
|
|
http_requests{group="canary", instance="0", job="app-server"} 700
|
|
|
|
http_requests{group="production", instance="0", job="app-server"} 500
|
|
|
|
http_requests{group="canary", instance="1", job="app-server"} 800
|
|
|
|
http_requests{group="production", instance="1", job="app-server"} 600
|
|
|
|
|
|
|
|
eval_ordered instant at 50m sort_by_label_desc(http_requests, "instance")
|
|
|
|
http_requests{group="production", instance="2", job="api-server"} 100
|
|
|
|
http_requests{group="canary", instance="2", job="api-server"} NaN
|
|
|
|
http_requests{group="production", instance="1", job="app-server"} 600
|
|
|
|
http_requests{group="production", instance="1", job="api-server"} 200
|
2024-08-11 12:24:09 -07:00
|
|
|
http_requests{group="canary", instance="1", job="app-server"} 800
|
|
|
|
http_requests{group="canary", instance="1", job="api-server"} 400
|
2023-11-22 03:06:48 -08:00
|
|
|
http_requests{group="production", instance="0", job="app-server"} 500
|
|
|
|
http_requests{group="production", instance="0", job="api-server"} 100
|
2024-08-11 12:24:09 -07:00
|
|
|
http_requests{group="canary", instance="0", job="app-server"} 700
|
|
|
|
http_requests{group="canary", instance="0", job="api-server"} 300
|
2023-11-22 03:06:48 -08:00
|
|
|
|
|
|
|
eval_ordered instant at 50m sort_by_label_desc(http_requests, "instance", "group")
|
|
|
|
http_requests{group="production", instance="2", job="api-server"} 100
|
|
|
|
http_requests{group="canary", instance="2", job="api-server"} NaN
|
|
|
|
http_requests{group="production", instance="1", job="app-server"} 600
|
|
|
|
http_requests{group="production", instance="1", job="api-server"} 200
|
|
|
|
http_requests{group="canary", instance="1", job="app-server"} 800
|
|
|
|
http_requests{group="canary", instance="1", job="api-server"} 400
|
|
|
|
http_requests{group="production", instance="0", job="app-server"} 500
|
|
|
|
http_requests{group="production", instance="0", job="api-server"} 100
|
|
|
|
http_requests{group="canary", instance="0", job="app-server"} 700
|
|
|
|
http_requests{group="canary", instance="0", job="api-server"} 300
|
|
|
|
|
|
|
|
eval_ordered instant at 50m sort_by_label_desc(http_requests, "instance", "group", "job")
|
|
|
|
http_requests{group="production", instance="2", job="api-server"} 100
|
|
|
|
http_requests{group="canary", instance="2", job="api-server"} NaN
|
|
|
|
http_requests{group="production", instance="1", job="app-server"} 600
|
|
|
|
http_requests{group="production", instance="1", job="api-server"} 200
|
|
|
|
http_requests{group="canary", instance="1", job="app-server"} 800
|
|
|
|
http_requests{group="canary", instance="1", job="api-server"} 400
|
|
|
|
http_requests{group="production", instance="0", job="app-server"} 500
|
|
|
|
http_requests{group="production", instance="0", job="api-server"} 100
|
|
|
|
http_requests{group="canary", instance="0", job="app-server"} 700
|
|
|
|
http_requests{group="canary", instance="0", job="api-server"} 300
|
|
|
|
|
2024-01-16 16:34:09 -08:00
|
|
|
eval_ordered instant at 50m sort_by_label(cpu_time_total, "cpu")
|
|
|
|
cpu_time_total{job="cpu", cpu="0"} 100
|
|
|
|
cpu_time_total{job="cpu", cpu="1"} 100
|
|
|
|
cpu_time_total{job="cpu", cpu="2"} 100
|
|
|
|
cpu_time_total{job="cpu", cpu="3"} 100
|
|
|
|
cpu_time_total{job="cpu", cpu="10"} 100
|
|
|
|
cpu_time_total{job="cpu", cpu="11"} 100
|
|
|
|
cpu_time_total{job="cpu", cpu="12"} 100
|
|
|
|
cpu_time_total{job="cpu", cpu="20"} 100
|
|
|
|
cpu_time_total{job="cpu", cpu="21"} 100
|
|
|
|
cpu_time_total{job="cpu", cpu="100"} 100

eval_ordered instant at 50m sort_by_label(node_uname_info, "instance")
  node_uname_info{job="node_exporter", instance="4m5", release="1.11.3"} 100
  node_uname_info{job="node_exporter", instance="4m600", release="1.2.3"} 100
  node_uname_info{job="node_exporter", instance="4m1000", release="1.111.3"} 100

eval_ordered instant at 50m sort_by_label(node_uname_info, "release")
  node_uname_info{job="node_exporter", instance="4m600", release="1.2.3"} 100
  node_uname_info{job="node_exporter", instance="4m5", release="1.11.3"} 100
  node_uname_info{job="node_exporter", instance="4m1000", release="1.111.3"} 100

# Tests for double_exponential_smoothing.
clear

# positive trends
load 10s
  http_requests{job="api-server", instance="0", group="production"} 0+10x1000 100+30x1000
  http_requests{job="api-server", instance="1", group="production"} 0+20x1000 200+30x1000
  http_requests{job="api-server", instance="0", group="canary"} 0+30x1000 300+80x1000
  http_requests{job="api-server", instance="1", group="canary"} 0+40x2000

eval instant at 8000s double_exponential_smoothing(http_requests[1m], 0.01, 0.1)
  {job="api-server", instance="0", group="production"} 8000
  {job="api-server", instance="1", group="production"} 16000
  {job="api-server", instance="0", group="canary"} 24000
  {job="api-server", instance="1", group="canary"} 32000

# negative trends
clear
load 10s
  http_requests{job="api-server", instance="0", group="production"} 8000-10x1000
  http_requests{job="api-server", instance="1", group="production"} 0-20x1000
  http_requests{job="api-server", instance="0", group="canary"} 0+30x1000 300-80x1000
  http_requests{job="api-server", instance="1", group="canary"} 0-40x1000 0+40x1000

eval instant at 8000s double_exponential_smoothing(http_requests[1m], 0.01, 0.1)
  {job="api-server", instance="0", group="production"} 0
  {job="api-server", instance="1", group="production"} -16000
  {job="api-server", instance="0", group="canary"} 24000
  {job="api-server", instance="1", group="canary"} -32000

# Tests for avg_over_time.
clear
load 10s
  metric 1 2 3 4 5
  metric2 1 2 3 4 Inf
  metric3 1 2 3 4 -Inf
  metric4 1 2 3 Inf -Inf
  metric5 Inf 0 Inf
  metric5b Inf 0 Inf
  metric5c Inf Inf Inf -Inf
  metric6 1 2 3 -Inf -Inf
  metric6b -Inf 0 -Inf
  metric6c -Inf -Inf -Inf Inf
  metric7 1 2 -Inf -Inf Inf
  metric8 9.988465674311579e+307 9.988465674311579e+307
  metric9 -9.988465674311579e+307 -9.988465674311579e+307 -9.988465674311579e+307
  metric10 -9.988465674311579e+307 9.988465674311579e+307

eval instant at 55s avg_over_time(metric[1m])
  {} 3

eval instant at 55s sum_over_time(metric[1m])/count_over_time(metric[1m])
  {} 3

eval instant at 1m avg_over_time(metric2[1m])
  {} Inf

eval instant at 1m sum_over_time(metric2[1m])/count_over_time(metric2[1m])
  {} Inf

eval instant at 1m avg_over_time(metric3[1m])
  {} -Inf

eval instant at 1m sum_over_time(metric3[1m])/count_over_time(metric3[1m])
  {} -Inf

eval instant at 1m avg_over_time(metric4[1m])
  {} NaN

eval instant at 1m sum_over_time(metric4[1m])/count_over_time(metric4[1m])
  {} NaN
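
# metric4 mixes Inf and -Inf; their sum is NaN in IEEE 754 arithmetic, so
# both the direct average and the sum/count variant return NaN.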

eval instant at 1m avg_over_time(metric5[1m])
  {} Inf

eval instant at 1m sum_over_time(metric5[1m])/count_over_time(metric5[1m])
  {} Inf

eval instant at 1m avg_over_time(metric5b[1m])
  {} Inf

eval instant at 1m sum_over_time(metric5b[1m])/count_over_time(metric5b[1m])
  {} Inf

eval instant at 1m avg_over_time(metric5c[1m])
  {} NaN

eval instant at 1m sum_over_time(metric5c[1m])/count_over_time(metric5c[1m])
  {} NaN

eval instant at 1m avg_over_time(metric6[1m])
  {} -Inf

eval instant at 1m sum_over_time(metric6[1m])/count_over_time(metric6[1m])
  {} -Inf

eval instant at 1m avg_over_time(metric6b[1m])
  {} -Inf

eval instant at 1m sum_over_time(metric6b[1m])/count_over_time(metric6b[1m])
  {} -Inf

eval instant at 1m avg_over_time(metric6c[1m])
  {} NaN

eval instant at 1m sum_over_time(metric6c[1m])/count_over_time(metric6c[1m])
  {} NaN

eval instant at 1m avg_over_time(metric7[1m])
  {} NaN

eval instant at 1m sum_over_time(metric7[1m])/count_over_time(metric7[1m])
  {} NaN

eval instant at 1m avg_over_time(metric8[1m])
  {} 9.988465674311579e+307

# This overflows float64.
eval instant at 1m sum_over_time(metric8[2m])/count_over_time(metric8[2m])
  {} +Inf
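
# 2 * 9.988465674311579e+307 is roughly 2.0e+308, beyond the float64 maximum
# of about 1.8e+308, so the intermediate sum overflows to +Inf even though
# the average itself (see above) is representable.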

eval instant at 1m avg_over_time(metric9[1m])
  {} -9.988465674311579e+307

# This overflows float64.
eval instant at 1m sum_over_time(metric9[1m])/count_over_time(metric9[1m])
  {} -Inf

eval instant at 45s avg_over_time(metric10[1m])
  {} 0

eval instant at 1m avg_over_time(metric10[2m])
  {} 0

eval instant at 45s sum_over_time(metric10[1m])/count_over_time(metric10[1m])
  {} 0

eval instant at 1m sum_over_time(metric10[2m])/count_over_time(metric10[2m])
  {} 0

# Test if very big intermediate values cause loss of detail.
clear
load 10s
  metric 1 1e100 1 -1e100

eval instant at 1m sum_over_time(metric[2m])
  {} 2
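
# A naive left-to-right float64 sum of 1 + 1e100 + 1 - 1e100 would return 0,
# because the small terms are absorbed by 1e100. The expected 2 presumably
# relies on compensated (Kahan) summation preserving the low-order bits.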

eval instant at 1m avg_over_time(metric[2m])
  {} 0.5

# Tests for stddev_over_time and stdvar_over_time.
clear
load 10s
  metric 0 8 8 2 3

eval instant at 1m stdvar_over_time(metric[2m])
  {} 10.56

eval instant at 1m stddev_over_time(metric[2m])
  {} 3.249615
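
# Worked numbers: the mean of 0 8 8 2 3 is 4.2, the squared deviations sum
# to 17.64 + 14.44 + 14.44 + 4.84 + 1.44 = 52.8, so the population variance
# is 52.8/5 = 10.56 and the standard deviation is sqrt(10.56) ~ 3.249615.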
|
2016-07-08 05:22:22 -07:00
|
|
|
|
2024-04-08 09:46:52 -07:00
|
|
|
eval instant at 1m stddev_over_time((metric[2m]))
|
2020-01-15 09:31:58 -08:00
|
|
|
{} 3.249615

# Regression tests for stddev_over_time and stdvar_over_time (issue #4927).
clear

load 10s
    metric 1.5990505637277868 1.5990505637277868 1.5990505637277868
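
# Identical samples must give exactly 0, not a tiny non-zero value left
# over from floating-point cancellation (the failure mode behind #4927).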

eval instant at 1m stdvar_over_time(metric[1m])
    {} 0

eval instant at 1m stddev_over_time(metric[1m])
    {} 0

# Tests for mad_over_time.
clear

load 10s
    metric 4 6 2 1 999 1 2
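
# The median of the samples in the window is 2, and the median of the
# absolute deviations from it is 1. The 999 outlier barely moves the
# result, which is the point of the median absolute deviation.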

eval instant at 70s mad_over_time(metric[70s])
    {} 1

# Tests for quantile_over_time.
clear

load 10s
    data{test="two samples"} 0 1
    data{test="three samples"} 0 1 2
    data{test="uneven samples"} 0 1 4

eval instant at 1m quantile_over_time(0, data[2m])
    {test="two samples"} 0
    {test="three samples"} 0
    {test="uneven samples"} 0

eval instant at 1m quantile_over_time(0.5, data[2m])
    {test="two samples"} 0.5
    {test="three samples"} 1
    {test="uneven samples"} 1

eval instant at 1m quantile_over_time(0.75, data[2m])
    {test="two samples"} 0.75
    {test="three samples"} 1.5
    {test="uneven samples"} 2.5

eval instant at 1m quantile_over_time(0.8, data[2m])
    {test="two samples"} 0.8
    {test="three samples"} 1.6
    {test="uneven samples"} 2.8

eval instant at 1m quantile_over_time(1, data[2m])
    {test="two samples"} 1
    {test="three samples"} 2
    {test="uneven samples"} 4

eval_warn instant at 1m quantile_over_time(-1, data[2m])
    {test="two samples"} -Inf
    {test="three samples"} -Inf
    {test="uneven samples"} -Inf

eval_warn instant at 1m quantile_over_time(2, data[2m])
    {test="two samples"} +Inf
    {test="three samples"} +Inf
    {test="uneven samples"} +Inf

eval_warn instant at 1m (quantile_over_time(2, (data[2m])))
    {test="two samples"} +Inf
    {test="three samples"} +Inf
    {test="uneven samples"} +Inf

clear

# Test time-related functions.
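# Without an argument these functions default to vector(time()), so at
# evaluation time 0 they describe the Unix epoch, 1970-01-01 00:00:00 UTC
# (a Thursday).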

eval instant at 0m year()
    {} 1970

eval instant at 1ms time()
    0.001

eval instant at 50m time()
    3000
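
# 1136239445 is 2006-01-02 22:04:05 UTC, a Monday (Go's reference time).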

eval instant at 0m year(vector(1136239445))
    {} 2006

eval instant at 0m month()
    {} 1

eval instant at 0m month(vector(1136239445))
    {} 1

eval instant at 0m day_of_month()
    {} 1

eval instant at 0m day_of_month(vector(1136239445))
    {} 2

eval instant at 0m day_of_year()
    {} 1

eval instant at 0m day_of_year(vector(1136239445))
    {} 2

# Thursday.
eval instant at 0m day_of_week()
    {} 4

eval instant at 0m day_of_week(vector(1136239445))
    {} 1

eval instant at 0m hour()
    {} 0

eval instant at 0m hour(vector(1136239445))
    {} 22

eval instant at 0m minute()
    {} 0

eval instant at 0m minute(vector(1136239445))
    {} 4

# 2008-12-31 23:59:59 just before leap second.
eval instant at 0m year(vector(1230767999))
    {} 2008

# 2009-01-01 00:00:00 just after leap second.
eval instant at 0m year(vector(1230768000))
    {} 2009

# 2016-02-29 23:59:59 February 29th in leap year.
eval instant at 0m month(vector(1456790399)) + day_of_month(vector(1456790399)) / 100
    {} 2.29

# 2016-03-01 00:00:00 March 1st in leap year.
eval instant at 0m month(vector(1456790400)) + day_of_month(vector(1456790400)) / 100
    {} 3.01

# 2016-12-31 13:37:00 366th day in leap year.
eval instant at 0m day_of_year(vector(1483191420))
    {} 366

# 2022-12-31 13:37:00 365th day in non-leap year.
eval instant at 0m day_of_year(vector(1672493820))
    {} 365

# February 1st 2016 in leap year.
eval instant at 0m days_in_month(vector(1454284800))
    {} 29

# February 1st 2017 not in leap year.
eval instant at 0m days_in_month(vector(1485907200))
    {} 28

clear

# Test duplicate labelset in promql output.
load 5m
    testmetric1{src="a",dst="b"} 0
    testmetric2{src="a",dst="b"} 1

eval_fail instant at 0m changes({__name__=~'testmetric1|testmetric2'}[5m])
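
# changes() drops the metric name, so both series collapse to the same
# labelset {dst="b", src="a"}, which is rejected as a duplicate.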

# Tests for *_over_time.
clear

load 10s
    data{type="numbers"} 2 0 3
    data{type="some_nan"} 2 0 NaN
    data{type="some_nan2"} 2 NaN 1
    data{type="some_nan3"} NaN 0 1
    data{type="only_nan"} NaN NaN NaN

eval instant at 1m min_over_time(data[2m])
    {type="numbers"} 0
    {type="some_nan"} 0
    {type="some_nan2"} 1
    {type="some_nan3"} 0
    {type="only_nan"} NaN

eval instant at 1m max_over_time(data[2m])
    {type="numbers"} 3
    {type="some_nan"} 2
    {type="some_nan2"} 2
    {type="some_nan3"} 1
    {type="only_nan"} NaN

eval instant at 1m last_over_time(data[2m])
    data{type="numbers"} 3
    data{type="some_nan"} NaN
    data{type="some_nan2"} 1
    data{type="some_nan3"} 1
    data{type="only_nan"} NaN

clear

# Test for absent().
eval instant at 50m absent(nonexistent)
    {} 1
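
# absent() reconstructs a labelset from the selector where it can: plain
# equality matchers are kept, while regex matchers and conflicting
# duplicate matchers are dropped.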

eval instant at 50m absent(nonexistent{job="testjob", instance="testinstance", method=~".x"})
    {instance="testinstance", job="testjob"} 1

eval instant at 50m absent(nonexistent{job="testjob",job="testjob2",foo="bar"})
    {foo="bar"} 1

eval instant at 50m absent(nonexistent{job="testjob",job="testjob2",job="three",foo="bar"})
    {foo="bar"} 1

eval instant at 50m absent(nonexistent{job="testjob",job=~"testjob2",foo="bar"})
    {foo="bar"} 1

clear

# Don't return anything when there's something there.
load 5m
    http_requests{job="api-server", instance="0", group="production"} 0+10x10

eval instant at 50m absent(http_requests)

eval instant at 50m absent(sum(http_requests))
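
# For anything more complex than a plain vector selector, absent() cannot
# infer labels and falls back to the empty labelset.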

clear

eval instant at 50m absent(sum(nonexistent{job="testjob", instance="testinstance"}))
    {} 1

eval instant at 50m absent(max(nonexistent))
    {} 1

eval instant at 50m absent(nonexistent > 1)
    {} 1

eval instant at 50m absent(a + b)
    {} 1

eval instant at 50m absent(a and b)
    {} 1

eval instant at 50m absent(rate(nonexistent[5m]))
    {} 1

clear

# Testdata for absent_over_time().
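# absent_over_time applies the same label-inference rules as absent(),
# but reports absence over the whole range rather than at one instant.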

eval instant at 1m absent_over_time(http_requests_total[5m])
    {} 1

eval instant at 1m absent_over_time(http_requests_total{handler="/foo"}[5m])
    {handler="/foo"} 1

eval instant at 1m absent_over_time(http_requests_total{handler!="/foo"}[5m])
    {} 1

eval instant at 1m absent_over_time(http_requests_total{handler="/foo", handler="/bar", handler="/foobar"}[5m])
    {} 1

eval instant at 1m absent_over_time(rate(nonexistent[5m])[5m:])
    {} 1

eval instant at 1m absent_over_time(http_requests_total{handler="/foo", handler="/bar", instance="127.0.0.1"}[5m])
    {instance="127.0.0.1"} 1

load 1m
    http_requests_total{path="/foo",instance="127.0.0.1",job="httpd"} 1+1x10
    http_requests_total{path="/bar",instance="127.0.0.1",job="httpd"} 1+1x10
    httpd_handshake_failures_total{instance="127.0.0.1",job="node"} 1+1x15
    httpd_log_lines_total{instance="127.0.0.1",job="node"} 1
    ssl_certificate_expiry_seconds{job="ingress"} NaN NaN NaN NaN NaN
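
# NaN samples still count as present: ssl_certificate_expiry_seconds is
# not reported absent while its NaN samples are in range.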

eval instant at 5m absent_over_time(http_requests_total[5m])

eval instant at 5m absent_over_time(rate(http_requests_total[5m])[5m:1m])

eval instant at 0m absent_over_time(httpd_log_lines_total[30s])

eval instant at 1m absent_over_time(httpd_log_lines_total[30s])
    {} 1

eval instant at 15m absent_over_time(http_requests_total[5m])
    {} 1

eval instant at 15m absent_over_time(http_requests_total[10m])

eval instant at 16m absent_over_time(http_requests_total[6m])
    {} 1

eval instant at 16m absent_over_time(http_requests_total[16m])

eval instant at 16m absent_over_time(httpd_handshake_failures_total[1m])
    {} 1

eval instant at 16m absent_over_time(httpd_handshake_failures_total[2m])

eval instant at 16m absent_over_time({instance="127.0.0.1"}[5m])

eval instant at 21m absent_over_time({instance="127.0.0.1"}[5m])
    {instance="127.0.0.1"} 1

eval instant at 21m absent_over_time({instance="127.0.0.1"}[20m])

eval instant at 21m absent_over_time({job="grok"}[20m])
    {job="grok"} 1

eval instant at 30m absent_over_time({instance="127.0.0.1"}[5m:5s])
    {} 1

eval instant at 5m absent_over_time({job="ingress"}[4m])

eval instant at 10m absent_over_time({job="ingress"}[4m])
    {job="ingress"} 1

clear

# Testdata for present_over_time().
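# present_over_time is the complement of absent_over_time: it returns 1
# for every series with at least one sample in the range and returns
# nothing otherwise. Unlike absent_over_time, it never synthesizes labels.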

eval instant at 1m present_over_time(http_requests_total[5m])

eval instant at 1m present_over_time(http_requests_total{handler="/foo"}[5m])

eval instant at 1m present_over_time(http_requests_total{handler!="/foo"}[5m])

eval instant at 1m present_over_time(http_requests_total{handler="/foo", handler="/bar", handler="/foobar"}[5m])

eval instant at 1m present_over_time(rate(nonexistent[5m])[5m:])

eval instant at 1m present_over_time(http_requests_total{handler="/foo", handler="/bar", instance="127.0.0.1"}[5m])

load 1m
    http_requests_total{path="/foo",instance="127.0.0.1",job="httpd"} 1+1x10
    http_requests_total{path="/bar",instance="127.0.0.1",job="httpd"} 1+1x10
    httpd_handshake_failures_total{instance="127.0.0.1",job="node"} 1+1x15
    httpd_log_lines_total{instance="127.0.0.1",job="node"} 1
    ssl_certificate_expiry_seconds{job="ingress"} NaN NaN NaN NaN NaN

eval instant at 5m present_over_time(http_requests_total[5m])
    {instance="127.0.0.1", job="httpd", path="/bar"} 1
    {instance="127.0.0.1", job="httpd", path="/foo"} 1

eval instant at 5m present_over_time(rate(http_requests_total[5m])[5m:1m])
    {instance="127.0.0.1", job="httpd", path="/bar"} 1
    {instance="127.0.0.1", job="httpd", path="/foo"} 1

eval instant at 0m present_over_time(httpd_log_lines_total[30s])
    {instance="127.0.0.1",job="node"} 1

eval instant at 1m present_over_time(httpd_log_lines_total[30s])

eval instant at 15m present_over_time(http_requests_total[5m])

eval instant at 15m present_over_time(http_requests_total[10m])
    {instance="127.0.0.1", job="httpd", path="/bar"} 1
    {instance="127.0.0.1", job="httpd", path="/foo"} 1

eval instant at 16m present_over_time(http_requests_total[6m])

eval instant at 16m present_over_time(http_requests_total[16m])
    {instance="127.0.0.1", job="httpd", path="/bar"} 1
    {instance="127.0.0.1", job="httpd", path="/foo"} 1

eval instant at 16m present_over_time(httpd_handshake_failures_total[1m])

eval instant at 16m present_over_time({instance="127.0.0.1"}[5m])
    {instance="127.0.0.1",job="node"} 1

eval instant at 21m present_over_time({job="grok"}[20m])

eval instant at 30m present_over_time({instance="127.0.0.1"}[5m:5s])

eval instant at 5m present_over_time({job="ingress"}[4m])
    {job="ingress"} 1

eval instant at 10m present_over_time({job="ingress"}[4m])

clear

# Testing exp() sqrt() log2() log10() ln()
load 5m
    exp_root_log{l="x"} 10
    exp_root_log{l="y"} 20

eval instant at 1m exp(exp_root_log)
    {l="x"} 22026.465794806718
    {l="y"} 485165195.4097903

eval instant at 1m exp(exp_root_log - 10)
    {l="y"} 22026.465794806718
    {l="x"} 1

eval instant at 1m exp(exp_root_log - 20)
    {l="x"} 4.5399929762484854e-05
    {l="y"} 1

eval instant at 1m ln(exp_root_log)
    {l="x"} 2.302585092994046
    {l="y"} 2.995732273553991

eval instant at 1m ln(exp_root_log - 10)
    {l="y"} 2.302585092994046
    {l="x"} -Inf

eval instant at 1m ln(exp_root_log - 20)
    {l="y"} -Inf
    {l="x"} NaN

eval instant at 1m exp(ln(exp_root_log))
    {l="y"} 20
    {l="x"} 10

eval instant at 1m sqrt(exp_root_log)
    {l="x"} 3.1622776601683795
    {l="y"} 4.47213595499958

eval instant at 1m log2(exp_root_log)
    {l="x"} 3.3219280948873626
    {l="y"} 4.321928094887363

eval instant at 1m log2(exp_root_log - 10)
    {l="y"} 3.3219280948873626
    {l="x"} -Inf

eval instant at 1m log2(exp_root_log - 20)
    {l="x"} NaN
    {l="y"} -Inf

eval instant at 1m log10(exp_root_log)
    {l="x"} 1
    {l="y"} 1.301029995663981

eval instant at 1m log10(exp_root_log - 10)
    {l="y"} 1
    {l="x"} -Inf

eval instant at 1m log10(exp_root_log - 20)
    {l="x"} NaN
    {l="y"} -Inf

clear

# Test that timestamp() handles the scenario where there are more steps than samples.
load 1m
    metric 0+1x1000

# We expect the value to be 0 for t=0s to t=59s (inclusive), then 60 for t=60s and t=61s.
eval range from 0 to 61s step 1s timestamp(metric)
    {} 0x59 60 60
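
# timestamp() returns the time of the newest sample at or before each
# step, so the steps between samples repeat the previous sample's
# timestamp instead of interpolating.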

clear

# Check round with mixed data types.
load 1m
    mixed_metric {{schema:0 sum:5 count:4 buckets:[1 2 1]}} 1 2 3 {{schema:0 sum:5 count:4 buckets:[1 2 1]}} {{schema:0 sum:8 count:6 buckets:[1 4 1]}}

eval range from 0 to 5m step 1m round(mixed_metric)
    {} _ 1 2 3
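
# round() only operates on float samples, so the native-histogram samples
# yield no output: the series has a gap at 0m and nothing after 3m.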