diff --git a/cmd/promtool/testdata/unittest.yml b/cmd/promtool/testdata/unittest.yml index ff511729b..24adee714 100644 --- a/cmd/promtool/testdata/unittest.yml +++ b/cmd/promtool/testdata/unittest.yml @@ -113,7 +113,7 @@ tests: - expr: count_over_time(fixed_data[1h]) eval_time: 1h exp_samples: - - value: 61 + - value: 60 - expr: timestamp(fixed_data) eval_time: 1h exp_samples: @@ -183,7 +183,7 @@ tests: - expr: job:test:count_over_time1m eval_time: 1m exp_samples: - - value: 61 + - value: 60 labels: 'job:test:count_over_time1m{job="test"}' - expr: timestamp(job:test:count_over_time1m) eval_time: 1m10s @@ -194,7 +194,7 @@ tests: - expr: job:test:count_over_time1m eval_time: 2m exp_samples: - - value: 61 + - value: 60 labels: 'job:test:count_over_time1m{job="test"}' - expr: timestamp(job:test:count_over_time1m) eval_time: 2m59s999ms diff --git a/promql/engine.go b/promql/engine.go index ea4bc1af8..72869e962 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -2207,20 +2207,20 @@ func (ev *evaluator) matrixIterSlice( mintFloats, mintHistograms := mint, mint // First floats... - if len(floats) > 0 && floats[len(floats)-1].T >= mint { + if len(floats) > 0 && floats[len(floats)-1].T > mint { // There is an overlap between previous and current ranges, retain common // points. In most such cases: // (a) the overlap is significantly larger than the eval step; and/or // (b) the number of samples is relatively small. // so a linear search will be as fast as a binary search. var drop int - for drop = 0; floats[drop].T < mint; drop++ { + for drop = 0; floats[drop].T <= mint; drop++ { } ev.currentSamples -= drop copy(floats, floats[drop:]) floats = floats[:len(floats)-drop] // Only append points with timestamps after the last timestamp we have. - mintFloats = floats[len(floats)-1].T + 1 + mintFloats = floats[len(floats)-1].T } else { ev.currentSamples -= len(floats) if floats != nil { @@ -2229,14 +2229,14 @@ func (ev *evaluator) matrixIterSlice( } // ...then the same for histograms. TODO(beorn7): Use generics? - if len(histograms) > 0 && histograms[len(histograms)-1].T >= mint { + if len(histograms) > 0 && histograms[len(histograms)-1].T > mint { // There is an overlap between previous and current ranges, retain common // points. In most such cases: // (a) the overlap is significantly larger than the eval step; and/or // (b) the number of samples is relatively small. // so a linear search will be as fast as a binary search. var drop int - for drop = 0; histograms[drop].T < mint; drop++ { + for drop = 0; histograms[drop].T <= mint; drop++ { } // Rotate the buffer around the drop index so that points before mint can be // reused to store new histograms. @@ -2247,7 +2247,7 @@ func (ev *evaluator) matrixIterSlice( histograms = histograms[:len(histograms)-drop] ev.currentSamples -= totalHPointSize(histograms) // Only append points with timestamps after the last timestamp we have. - mintHistograms = histograms[len(histograms)-1].T + 1 + mintHistograms = histograms[len(histograms)-1].T } else { ev.currentSamples -= totalHPointSize(histograms) if histograms != nil { @@ -2271,7 +2271,7 @@ loop: case chunkenc.ValFloatHistogram, chunkenc.ValHistogram: t := buf.AtT() // Values in the buffer are guaranteed to be smaller than maxt. - if t >= mintHistograms { + if t > mintHistograms { if histograms == nil { histograms = getMatrixSelectorHPoints() } @@ -2297,7 +2297,7 @@ loop: continue loop } // Values in the buffer are guaranteed to be smaller than maxt. 
- if t >= mintFloats { + if t > mintFloats { ev.currentSamples++ if ev.currentSamples > ev.maxSamples { ev.error(ErrTooManySamples(env)) diff --git a/promql/engine_test.go b/promql/engine_test.go index 6c792f5f3..59c5437b5 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -20,7 +20,6 @@ import ( "math" "os" "sort" - "strconv" "testing" "time" @@ -35,6 +34,8 @@ import ( "github.com/prometheus/prometheus/promql/parser/posrange" "github.com/prometheus/prometheus/promql/promqltest" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/tsdbutil" + "github.com/prometheus/prometheus/util/almost" "github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/util/stats" "github.com/prometheus/prometheus/util/teststorage" @@ -927,22 +928,20 @@ load 10s }, }, { - Query: "max_over_time(metricWith1SampleEvery10Seconds[59s])[20s:5s]", + Query: "max_over_time(metricWith1SampleEvery10Seconds[60s])[20s:5s]", Start: time.Unix(201, 0), PeakSamples: 10, - TotalSamples: 24, // (1 sample / 10 seconds * 60 seconds) * 20/5 (using 59s so we always return 6 samples - // as if we run a query on 00 looking back 60 seconds we will return 7 samples; - // see next test). + TotalSamples: 24, // (1 sample / 10 seconds * 60 seconds) * 4 TotalSamplesPerStep: stats.TotalSamplesPerStep{ 201000: 24, }, }, { - Query: "max_over_time(metricWith1SampleEvery10Seconds[60s])[20s:5s]", + Query: "max_over_time(metricWith1SampleEvery10Seconds[61s])[20s:5s]", Start: time.Unix(201, 0), PeakSamples: 11, TotalSamples: 26, // (1 sample / 10 seconds * 60 seconds) * 4 + 2 as - // max_over_time(metricWith1SampleEvery10Seconds[60s]) @ 190 and 200 will return 7 samples. + // max_over_time(metricWith1SampleEvery10Seconds[61s]) @ 190 and 200 will return 7 samples. TotalSamplesPerStep: stats.TotalSamplesPerStep{ 201000: 26, }, @@ -951,10 +950,9 @@ load 10s Query: "max_over_time(metricWith1HistogramEvery10Seconds[60s])[20s:5s]", Start: time.Unix(201, 0), PeakSamples: 72, - TotalSamples: 312, // (1 histogram (size 12) / 10 seconds * 60 seconds) * 4 + 2 * 12 as - // max_over_time(metricWith1SampleEvery10Seconds[60s]) @ 190 and 200 will return 7 samples. + TotalSamples: 288, // (1 histogram (size 12) / 10 seconds * 60 seconds) * 4 TotalSamplesPerStep: stats.TotalSamplesPerStep{ - 201000: 312, + 201000: 288, }, }, { @@ -1419,23 +1417,23 @@ load 10s }, { // The peak samples in memory is during the first evaluation: - // - Subquery takes 22 samples, 11 for each bigmetric, - // - Result is calculated per series where the series samples is buffered, hence 11 more here. + // - Subquery takes 22 samples, 11 for each bigmetric, but samples on the left bound won't be evaluated + // - Result is calculated per series where the series samples is buffered, hence 10 more here. // - The result of two series is added before the last series buffer is discarded, so 2 more here. - // Hence at peak it is 22 (subquery) + 11 (buffer of a series) + 2 (result from 2 series). + // Hence at peak it is 22 (subquery) + 10 (buffer of a series) + 2 (result from 2 series). // The subquery samples and the buffer is discarded before duplicating. Query: `rate(bigmetric[10s:1s] @ 10)`, - MaxSamples: 35, + MaxSamples: 34, Start: time.Unix(0, 0), End: time.Unix(10, 0), Interval: 5 * time.Second, }, { // Here the reasoning is same as above. But LHS and RHS are done one after another. 
- // So while one of them takes 35 samples at peak, we need to hold the 2 sample + // So while one of them takes 34 samples at peak, we need to hold the 2 sample // result of the other till then. Query: `rate(bigmetric[10s:1s] @ 10) + rate(bigmetric[10s:1s] @ 30)`, - MaxSamples: 37, + MaxSamples: 36, Start: time.Unix(0, 0), End: time.Unix(10, 0), Interval: 5 * time.Second, @@ -1444,20 +1442,20 @@ load 10s // promql.Sample as above but with only 1 part as step invariant. // Here the peak is caused by the non-step invariant part as it touches more time range. // Hence at peak it is 2*21 (subquery from 0s to 20s) - // + 11 (buffer of a series per evaluation) + // + 10 (buffer of a series per evaluation) // + 6 (result from 2 series at 3 eval times). Query: `rate(bigmetric[10s:1s]) + rate(bigmetric[10s:1s] @ 30)`, - MaxSamples: 59, + MaxSamples: 58, Start: time.Unix(10, 0), End: time.Unix(20, 0), Interval: 5 * time.Second, }, { // Nested subquery. - // We saw that innermost rate takes 35 samples which is still the peak + // We saw that innermost rate takes 34 samples which is still the peak // since the other two subqueries just duplicate the result. Query: `rate(rate(bigmetric[10s:1s] @ 10)[100s:25s] @ 1000)[100s:20s] @ 2000`, - MaxSamples: 35, + MaxSamples: 34, Start: time.Unix(10, 0), }, { @@ -1571,11 +1569,11 @@ load 1ms start: 10, result: promql.Matrix{ promql.Series{ - Floats: []promql.FPoint{{F: 28, T: 280000}, {F: 29, T: 290000}, {F: 30, T: 300000}}, + Floats: []promql.FPoint{{F: 29, T: 290000}, {F: 30, T: 300000}}, Metric: lbls1, }, promql.Series{ - Floats: []promql.FPoint{{F: 56, T: 280000}, {F: 58, T: 290000}, {F: 60, T: 300000}}, + Floats: []promql.FPoint{{F: 58, T: 290000}, {F: 60, T: 300000}}, Metric: lbls2, }, }, @@ -1584,7 +1582,7 @@ load 1ms start: 100, result: promql.Matrix{ promql.Series{ - Floats: []promql.FPoint{{F: 3, T: -2000}, {F: 2, T: -1000}, {F: 1, T: 0}}, + Floats: []promql.FPoint{{F: 2, T: -1000}, {F: 1, T: 0}}, Metric: lblsneg, }, }, @@ -1593,7 +1591,7 @@ load 1ms start: 100, result: promql.Matrix{ promql.Series{ - Floats: []promql.FPoint{{F: 504, T: -503000}, {F: 503, T: -502000}, {F: 502, T: -501000}, {F: 501, T: -500000}}, + Floats: []promql.FPoint{{F: 503, T: -502000}, {F: 502, T: -501000}, {F: 501, T: -500000}}, Metric: lblsneg, }, }, @@ -1602,7 +1600,7 @@ load 1ms start: 100, result: promql.Matrix{ promql.Series{ - Floats: []promql.FPoint{{F: 2342, T: 2342}, {F: 2343, T: 2343}, {F: 2344, T: 2344}, {F: 2345, T: 2345}}, + Floats: []promql.FPoint{{F: 2343, T: 2343}, {F: 2344, T: 2344}, {F: 2345, T: 2345}}, Metric: lblsms, }, }, @@ -3059,27 +3057,27 @@ func TestRangeQuery(t *testing.T) { }{ { Name: "sum_over_time with all values", - Load: `load 30s + Load: `load 15s bar 0 1 10 100 1000`, Query: "sum_over_time(bar[30s])", Result: promql.Matrix{ promql.Series{ - Floats: []promql.FPoint{{F: 0, T: 0}, {F: 11, T: 60000}, {F: 1100, T: 120000}}, + Floats: []promql.FPoint{{F: 0, T: 0}, {F: 11, T: 30000}, {F: 1100, T: 60000}}, Metric: labels.EmptyLabels(), }, }, Start: time.Unix(0, 0), - End: time.Unix(120, 0), - Interval: 60 * time.Second, + End: time.Unix(60, 0), + Interval: 30 * time.Second, }, { Name: "sum_over_time with trailing values", - Load: `load 30s + Load: `load 15s bar 0 1 10 100 1000 0 0 0 0`, Query: "sum_over_time(bar[30s])", Result: promql.Matrix{ promql.Series{ - Floats: []promql.FPoint{{F: 0, T: 0}, {F: 11, T: 60000}, {F: 1100, T: 120000}}, + Floats: []promql.FPoint{{F: 0, T: 0}, {F: 1100, T: 60000}, {F: 0, T: 120000}}, Metric: labels.EmptyLabels(), }, 
}, @@ -3094,7 +3092,7 @@ func TestRangeQuery(t *testing.T) { Query: "sum_over_time(bar[30s])", Result: promql.Matrix{ promql.Series{ - Floats: []promql.FPoint{{F: 0, T: 0}, {F: 11, T: 60000}, {F: 1100, T: 120000}, {F: 110000, T: 180000}, {F: 11000000, T: 240000}}, + Floats: []promql.FPoint{{F: 0, T: 0}, {F: 10, T: 60000}, {F: 1000, T: 120000}, {F: 100000, T: 180000}, {F: 10000000, T: 240000}}, Metric: labels.EmptyLabels(), }, }, @@ -3104,18 +3102,18 @@ func TestRangeQuery(t *testing.T) { }, { Name: "sum_over_time with all values random", - Load: `load 30s + Load: `load 15s bar 5 17 42 2 7 905 51`, Query: "sum_over_time(bar[30s])", Result: promql.Matrix{ promql.Series{ - Floats: []promql.FPoint{{F: 5, T: 0}, {F: 59, T: 60000}, {F: 9, T: 120000}, {F: 956, T: 180000}}, + Floats: []promql.FPoint{{F: 5, T: 0}, {F: 59, T: 30000}, {F: 9, T: 60000}, {F: 956, T: 90000}}, Metric: labels.EmptyLabels(), }, }, Start: time.Unix(0, 0), - End: time.Unix(180, 0), - Interval: 60 * time.Second, + End: time.Unix(90, 0), + Interval: 30 * time.Second, }, { Name: "metric query", @@ -3208,6 +3206,1076 @@ func TestRangeQuery(t *testing.T) { } } +func TestNativeHistogramRate(t *testing.T) { + // TODO(beorn7): Integrate histograms into the PromQL testing framework + // and write more tests there. + engine := newTestEngine() + storage := teststorage.New(t) + t.Cleanup(func() { storage.Close() }) + + seriesName := "sparse_histogram_series" + lbls := labels.FromStrings("__name__", seriesName) + + app := storage.Appender(context.Background()) + for i, h := range tsdbutil.GenerateTestHistograms(100) { + _, err := app.AppendHistogram(0, lbls, int64(i)*int64(15*time.Second/time.Millisecond), h, nil) + require.NoError(t, err) + } + require.NoError(t, app.Commit()) + + queryString := fmt.Sprintf("rate(%s[45s])", seriesName) + t.Run("instant_query", func(t *testing.T) { + qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(int64(5*time.Minute/time.Millisecond))) + require.NoError(t, err) + res := qry.Exec(context.Background()) + require.NoError(t, res.Err) + vector, err := res.Vector() + require.NoError(t, err) + require.Len(t, vector, 1) + actualHistogram := vector[0].H + expectedHistogram := &histogram.FloatHistogram{ + CounterResetHint: histogram.GaugeType, + Schema: 1, + ZeroThreshold: 0.001, + ZeroCount: 1. / 15., + Count: 9. / 15., + Sum: 1.226666666666667, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, + PositiveBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.}, + NegativeSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, + NegativeBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.}, + } + require.Equal(t, expectedHistogram, actualHistogram) + }) + + t.Run("range_query", func(t *testing.T) { + step := 30 * time.Second + start := timestamp.Time(int64(5 * time.Minute / time.Millisecond)) + end := start.Add(step) + qry, err := engine.NewRangeQuery(context.Background(), storage, nil, queryString, start, end, step) + require.NoError(t, err) + res := qry.Exec(context.Background()) + require.NoError(t, res.Err) + matrix, err := res.Matrix() + require.NoError(t, err) + require.Len(t, matrix, 1) + require.Len(t, matrix[0].Histograms, 2) + actualHistograms := matrix[0].Histograms + expectedHistograms := []promql.HPoint{{ + T: 300000, + H: &histogram.FloatHistogram{ + CounterResetHint: histogram.GaugeType, + Schema: 1, + ZeroThreshold: 0.001, + ZeroCount: 1. / 15., + Count: 9. 
/ 15., + Sum: 1.226666666666667, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, + PositiveBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.}, + NegativeSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, + NegativeBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.}, + }, + }, { + T: 330000, + H: &histogram.FloatHistogram{ + CounterResetHint: histogram.GaugeType, + Schema: 1, + ZeroThreshold: 0.001, + ZeroCount: 1. / 15., + Count: 9. / 15., + Sum: 1.226666666666667, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, + PositiveBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.}, + NegativeSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, + NegativeBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.}, + }, + }} + require.Equal(t, expectedHistograms, actualHistograms) + }) +} + +func TestNativeFloatHistogramRate(t *testing.T) { + // TODO(beorn7): Integrate histograms into the PromQL testing framework + // and write more tests there. + engine := newTestEngine() + storage := teststorage.New(t) + t.Cleanup(func() { storage.Close() }) + + seriesName := "sparse_histogram_series" + lbls := labels.FromStrings("__name__", seriesName) + + app := storage.Appender(context.Background()) + for i, fh := range tsdbutil.GenerateTestFloatHistograms(100) { + _, err := app.AppendHistogram(0, lbls, int64(i)*int64(15*time.Second/time.Millisecond), nil, fh) + require.NoError(t, err) + } + require.NoError(t, app.Commit()) + + queryString := fmt.Sprintf("rate(%s[61s])", seriesName) + qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(int64(5*time.Minute/time.Millisecond))) + require.NoError(t, err) + res := qry.Exec(context.Background()) + require.NoError(t, res.Err) + vector, err := res.Vector() + require.NoError(t, err) + require.Len(t, vector, 1) + actualHistogram := vector[0].H + expectedHistogram := &histogram.FloatHistogram{ + CounterResetHint: histogram.GaugeType, + Schema: 1, + ZeroThreshold: 0.001, + ZeroCount: 1. / 15., + Count: 9. / 15., + Sum: 1.226666666666667, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, + PositiveBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.}, + NegativeSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, + NegativeBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.}, + } + require.Equal(t, expectedHistogram, actualHistogram) +} + +func TestNativeHistogram_HistogramCountAndSum(t *testing.T) { + // TODO(codesome): Integrate histograms into the PromQL testing framework + // and write more tests there. 
+ h := &histogram.Histogram{ + Count: 24, + ZeroCount: 4, + ZeroThreshold: 0.001, + Sum: 100, + Schema: 0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{2, 1, -2, 3}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + NegativeBuckets: []int64{2, 1, -2, 3}, + } + for _, floatHisto := range []bool{true, false} { + t.Run(fmt.Sprintf("floatHistogram=%t", floatHisto), func(t *testing.T) { + engine := newTestEngine() + storage := teststorage.New(t) + t.Cleanup(func() { storage.Close() }) + + seriesName := "sparse_histogram_series" + lbls := labels.FromStrings("__name__", seriesName) + + ts := int64(10 * time.Minute / time.Millisecond) + app := storage.Appender(context.Background()) + var err error + if floatHisto { + _, err = app.AppendHistogram(0, lbls, ts, nil, h.ToFloat(nil)) + } else { + _, err = app.AppendHistogram(0, lbls, ts, h, nil) + } + require.NoError(t, err) + require.NoError(t, app.Commit()) + + queryString := fmt.Sprintf("histogram_count(%s)", seriesName) + qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) + require.NoError(t, err) + + res := qry.Exec(context.Background()) + require.NoError(t, res.Err) + + vector, err := res.Vector() + require.NoError(t, err) + + require.Len(t, vector, 1) + require.Nil(t, vector[0].H) + if floatHisto { + require.Equal(t, h.ToFloat(nil).Count, vector[0].F) + } else { + require.Equal(t, float64(h.Count), vector[0].F) + } + + queryString = fmt.Sprintf("histogram_sum(%s)", seriesName) + qry, err = engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) + require.NoError(t, err) + + res = qry.Exec(context.Background()) + require.NoError(t, res.Err) + + vector, err = res.Vector() + require.NoError(t, err) + + require.Len(t, vector, 1) + require.Nil(t, vector[0].H) + if floatHisto { + require.Equal(t, h.ToFloat(nil).Sum, vector[0].F) + } else { + require.Equal(t, h.Sum, vector[0].F) + } + }) + } +} + +func TestNativeHistogram_HistogramStdDevVar(t *testing.T) { + // TODO(codesome): Integrate histograms into the PromQL testing framework + // and write more tests there. 
+ testCases := []struct { + name string + h *histogram.Histogram + stdVar float64 + }{ + { + name: "1, 2, 3, 4 low-res", + h: &histogram.Histogram{ + Count: 4, + Sum: 10, + Schema: 2, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 1}, + {Offset: 3, Length: 1}, + {Offset: 2, Length: 2}, + }, + PositiveBuckets: []int64{1, 0, 0, 0}, + }, + stdVar: 1.163807968526718, // actual variance: 1.25 + }, + { + name: "1, 2, 3, 4 hi-res", + h: &histogram.Histogram{ + Count: 4, + Sum: 10, + Schema: 8, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 1}, + {Offset: 255, Length: 1}, + {Offset: 149, Length: 1}, + {Offset: 105, Length: 1}, + }, + PositiveBuckets: []int64{1, 0, 0, 0}, + }, + stdVar: 1.2471347737158793, // actual variance: 1.25 + }, + { + name: "-50, -8, 0, 3, 8, 9, 100", + h: &histogram.Histogram{ + Count: 7, + ZeroCount: 1, + Sum: 62, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: 13, Length: 1}, + {Offset: 10, Length: 1}, + {Offset: 1, Length: 1}, + {Offset: 27, Length: 1}, + }, + PositiveBuckets: []int64{1, 0, 0, 0}, + NegativeSpans: []histogram.Span{ + {Offset: 24, Length: 1}, + {Offset: 21, Length: 1}, + }, + NegativeBuckets: []int64{1, 0}, + }, + stdVar: 1844.4651144196398, // actual variance: 1738.4082 + }, + { + name: "-100000, -10000, -1000, -888, -888, -100, -50, -9, -8, -3", + h: &histogram.Histogram{ + Count: 10, + ZeroCount: 0, + Sum: -112946, + Schema: 0, + NegativeSpans: []histogram.Span{ + {Offset: 2, Length: 3}, + {Offset: 1, Length: 2}, + {Offset: 2, Length: 1}, + {Offset: 3, Length: 1}, + {Offset: 2, Length: 1}, + }, + NegativeBuckets: []int64{1, 0, 0, 0, 0, 2, -2, 0}, + }, + stdVar: 759352122.1939945, // actual variance: 882690990 + }, + { + name: "-10 x10", + h: &histogram.Histogram{ + Count: 10, + ZeroCount: 0, + Sum: -100, + Schema: 0, + NegativeSpans: []histogram.Span{ + {Offset: 4, Length: 1}, + }, + NegativeBuckets: []int64{10}, + }, + stdVar: 1.725830020304794, // actual variance: 0 + }, + { + name: "-50, -8, 0, 3, 8, 9, 100, NaN", + h: &histogram.Histogram{ + Count: 8, + ZeroCount: 1, + Sum: math.NaN(), + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: 13, Length: 1}, + {Offset: 10, Length: 1}, + {Offset: 1, Length: 1}, + {Offset: 27, Length: 1}, + }, + PositiveBuckets: []int64{1, 0, 0, 0}, + NegativeSpans: []histogram.Span{ + {Offset: 24, Length: 1}, + {Offset: 21, Length: 1}, + }, + NegativeBuckets: []int64{1, 0}, + }, + stdVar: math.NaN(), + }, + { + name: "-50, -8, 0, 3, 8, 9, 100, +Inf", + h: &histogram.Histogram{ + Count: 7, + ZeroCount: 1, + Sum: math.Inf(1), + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: 13, Length: 1}, + {Offset: 10, Length: 1}, + {Offset: 1, Length: 1}, + {Offset: 27, Length: 1}, + }, + PositiveBuckets: []int64{1, 0, 0, 0}, + NegativeSpans: []histogram.Span{ + {Offset: 24, Length: 1}, + {Offset: 21, Length: 1}, + }, + NegativeBuckets: []int64{1, 0}, + }, + stdVar: math.NaN(), + }, + } + for _, tc := range testCases { + for _, floatHisto := range []bool{true, false} { + t.Run(fmt.Sprintf("%s floatHistogram=%t", tc.name, floatHisto), func(t *testing.T) { + engine := newTestEngine() + storage := teststorage.New(t) + t.Cleanup(func() { storage.Close() }) + + seriesName := "sparse_histogram_series" + lbls := labels.FromStrings("__name__", seriesName) + + ts := int64(10 * time.Minute / time.Millisecond) + app := storage.Appender(context.Background()) + var err error + if floatHisto { + _, err = app.AppendHistogram(0, lbls, ts, nil, tc.h.ToFloat(nil)) + } else { + _, err = app.AppendHistogram(0, 
lbls, ts, tc.h, nil) + } + require.NoError(t, err) + require.NoError(t, app.Commit()) + + queryString := fmt.Sprintf("histogram_stdvar(%s)", seriesName) + qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) + require.NoError(t, err) + + res := qry.Exec(context.Background()) + require.NoError(t, res.Err) + + vector, err := res.Vector() + require.NoError(t, err) + + require.Len(t, vector, 1) + require.Nil(t, vector[0].H) + require.InEpsilon(t, tc.stdVar, vector[0].F, 1e-12) + + queryString = fmt.Sprintf("histogram_stddev(%s)", seriesName) + qry, err = engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) + require.NoError(t, err) + + res = qry.Exec(context.Background()) + require.NoError(t, res.Err) + + vector, err = res.Vector() + require.NoError(t, err) + + require.Len(t, vector, 1) + require.Nil(t, vector[0].H) + require.InEpsilon(t, math.Sqrt(tc.stdVar), vector[0].F, 1e-12) + }) + } + } +} + +func TestNativeHistogram_HistogramQuantile(t *testing.T) { + // TODO(codesome): Integrate histograms into the PromQL testing framework + // and write more tests there. + type subCase struct { + quantile string + value float64 + } + + cases := []struct { + text string + // Histogram to test. + h *histogram.Histogram + // Different quantiles to test for this histogram. + subCases []subCase + }{ + { + text: "all positive buckets with zero bucket", + h: &histogram.Histogram{ + Count: 12, + ZeroCount: 2, + ZeroThreshold: 0.001, + Sum: 100, // Does not matter. + Schema: 0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{2, 1, -2, 3}, + }, + subCases: []subCase{ + { + quantile: "1.0001", + value: math.Inf(1), + }, + { + quantile: "1", + value: 16, + }, + { + quantile: "0.99", + value: 15.759999999999998, + }, + { + quantile: "0.9", + value: 13.600000000000001, + }, + { + quantile: "0.6", + value: 4.799999999999997, + }, + { + quantile: "0.5", + value: 1.6666666666666665, + }, + { // Zero bucket. + quantile: "0.1", + value: 0.0006000000000000001, + }, + { + quantile: "0", + value: 0, + }, + { + quantile: "-1", + value: math.Inf(-1), + }, + }, + }, + { + text: "all negative buckets with zero bucket", + h: &histogram.Histogram{ + Count: 12, + ZeroCount: 2, + ZeroThreshold: 0.001, + Sum: 100, // Does not matter. + Schema: 0, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + NegativeBuckets: []int64{2, 1, -2, 3}, + }, + subCases: []subCase{ + { + quantile: "1.0001", + value: math.Inf(1), + }, + { // Zero bucket. + quantile: "1", + value: 0, + }, + { // Zero bucket. + quantile: "0.99", + value: -6.000000000000048e-05, + }, + { // Zero bucket. + quantile: "0.9", + value: -0.0005999999999999996, + }, + { + quantile: "0.5", + value: -1.6666666666666667, + }, + { + quantile: "0.1", + value: -13.6, + }, + { + quantile: "0", + value: -16, + }, + { + quantile: "-1", + value: math.Inf(-1), + }, + }, + }, + { + text: "both positive and negative buckets with zero bucket", + h: &histogram.Histogram{ + Count: 24, + ZeroCount: 4, + ZeroThreshold: 0.001, + Sum: 100, // Does not matter. 
+ Schema: 0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{2, 1, -2, 3}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + NegativeBuckets: []int64{2, 1, -2, 3}, + }, + subCases: []subCase{ + { + quantile: "1.0001", + value: math.Inf(1), + }, + { + quantile: "1", + value: 16, + }, + { + quantile: "0.99", + value: 15.519999999999996, + }, + { + quantile: "0.9", + value: 11.200000000000003, + }, + { + quantile: "0.7", + value: 1.2666666666666657, + }, + { // Zero bucket. + quantile: "0.55", + value: 0.0006000000000000005, + }, + { // Zero bucket. + quantile: "0.5", + value: 0, + }, + { // Zero bucket. + quantile: "0.45", + value: -0.0005999999999999996, + }, + { + quantile: "0.3", + value: -1.266666666666667, + }, + { + quantile: "0.1", + value: -11.2, + }, + { + quantile: "0.01", + value: -15.52, + }, + { + quantile: "0", + value: -16, + }, + { + quantile: "-1", + value: math.Inf(-1), + }, + }, + }, + } + + engine := newTestEngine() + storage := teststorage.New(t) + t.Cleanup(func() { storage.Close() }) + idx := int64(0) + for _, floatHisto := range []bool{true, false} { + for _, c := range cases { + t.Run(fmt.Sprintf("%s floatHistogram=%t", c.text, floatHisto), func(t *testing.T) { + seriesName := "sparse_histogram_series" + lbls := labels.FromStrings("__name__", seriesName) + ts := idx * int64(10*time.Minute/time.Millisecond) + app := storage.Appender(context.Background()) + var err error + if floatHisto { + _, err = app.AppendHistogram(0, lbls, ts, nil, c.h.ToFloat(nil)) + } else { + _, err = app.AppendHistogram(0, lbls, ts, c.h, nil) + } + require.NoError(t, err) + require.NoError(t, app.Commit()) + + for j, sc := range c.subCases { + t.Run(fmt.Sprintf("%d %s", j, sc.quantile), func(t *testing.T) { + queryString := fmt.Sprintf("histogram_quantile(%s, %s)", sc.quantile, seriesName) + qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) + require.NoError(t, err) + + res := qry.Exec(context.Background()) + require.NoError(t, res.Err) + + vector, err := res.Vector() + require.NoError(t, err) + + require.Len(t, vector, 1) + require.Nil(t, vector[0].H) + require.True(t, almost.Equal(sc.value, vector[0].F, defaultEpsilon)) + }) + } + idx++ + }) + } + } +} + +func TestNativeHistogram_HistogramFraction(t *testing.T) { + // TODO(codesome): Integrate histograms into the PromQL testing framework + // and write more tests there. + type subCase struct { + lower, upper string + value float64 + } + + invariantCases := []subCase{ + { + lower: "42", + upper: "3.1415", + value: 0, + }, + { + lower: "0", + upper: "0", + value: 0, + }, + { + lower: "0.000001", + upper: "0.000001", + value: 0, + }, + { + lower: "42", + upper: "42", + value: 0, + }, + { + lower: "-3.1", + upper: "-3.1", + value: 0, + }, + { + lower: "3.1415", + upper: "NaN", + value: math.NaN(), + }, + { + lower: "NaN", + upper: "42", + value: math.NaN(), + }, + { + lower: "NaN", + upper: "NaN", + value: math.NaN(), + }, + { + lower: "-Inf", + upper: "+Inf", + value: 1, + }, + } + + cases := []struct { + text string + // Histogram to test. + h *histogram.Histogram + // Different ranges to test for this histogram. 
+ subCases []subCase + }{ + { + text: "empty histogram", + h: &histogram.Histogram{}, + subCases: []subCase{ + { + lower: "3.1415", + upper: "42", + value: math.NaN(), + }, + }, + }, + { + text: "all positive buckets with zero bucket", + h: &histogram.Histogram{ + Count: 12, + ZeroCount: 2, + ZeroThreshold: 0.001, + Sum: 100, // Does not matter. + Schema: 0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{2, 1, -2, 3}, // Abs: 2, 3, 1, 4 + }, + subCases: append([]subCase{ + { + lower: "0", + upper: "+Inf", + value: 1, + }, + { + lower: "-Inf", + upper: "0", + value: 0, + }, + { + lower: "-0.001", + upper: "0", + value: 0, + }, + { + lower: "0", + upper: "0.001", + value: 2. / 12., + }, + { + lower: "0", + upper: "0.0005", + value: 1. / 12., + }, + { + lower: "0.001", + upper: "inf", + value: 10. / 12., + }, + { + lower: "-inf", + upper: "-0.001", + value: 0, + }, + { + lower: "1", + upper: "2", + value: 3. / 12., + }, + { + lower: "1.5", + upper: "2", + value: 1.5 / 12., + }, + { + lower: "1", + upper: "8", + value: 4. / 12., + }, + { + lower: "1", + upper: "6", + value: 3.5 / 12., + }, + { + lower: "1.5", + upper: "6", + value: 2. / 12., + }, + { + lower: "-2", + upper: "-1", + value: 0, + }, + { + lower: "-2", + upper: "-1.5", + value: 0, + }, + { + lower: "-8", + upper: "-1", + value: 0, + }, + { + lower: "-6", + upper: "-1", + value: 0, + }, + { + lower: "-6", + upper: "-1.5", + value: 0, + }, + }, invariantCases...), + }, + { + text: "all negative buckets with zero bucket", + h: &histogram.Histogram{ + Count: 12, + ZeroCount: 2, + ZeroThreshold: 0.001, + Sum: 100, // Does not matter. + Schema: 0, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + NegativeBuckets: []int64{2, 1, -2, 3}, + }, + subCases: append([]subCase{ + { + lower: "0", + upper: "+Inf", + value: 0, + }, + { + lower: "-Inf", + upper: "0", + value: 1, + }, + { + lower: "-0.001", + upper: "0", + value: 2. / 12., + }, + { + lower: "0", + upper: "0.001", + value: 0, + }, + { + lower: "-0.0005", + upper: "0", + value: 1. / 12., + }, + { + lower: "0.001", + upper: "inf", + value: 0, + }, + { + lower: "-inf", + upper: "-0.001", + value: 10. / 12., + }, + { + lower: "1", + upper: "2", + value: 0, + }, + { + lower: "1.5", + upper: "2", + value: 0, + }, + { + lower: "1", + upper: "8", + value: 0, + }, + { + lower: "1", + upper: "6", + value: 0, + }, + { + lower: "1.5", + upper: "6", + value: 0, + }, + { + lower: "-2", + upper: "-1", + value: 3. / 12., + }, + { + lower: "-2", + upper: "-1.5", + value: 1.5 / 12., + }, + { + lower: "-8", + upper: "-1", + value: 4. / 12., + }, + { + lower: "-6", + upper: "-1", + value: 3.5 / 12., + }, + { + lower: "-6", + upper: "-1.5", + value: 2. / 12., + }, + }, invariantCases...), + }, + { + text: "both positive and negative buckets with zero bucket", + h: &histogram.Histogram{ + Count: 24, + ZeroCount: 4, + ZeroThreshold: 0.001, + Sum: 100, // Does not matter. + Schema: 0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{2, 1, -2, 3}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + NegativeBuckets: []int64{2, 1, -2, 3}, + }, + subCases: append([]subCase{ + { + lower: "0", + upper: "+Inf", + value: 0.5, + }, + { + lower: "-Inf", + upper: "0", + value: 0.5, + }, + { + lower: "-0.001", + upper: "0", + value: 2. / 24, + }, + { + lower: "0", + upper: "0.001", + value: 2. 
/ 24., + }, + { + lower: "-0.0005", + upper: "0.0005", + value: 2. / 24., + }, + { + lower: "0.001", + upper: "inf", + value: 10. / 24., + }, + { + lower: "-inf", + upper: "-0.001", + value: 10. / 24., + }, + { + lower: "1", + upper: "2", + value: 3. / 24., + }, + { + lower: "1.5", + upper: "2", + value: 1.5 / 24., + }, + { + lower: "1", + upper: "8", + value: 4. / 24., + }, + { + lower: "1", + upper: "6", + value: 3.5 / 24., + }, + { + lower: "1.5", + upper: "6", + value: 2. / 24., + }, + { + lower: "-2", + upper: "-1", + value: 3. / 24., + }, + { + lower: "-2", + upper: "-1.5", + value: 1.5 / 24., + }, + { + lower: "-8", + upper: "-1", + value: 4. / 24., + }, + { + lower: "-6", + upper: "-1", + value: 3.5 / 24., + }, + { + lower: "-6", + upper: "-1.5", + value: 2. / 24., + }, + }, invariantCases...), + }, + } + idx := int64(0) + for _, floatHisto := range []bool{true, false} { + for _, c := range cases { + t.Run(fmt.Sprintf("%s floatHistogram=%t", c.text, floatHisto), func(t *testing.T) { + engine := newTestEngine() + storage := teststorage.New(t) + t.Cleanup(func() { storage.Close() }) + + seriesName := "sparse_histogram_series" + lbls := labels.FromStrings("__name__", seriesName) + + ts := idx * int64(10*time.Minute/time.Millisecond) + app := storage.Appender(context.Background()) + var err error + if floatHisto { + _, err = app.AppendHistogram(0, lbls, ts, nil, c.h.ToFloat(nil)) + } else { + _, err = app.AppendHistogram(0, lbls, ts, c.h, nil) + } + require.NoError(t, err) + require.NoError(t, app.Commit()) + + for j, sc := range c.subCases { + t.Run(fmt.Sprintf("%d %s %s", j, sc.lower, sc.upper), func(t *testing.T) { + queryString := fmt.Sprintf("histogram_fraction(%s, %s, %s)", sc.lower, sc.upper, seriesName) + qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) + require.NoError(t, err) + + res := qry.Exec(context.Background()) + require.NoError(t, res.Err) + + vector, err := res.Vector() + require.NoError(t, err) + + require.Len(t, vector, 1) + require.Nil(t, vector[0].H) + if math.IsNaN(sc.value) { + require.True(t, math.IsNaN(vector[0].F)) + return + } + require.Equal(t, sc.value, vector[0].F) + }) + } + idx++ + }) + } + } +} + func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) { // TODO(codesome): Integrate histograms into the PromQL testing framework // and write more tests there. @@ -3335,7 +4403,7 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) { _, err := app.Append(0, labels.FromStrings("__name__", "float_series", "idx", "0"), ts, 42) require.NoError(t, err) for idx1, h := range c.histograms { - lbls := labels.FromStrings("__name__", seriesName, "idx", strconv.Itoa(idx1)) + lbls := labels.FromStrings("__name__", seriesName, "idx", fmt.Sprintf("%d", idx1)) // Since we mutate h later, we need to create a copy here. var err error if floatHisto { @@ -3403,8 +4471,8 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) { queryString = fmt.Sprintf("avg(%s)", seriesName) queryAndCheck(queryString, ts, []promql.Sample{{T: ts, H: &c.expectedAvg, Metric: labels.EmptyLabels()}}) - offset := int64(len(c.histograms) - 1) - newTs := ts + offset*int64(time.Minute/time.Millisecond) + offset := len(c.histograms) + newTs := ts + int64(time.Minute/time.Millisecond)*int64(offset-1) // sum_over_time(). 
queryString = fmt.Sprintf("sum_over_time(%s[%dm:1m])", seriesNameOverTime, offset) @@ -3607,7 +4675,7 @@ func TestNativeHistogram_SubOperator(t *testing.T) { ts := idx0 * int64(10*time.Minute/time.Millisecond) app := storage.Appender(context.Background()) for idx1, h := range c.histograms { - lbls := labels.FromStrings("__name__", seriesName, "idx", strconv.Itoa(idx1)) + lbls := labels.FromStrings("__name__", seriesName, "idx", fmt.Sprintf("%d", idx1)) // Since we mutate h later, we need to create a copy here. var err error if floatHisto { diff --git a/promql/promqltest/testdata/at_modifier.test b/promql/promqltest/testdata/at_modifier.test index 3ba6afc49..7174a8423 100644 --- a/promql/promqltest/testdata/at_modifier.test +++ b/promql/promqltest/testdata/at_modifier.test @@ -76,45 +76,43 @@ eval instant at 25s sum_over_time(metric{job="1"}[100s:1s] offset 20s @ 100) # Since vector selector has timestamp, the result value does not depend on the timestamp of subqueries. # Inner most sum=1+2+...+10=55. -# With [100s:25s] subquery, it's 55*5. +# With [100s:25s] subquery, it's 55*4. eval instant at 100s sum_over_time(sum_over_time(metric{job="1"}[100s] @ 100)[100s:25s] @ 50) - {job="1"} 275 + {job="1"} 220 # Nested subqueries with different timestamps on both. # Since vector selector has timestamp, the result value does not depend on the timestamp of subqueries. -# Sum of innermost subquery is 275 as above. The outer subquery repeats it 4 times. +# Sum of innermost subquery is 220 as above. The outer subquery repeats it 3 times. eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[100s] @ 100)[100s:25s] @ 50)[3s:1s] @ 3000) - {job="1"} 1100 + {job="1"} 660 # Testing the inner subquery timestamp since vector selector does not have @. # Inner sum for subquery [100s:25s] @ 50 are -# at -50 nothing, at -25 nothing, at 0=0, at 25=2, at 50=4+5=9. -# This sum of 11 is repeated 4 times by outer subquery. +# at -50 nothing, at -25 nothing, at 0=0, at 25=2, at 50=5. +# This sum of 7 is repeated 3 times by outer subquery. eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[10s])[100s:25s] @ 50)[3s:1s] @ 200) - {job="1"} 44 + {job="1"} 21 # Inner sum for subquery [100s:25s] @ 200 are -# at 100=9+10, at 125=12, at 150=14+15, at 175=17, at 200=19+20. -# This sum of 116 is repeated 4 times by outer subquery. +# at 125=12, at 150=15, at 175=17, at 200=20. +# This sum of 64 is repeated 3 times by outer subquery. eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[10s])[100s:25s] @ 200)[3s:1s] @ 50) - {job="1"} 464 + {job="1"} 192 # Nested subqueries with timestamp only on outer subquery. # Outer most subquery: -# at 900=783 -# inner subquery: at 870=87+86+85, at 880=88+87+86, at 890=89+88+87 -# at 925=537 -# inner subquery: at 895=89+88, at 905=90+89, at 915=90+91 -# at 950=828 -# inner subquery: at 920=92+91+90, at 930=93+92+91, at 940=94+93+92 -# at 975=567 -# inner subquery: at 945=94+93, at 955=95+94, at 965=96+95 -# at 1000=873 -# inner subquery: at 970=97+96+95, at 980=98+97+96, at 990=99+98+97 +# at 925=360 +# inner subquery: at 905=90+89, at 915=91+90 +# at 950=372 +# inner subquery: at 930=93+92, at 940=94+93 +# at 975=380 +# inner subquery: at 955=95+94, at 965=96+95 +# at 1000=392 +# inner subquery: at 980=98+97, at 990=99+98 eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[20s])[20s:10s] offset 10s)[100s:25s] @ 1000) - {job="1"} 3588 + {job="1"} 1504 # minute is counted on the value of the sample. 
eval instant at 10s minute(metric @ 1500) @@ -137,32 +135,32 @@ eval instant at 15m timestamp(timestamp(metric{job="1"} @ 10)) # minute is counted on the value of the sample. eval instant at 0s sum_over_time(minute(metric @ 1500)[100s:10s]) - {job="1"} 22 - {job="2"} 55 + {job="1"} 20 + {job="2"} 50 # If nothing passed, minute() takes eval time. # Here the eval time is determined by the subquery. # [50m:1m] at 6000, i.e. 100m, is 50m to 100m. -# sum=50+51+52+...+59+0+1+2+...+40. +# sum=51+52+...+59+0+1+2+...+40. eval instant at 0s sum_over_time(minute()[50m:1m] @ 6000) + {} 1315 + +# sum=46+47+...+59+0+1+2+...+35. +eval instant at 0s sum_over_time(minute()[50m:1m] @ 6000 offset 5m) {} 1365 -# sum=45+46+47+...+59+0+1+2+...+35. -eval instant at 0s sum_over_time(minute()[50m:1m] @ 6000 offset 5m) - {} 1410 - # time() is the eval time which is determined by subquery here. -# 2900+2901+...+3000 = (3000*3001 - 2899*2900)/2. +# 2901+...+3000 = (3000*3001 - 2900*2901)/2. eval instant at 0s sum_over_time(vector(time())[100s:1s] @ 3000) - {} 297950 + {} 295050 -# 2300+2301+...+2400 = (2400*2401 - 2299*2300)/2. +# 2301+...+2400 = (2400*2401 - 2300*2301)/2. eval instant at 0s sum_over_time(vector(time())[100s:1s] @ 3000 offset 600s) - {} 237350 + {} 235050 # timestamp() takes the time of the sample and not the evaluation time. eval instant at 0s sum_over_time(timestamp(metric{job="1"} @ 10)[100s:10s] @ 3000) - {job="1"} 110 + {job="1"} 100 # The result of inner timestamp() will have the timestamp as the # eval time, hence entire expression is not step invariant and depends on eval time. diff --git a/promql/promqltest/testdata/functions.test b/promql/promqltest/testdata/functions.test index 2c198374a..60f35bae9 100644 --- a/promql/promqltest/testdata/functions.test +++ b/promql/promqltest/testdata/functions.test @@ -6,9 +6,6 @@ load 5m # Tests for resets(). eval instant at 50m resets(http_requests[5m]) - {path="/foo"} 0 - {path="/bar"} 0 - {path="/biz"} 0 eval instant at 50m resets(http_requests[20m]) {path="/foo"} 1 @@ -16,8 +13,8 @@ eval instant at 50m resets(http_requests[20m]) {path="/foo"} 1 {path="/bar"} 0 {path="/biz"} 0 eval instant at 50m resets(http_requests[30m]) - {path="/foo"} 2 - {path="/bar"} 1 + {path="/foo"} 1 + {path="/bar"} 0 {path="/biz"} 0 eval instant at 50m resets(http_requests[50m]) @@ -29,28 +26,25 @@ eval instant at 50m resets(nonexistent_metric[50m]) # Tests for changes(). eval instant at 50m changes(http_requests[5m]) - {path="/foo"} 0 - {path="/bar"} 0 - {path="/biz"} 0 eval instant at 50m changes(http_requests[20m]) - {path="/foo"} 3 - {path="/bar"} 3 + {path="/foo"} 2 + {path="/bar"} 2 {path="/biz"} 0 eval instant at 50m changes(http_requests[30m]) - {path="/foo"} 4 - {path="/bar"} 5 - {path="/biz"} 1 + {path="/foo"} 3 + {path="/bar"} 4 + {path="/biz"} 0 eval instant at 50m changes(http_requests[50m]) - {path="/foo"} 8 - {path="/bar"} 9 + {path="/foo"} 7 + {path="/bar"} 8 {path="/biz"} 1 eval instant at 50m changes((http_requests[50m])) - {path="/foo"} 8 - {path="/bar"} 9 + {path="/foo"} 7 + {path="/bar"} 8 {path="/biz"} 1 eval instant at 50m changes(nonexistent_metric[50m]) @@ -63,7 +57,7 @@ load 5m eval instant at 15m changes(x[15m]) {a="b"} 0 - {a="c"} 2 + {a="c"} 1 clear @@ -77,7 +71,7 @@ load 5m # Tests for increase(). eval instant at 50m increase(http_requests[50m]) {path="/foo"} 100 - {path="/bar"} 90 + {path="/bar"} 88.88888888888889 {path="/dings"} 100 {path="/bumms"} 100 @@ -115,11 +109,10 @@ load 5m # Counter resets in the middle of range are handled correctly by rate(). 
eval instant at 50m rate(testcounter_reset_middle[50m]) - {} 0.03 + {} 0.02962962962962963 # Counter resets at end of range are ignored by rate(). eval instant at 50m rate(testcounter_reset_end[5m]) - {} 0 clear @@ -237,19 +230,19 @@ eval instant at 50m deriv(testcounter_reset_middle[100m]) # intercept at t=3000: 37.27272727272727 # intercept at t=3000+3600: 70 eval instant at 50m predict_linear(testcounter_reset_middle[50m], 3600) - {} 76.81818181818181 + {} 70 # intercept at t = 3000+3600 = 6600 eval instant at 50m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600) - {} 76.81818181818181 + {} 70 # intercept at t = 600+3600 = 4200 eval instant at 10m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600) - {} 51.36363636363637 + {} 48.18181818181818 # intercept at t = 4200+3600 = 7800 eval instant at 70m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600) - {} 89.54545454545455 + {} 80.9090909090909 # With http_requests, there is a sample value exactly at the end of # the range, and it has exactly the predicted value, so predict_linear @@ -678,10 +671,10 @@ load 10s metric10 -9.988465674311579e+307 9.988465674311579e+307 eval instant at 1m avg_over_time(metric[1m]) - {} 3 + {} 3.5 eval instant at 1m sum_over_time(metric[1m])/count_over_time(metric[1m]) - {} 3 + {} 3.5 eval instant at 1m avg_over_time(metric2[1m]) {} Inf @@ -748,8 +741,8 @@ eval instant at 1m avg_over_time(metric8[1m]) {} 9.988465674311579e+307 # This overflows float64. -eval instant at 1m sum_over_time(metric8[1m])/count_over_time(metric8[1m]) - {} Inf +eval instant at 1m sum_over_time(metric8[2m])/count_over_time(metric8[2m]) + {} +Inf eval instant at 1m avg_over_time(metric9[1m]) {} -9.988465674311579e+307 @@ -758,10 +751,16 @@ eval instant at 1m avg_over_time(metric9[1m]) eval instant at 1m sum_over_time(metric9[1m])/count_over_time(metric9[1m]) {} -Inf -eval instant at 1m avg_over_time(metric10[1m]) +eval instant at 45s avg_over_time(metric10[1m]) {} 0 -eval instant at 1m sum_over_time(metric10[1m])/count_over_time(metric10[1m]) +eval instant at 1m avg_over_time(metric10[2m]) + {} 0 + +eval instant at 45s sum_over_time(metric10[1m])/count_over_time(metric10[1m]) + {} 0 + +eval instant at 1m sum_over_time(metric10[2m])/count_over_time(metric10[2m]) {} 0 # Test if very big intermediate values cause loss of detail. @@ -770,7 +769,7 @@ load 10s metric 1 1e100 1 -1e100 eval instant at 1m sum_over_time(metric[1m]) - {} 2 + {} 1 # Tests for stddev_over_time and stdvar_over_time. clear @@ -778,13 +777,13 @@ load 10s metric 0 8 8 2 3 eval instant at 1m stdvar_over_time(metric[1m]) - {} 10.56 + {} 7.6875 eval instant at 1m stddev_over_time(metric[1m]) - {} 3.249615 + {} 2.7726341266023544 eval instant at 1m stddev_over_time((metric[1m])) - {} 3.249615 + {} 2.7726341266023544 # Tests for stddev_over_time and stdvar_over_time #4927. 
clear @@ -814,24 +813,24 @@ load 10s data{test="uneven samples"} 0 1 4 eval instant at 1m quantile_over_time(0, data[1m]) - {test="two samples"} 0 - {test="three samples"} 0 - {test="uneven samples"} 0 - -eval instant at 1m quantile_over_time(0.5, data[1m]) - {test="two samples"} 0.5 + {test="two samples"} 1 {test="three samples"} 1 {test="uneven samples"} 1 -eval instant at 1m quantile_over_time(0.75, data[1m]) - {test="two samples"} 0.75 +eval instant at 1m quantile_over_time(0.5, data[1m]) + {test="two samples"} 1 {test="three samples"} 1.5 {test="uneven samples"} 2.5 +eval instant at 1m quantile_over_time(0.75, data[1m]) + {test="two samples"} 1 + {test="three samples"} 1.75 + {test="uneven samples"} 3.25 + eval instant at 1m quantile_over_time(0.8, data[1m]) - {test="two samples"} 0.8 - {test="three samples"} 1.6 - {test="uneven samples"} 2.8 + {test="two samples"} 1 + {test="three samples"} 1.8 + {test="uneven samples"} 3.4000000000000004 eval instant at 1m quantile_over_time(1, data[1m]) {test="two samples"} 1 @@ -965,8 +964,8 @@ eval instant at 1m min_over_time(data[1m]) eval instant at 1m max_over_time(data[1m]) {type="numbers"} 3 - {type="some_nan"} 2 - {type="some_nan2"} 2 + {type="some_nan"} 0 + {type="some_nan2"} 1 {type="some_nan3"} 1 {type="only_nan"} NaN @@ -1063,13 +1062,19 @@ eval instant at 1m absent_over_time(httpd_log_lines_total[30s]) {} 1 eval instant at 15m absent_over_time(http_requests[5m]) - -eval instant at 16m absent_over_time(http_requests[5m]) {} 1 +eval instant at 15m absent_over_time(http_requests[10m]) + eval instant at 16m absent_over_time(http_requests[6m]) + {} 1 + +eval instant at 16m absent_over_time(http_requests[16m]) eval instant at 16m absent_over_time(httpd_handshake_failures_total[1m]) + {} 1 + +eval instant at 16m absent_over_time(httpd_handshake_failures_total[2m]) eval instant at 16m absent_over_time({instance="127.0.0.1"}[5m]) @@ -1125,17 +1130,18 @@ eval instant at 0m present_over_time(httpd_log_lines_total[30s]) eval instant at 1m present_over_time(httpd_log_lines_total[30s]) eval instant at 15m present_over_time(http_requests[5m]) + +eval instant at 15m present_over_time(http_requests[10m]) {instance="127.0.0.1", job="httpd", path="/bar"} 1 {instance="127.0.0.1", job="httpd", path="/foo"} 1 -eval instant at 16m present_over_time(http_requests[5m]) - eval instant at 16m present_over_time(http_requests[6m]) + +eval instant at 16m present_over_time(http_requests[16m]) {instance="127.0.0.1", job="httpd", path="/bar"} 1 {instance="127.0.0.1", job="httpd", path="/foo"} 1 eval instant at 16m present_over_time(httpd_handshake_failures_total[1m]) - {instance="127.0.0.1", job="node"} 1 eval instant at 16m present_over_time({instance="127.0.0.1"}[5m]) {instance="127.0.0.1",job="node"} 1 diff --git a/promql/promqltest/testdata/histograms.test b/promql/promqltest/testdata/histograms.test index f30c07e7b..3fc4bea49 100644 --- a/promql/promqltest/testdata/histograms.test +++ b/promql/promqltest/testdata/histograms.test @@ -93,15 +93,15 @@ eval instant at 50m histogram_quantile(0.8, testhistogram_bucket) {start="negative"} 0.3 # More realistic with rates. 
-eval instant at 50m histogram_quantile(0.2, rate(testhistogram_bucket[5m])) +eval instant at 50m histogram_quantile(0.2, rate(testhistogram_bucket[10m])) {start="positive"} 0.048 {start="negative"} -0.2 -eval instant at 50m histogram_quantile(0.5, rate(testhistogram_bucket[5m])) +eval instant at 50m histogram_quantile(0.5, rate(testhistogram_bucket[10m])) {start="positive"} 0.15 {start="negative"} -0.15 -eval instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[5m])) +eval instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[10m])) {start="positive"} 0.72 {start="negative"} 0.3 @@ -125,58 +125,58 @@ eval instant at 47m histogram_quantile(5./6., rate(testhistogram2_bucket[15m])) {} 5 # Aggregated histogram: Everything in one. -eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le)) +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le)) {} 0.075 -eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le)) +eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le)) {} 0.1277777777777778 # Aggregated histogram: Everything in one. Now with avg, which does not change anything. -eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds_bucket[5m])) by (le)) +eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds_bucket[10m])) by (le)) {} 0.075 -eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds_bucket[5m])) by (le)) +eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds_bucket[10m])) by (le)) {} 0.12777777777777778 # Aggregated histogram: By instance. -eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance)) +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, instance)) {instance="ins1"} 0.075 {instance="ins2"} 0.075 -eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance)) +eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, instance)) {instance="ins1"} 0.1333333333 {instance="ins2"} 0.125 # Aggregated histogram: By job. -eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job)) +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, job)) {job="job1"} 0.1 {job="job2"} 0.0642857142857143 -eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job)) +eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, job)) {job="job1"} 0.14 {job="job2"} 0.1125 # Aggregated histogram: By job and instance. 
-eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance)) +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, job, instance)) {instance="ins1", job="job1"} 0.11 {instance="ins2", job="job1"} 0.09 {instance="ins1", job="job2"} 0.06 {instance="ins2", job="job2"} 0.0675 -eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance)) +eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, job, instance)) {instance="ins1", job="job1"} 0.15 {instance="ins2", job="job1"} 0.1333333333333333 {instance="ins1", job="job2"} 0.1 {instance="ins2", job="job2"} 0.1166666666666667 # The unaggregated histogram for comparison. Same result as the previous one. -eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds_bucket[5m])) +eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds_bucket[10m])) {instance="ins1", job="job1"} 0.11 {instance="ins2", job="job1"} 0.09 {instance="ins1", job="job2"} 0.06 {instance="ins2", job="job2"} 0.0675 -eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[5m])) +eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[10m])) {instance="ins1", job="job1"} 0.15 {instance="ins2", job="job1"} 0.13333333333333333 {instance="ins1", job="job2"} 0.1 @@ -205,15 +205,15 @@ eval instant at 50m histogram_quantile(0.99, nonmonotonic_bucket) {} 979.75 # Buckets with different representations of the same upper bound. -eval instant at 50m histogram_quantile(0.5, rate(mixed_bucket[5m])) +eval instant at 50m histogram_quantile(0.5, rate(mixed_bucket[10m])) {instance="ins1", job="job1"} 0.15 {instance="ins2", job="job1"} NaN -eval instant at 50m histogram_quantile(0.75, rate(mixed_bucket[5m])) +eval instant at 50m histogram_quantile(0.75, rate(mixed_bucket[10m])) {instance="ins1", job="job1"} 0.2 {instance="ins2", job="job1"} NaN -eval instant at 50m histogram_quantile(1, rate(mixed_bucket[5m])) +eval instant at 50m histogram_quantile(1, rate(mixed_bucket[10m])) {instance="ins1", job="job1"} 0.2 {instance="ins2", job="job1"} NaN @@ -222,7 +222,7 @@ load 5m empty_bucket{le="0.2", job="job1", instance="ins1"} 0x10 empty_bucket{le="+Inf", job="job1", instance="ins1"} 0x10 -eval instant at 50m histogram_quantile(0.2, rate(empty_bucket[5m])) +eval instant at 50m histogram_quantile(0.2, rate(empty_bucket[10m])) {instance="ins1", job="job1"} NaN # Load a duplicate histogram with a different name to test failure scenario on multiple histograms with the same label set diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index 37818e4f8..93fbea430 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -133,8 +133,8 @@ eval instant at 50m histogram_quantile(0.5, incr_histogram) {} 1.5 # Per-second average rate of increase should be 1/(5*60) for count and buckets, then 2/(5*60) for sum. -eval instant at 50m rate(incr_histogram[5m]) - {} {{count:0.0033333333333333335 sum:0.006666666666666667 offset:1 buckets:[0.0033333333333333335]}} +eval instant at 50m rate(incr_histogram[10m]) + {} {{count:0.0033333333333333335 sum:0.006666666666666667 offset:1 buckets:[0.0033333333333333335]}} # Calculate the 50th percentile of observations over the last 10m. 
eval instant at 50m histogram_quantile(0.5, rate(incr_histogram[10m])) diff --git a/promql/promqltest/testdata/operators.test b/promql/promqltest/testdata/operators.test index df2311b9b..2e0872c0a 100644 --- a/promql/promqltest/testdata/operators.test +++ b/promql/promqltest/testdata/operators.test @@ -113,8 +113,8 @@ eval instant at 50m http_requests{job="api-server", group="canary"} http_requests{group="canary", instance="0", job="api-server"} 300 http_requests{group="canary", instance="1", job="api-server"} 400 -eval instant at 50m http_requests{job="api-server", group="canary"} + rate(http_requests{job="api-server"}[5m]) * 5 * 60 - {group="canary", instance="0", job="api-server"} 330 +eval instant at 50m http_requests{job="api-server", group="canary"} + rate(http_requests{job="api-server"}[10m]) * 5 * 60 + {group="canary", instance="0", job="api-server"} 330 {group="canary", instance="1", job="api-server"} 440 eval instant at 50m rate(http_requests[25m]) * 25 * 60 diff --git a/promql/promqltest/testdata/staleness.test b/promql/promqltest/testdata/staleness.test index 76ee2f287..06081963c 100644 --- a/promql/promqltest/testdata/staleness.test +++ b/promql/promqltest/testdata/staleness.test @@ -30,6 +30,8 @@ eval instant at 10s count_over_time(metric[1s]) eval instant at 20s count_over_time(metric[1s]) eval instant at 20s count_over_time(metric[10s]) + +eval instant at 20s count_over_time(metric[20s]) {} 1 diff --git a/promql/promqltest/testdata/subquery.test b/promql/promqltest/testdata/subquery.test index db85b1622..6682c30b5 100644 --- a/promql/promqltest/testdata/subquery.test +++ b/promql/promqltest/testdata/subquery.test @@ -10,18 +10,18 @@ eval instant at 10s sum_over_time(metric[50s:5s]) # Every evaluation yields the last value, i.e. 2 eval instant at 5m sum_over_time(metric[50s:10s]) - {} 12 + {} 10 # Series becomes stale at 5m10s (5m after last sample) -# Hence subquery gets a single sample at 6m-50s=5m10s. -eval instant at 6m sum_over_time(metric[50s:10s]) +# Hence subquery gets a single sample at 5m10s. +eval instant at 6m sum_over_time(metric[60s:10s]) {} 2 eval instant at 10s rate(metric[20s:10s]) {} 0.1 eval instant at 20s rate(metric[20s:5s]) - {} 0.05 + {} 0.06666666666666667 clear @@ -49,16 +49,16 @@ load 10s metric3 0+3x1000 eval instant at 1000s sum_over_time(metric1[30s:10s]) - {} 394 + {} 297 # This is (297*2 + 97 - 100), because the 97 at 975s and the last 100 at 1000s appear only once, # while everything else is repeated with the 5s step. eval instant at 1000s sum_over_time(metric1[30s:5s]) - {} 688 + {} 591 # Offset is aligned with the step. eval instant at 1010s sum_over_time(metric1[30s:10s] offset 10s) - {} 394 + {} 297 # Same result for different offsets due to step alignment. 
eval instant at 1010s sum_over_time(metric1[30s:10s] offset 9s) @@ -78,16 +78,16 @@ eval instant at 1010s sum_over_time((metric1)[30s:10s] offset 3s) # Nested subqueries eval instant at 1000s rate(sum_over_time(metric1[30s:10s])[50s:10s]) - {} 0.4 + {} 0.30000000000000004 eval instant at 1000s rate(sum_over_time(metric2[30s:10s])[50s:10s]) - {} 0.8 + {} 0.6000000000000001 eval instant at 1000s rate(sum_over_time(metric3[30s:10s])[50s:10s]) - {} 1.2 + {} 0.9 eval instant at 1000s rate(sum_over_time((metric1+metric2+metric3)[30s:10s])[30s:10s]) - {} 2.4 + {} 1.8 clear @@ -102,15 +102,15 @@ eval instant at 80s rate(metric[1m]) # No extrapolation, [2@20, 144@80]: (144 - 2) / 60 eval instant at 80s rate(metric[1m:10s]) - {} 2.366666667 + {} 2.4 # Only one value between 10s and 20s, 2@14 eval instant at 20s min_over_time(metric[10s]) {} 2 -# min(1@10, 2@20) +# min(2@20) eval instant at 20s min_over_time(metric[10s:10s]) - {} 1 + {} 2 eval instant at 20m min_over_time(rate(metric[5m])[20m:1m]) {} 0.12119047619047618
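Reviewer note: every expectation change in this patch follows from a single semantic change — range selectors and subquery ranges are now left-open, i.e. a range [1m] evaluated at time t selects samples with timestamps in (t-1m, t] rather than [t-1m, t]. That is what the flipped mint comparisons (>= to >, and the dropped +1 on mintFloats/mintHistograms) in promql/engine.go implement. A minimal sketch in the promqltest format (hypothetical series name demo, not part of this patch) illustrating the new behaviour:

load 10s
  demo 0+1x6

# Samples exist at 0s, 10s, ..., 60s. The left-open range (0s, 60s] excludes
# the sample sitting exactly at 0s, so only the 6 samples from 10s to 60s are
# counted; the previous closed range [0s, 60s] counted all 7.
eval instant at 1m count_over_time(demo[1m])
  {} 6

This is the same effect as the 61 -> 60 expectation updates in cmd/promtool/testdata/unittest.yml at the top of this diff.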