From 34230bb17293535b65a53e33cbaf626663a679c3 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 27 Nov 2023 17:20:27 +0000 Subject: [PATCH 01/99] tsdb/wlog: close segment files sooner 'defer' runs at the end of the whole function; we should close each segment file as soon as we finished reading it. Signed-off-by: Bryan Boreham --- tsdb/wlog/watcher.go | 5 +++-- tsdb/wlog/watcher_test.go | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/tsdb/wlog/watcher.go b/tsdb/wlog/watcher.go index 1c76e3887..56cd0cc4e 100644 --- a/tsdb/wlog/watcher.go +++ b/tsdb/wlog/watcher.go @@ -730,10 +730,11 @@ func (w *Watcher) readCheckpoint(checkpointDir string, readFn segmentReadFn) err if err != nil { return fmt.Errorf("unable to open segment: %w", err) } - defer sr.Close() r := NewLiveReader(w.logger, w.readerMetrics, sr) - if err := readFn(w, r, index, false); err != nil && !errors.Is(err, io.EOF) { + err = readFn(w, r, index, false) + sr.Close() + if err != nil && !errors.Is(err, io.EOF) { return fmt.Errorf("readSegment: %w", err) } diff --git a/tsdb/wlog/watcher_test.go b/tsdb/wlog/watcher_test.go index b30dce91a..2686d3bc9 100644 --- a/tsdb/wlog/watcher_test.go +++ b/tsdb/wlog/watcher_test.go @@ -218,11 +218,11 @@ func TestTailSamples(t *testing.T) { for i := first; i <= last; i++ { segment, err := OpenReadSegment(SegmentName(watcher.walDir, i)) require.NoError(t, err) - defer segment.Close() reader := NewLiveReader(nil, NewLiveReaderMetrics(nil), segment) // Use tail true so we can ensure we got the right number of samples. watcher.readSegment(reader, i, true) + require.NoError(t, segment.Close()) } expectedSeries := seriesCount From fb6a45f06bbe474096162d33c6ac8afcbbb71f45 Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Wed, 17 Jan 2024 18:28:06 +0100 Subject: [PATCH 02/99] tsdb/wlog: Only treat unknown record types as failure Signed-off-by: Arve Knudsen --- CHANGELOG.md | 1 + tsdb/wlog/watcher.go | 10 ++++------ 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1dfcc5c33..e17124abe 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ * [ENHANCEMENT] OTLP: Improve remote write format translation performance by using label set hashes for metric identifiers instead of string based ones. #14006 #13991 * [BUGFIX] OTLP: Don't generate target_info unless at least one identifying label is defined. #13991 * [BUGFIX] OTLP: Don't generate target_info unless there are metrics. #13991 +* [BUGFIX] tsdb/wlog.Watcher.readSegmentForGC: Only count unknown record types against record_decode_failures_total metric. #14042 ## 2.52.0-rc.1 / 2024-05-03 diff --git a/tsdb/wlog/watcher.go b/tsdb/wlog/watcher.go index 8ebd9249a..fd4f5f20f 100644 --- a/tsdb/wlog/watcher.go +++ b/tsdb/wlog/watcher.go @@ -685,14 +685,12 @@ func (w *Watcher) readSegmentForGC(r *LiveReader, segmentNum int, _ bool) error } w.writer.UpdateSeriesSegment(series, segmentNum) - // Ignore these; we're only interested in series. - case record.Samples: - case record.Exemplars: - case record.Tombstones: - - default: + case record.Unknown: // Could be corruption, or reading from a WAL from a newer Prometheus. w.recordDecodeFailsMetric.Inc() + + default: + // We're only interested in series. 
} } if err := r.Err(); err != nil { From 694f717dc44849592b439fce6ffa5fbcdf7957a6 Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Tue, 28 May 2024 15:23:50 +0200 Subject: [PATCH 03/99] Watcher.readSegment: Only consider unknown rec types failures Signed-off-by: Arve Knudsen --- tsdb/wlog/watcher.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tsdb/wlog/watcher.go b/tsdb/wlog/watcher.go index fd4f5f20f..5a73acdd4 100644 --- a/tsdb/wlog/watcher.go +++ b/tsdb/wlog/watcher.go @@ -625,6 +625,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { w.writer.AppendHistograms(histogramsToSend) histogramsToSend = histogramsToSend[:0] } + case record.FloatHistogramSamples: // Skip if experimental "histograms over remote write" is not enabled. if !w.sendHistograms { @@ -652,11 +653,13 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { w.writer.AppendFloatHistograms(floatHistogramsToSend) floatHistogramsToSend = floatHistogramsToSend[:0] } - case record.Tombstones: - default: + case record.Unknown: // Could be corruption, or reading from a WAL from a newer Prometheus. w.recordDecodeFailsMetric.Inc() + + default: + // We're not interested in other types of records. } } if err := r.Err(); err != nil { From 9a837b7f3c1b6d5ef75cb36808bea9189eced911 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Thu, 4 Jul 2024 15:51:41 +0200 Subject: [PATCH 04/99] promql: Make groupedAggregation.groupCount a float64 It's always used as such. Let's avoid the countless conversions. Signed-off-by: beorn7 --- promql/engine.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index bf19aac8b..7c84a0a27 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -2779,7 +2779,7 @@ type groupedAggregation struct { floatValue float64 histogramValue *histogram.FloatHistogram floatMean float64 // Mean, or "compensating value" for Kahan summation. - groupCount int + groupCount float64 groupAggrComplete bool // Used by LIMITK to short-cut series loop when we've reached K elem on every group heap vectorByValueHeap } @@ -2855,8 +2855,8 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix if h != nil { group.hasHistogram = true if group.histogramValue != nil { - left := h.Copy().Div(float64(group.groupCount)) - right := group.histogramValue.Copy().Div(float64(group.groupCount)) + left := h.Copy().Div(group.groupCount) + right := group.histogramValue.Copy().Div(group.groupCount) toAdd, err := left.Sub(right) if err != nil { handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos) @@ -2889,7 +2889,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix } } // Divide each side of the `-` by `group.groupCount` to avoid float64 overflows. - group.floatMean += f/float64(group.groupCount) - group.floatMean/float64(group.groupCount) + group.floatMean += f/group.groupCount - group.floatMean/group.groupCount } case parser.GROUP: @@ -2912,7 +2912,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix if h == nil { // Ignore native histograms. 
group.groupCount++ delta := f - group.floatMean - group.floatMean += delta / float64(group.groupCount) + group.floatMean += delta / group.groupCount group.floatValue += delta * (f - group.floatMean) } @@ -2945,13 +2945,13 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix } case parser.COUNT: - aggr.floatValue = float64(aggr.groupCount) + aggr.floatValue = aggr.groupCount case parser.STDVAR: - aggr.floatValue /= float64(aggr.groupCount) + aggr.floatValue /= aggr.groupCount case parser.STDDEV: - aggr.floatValue = math.Sqrt(aggr.floatValue / float64(aggr.groupCount)) + aggr.floatValue = math.Sqrt(aggr.floatValue / aggr.groupCount) case parser.QUANTILE: aggr.floatValue = quantile(q, aggr.heap) From 44d8c1d1828e063a8d695681826f1f6e13f0f88c Mon Sep 17 00:00:00 2001 From: beorn7 Date: Thu, 4 Jul 2024 15:52:48 +0200 Subject: [PATCH 05/99] nit: add period at end of sentence Signed-off-by: beorn7 --- promql/engine.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/promql/engine.go b/promql/engine.go index 7c84a0a27..06a26e377 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -2780,7 +2780,7 @@ type groupedAggregation struct { histogramValue *histogram.FloatHistogram floatMean float64 // Mean, or "compensating value" for Kahan summation. groupCount float64 - groupAggrComplete bool // Used by LIMITK to short-cut series loop when we've reached K elem on every group + groupAggrComplete bool // Used by LIMITK to short-cut series loop when we've reached K elem on every group. heap vectorByValueHeap } From b5b04ddbe3733a296a531f2a54462ab5529062a7 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Thu, 4 Jul 2024 18:58:41 +0200 Subject: [PATCH 06/99] promql: add avg aggregation benchmark Signed-off-by: beorn7 --- promql/bench_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/promql/bench_test.go b/promql/bench_test.go index bd6728029..33523b2db 100644 --- a/promql/bench_test.go +++ b/promql/bench_test.go @@ -165,6 +165,9 @@ func rangeQueryCases() []benchCase { { expr: "sum(a_X)", }, + { + expr: "avg(a_X)", + }, { expr: "sum without (l)(h_X)", }, From c46074f4dda17dda1e93d79ed8e6e015bb222bdb Mon Sep 17 00:00:00 2001 From: beorn7 Date: Thu, 4 Jul 2024 15:13:35 +0200 Subject: [PATCH 07/99] promql: make avg aggregation more precise and less expensive MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The basic idea here is that the previous code was always doing incremental calculation of the mean value, which is more costly and can be less precise. It protects against overflows, but in most cases, an overflow doesn't happen anyway. The other idea applied here is to expand on #14074, where Kahan summation was applied to sum(). With this commit, the average is calculated in a conventional way (adding everything up and divide in the end) as long as the sum isn't overflowing float64. This is combined with Kahan summation so that the avg aggregation, in most cases, is really equivalent to the sum aggregation with a following division (which is the user's expectation as avg is supposed to be syntactic sugar for sum with a following divison). If the sum hits ±Inf, the calculation reverts to incremental calculation of the mean value. Kahan summation is also applied here, although it cannot fully compensate for the numerical errors introduced by the incremental mean calculation. (The tests added in this commit would fail if incremental mean calculation was always used.) 
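As an illustration of the idea, here is a minimal, self-contained sketch (standard library only). The names avg and the example values are purely illustrative, and kahanSumInc below is a simplified stand-in for the helper of the same name in promql/functions.go; the real implementation in promql/engine.go additionally handles native histograms, grouping, and the Inf/NaN edge cases:

package main

import (
	"fmt"
	"math"
)

// kahanSumInc adds inc to sum while carrying the accumulated rounding error in c.
// Simplified stand-in for the helper of the same name in promql/functions.go.
func kahanSumInc(inc, sum, c float64) (newSum, newC float64) {
	t := sum + inc
	if math.Abs(sum) >= math.Abs(inc) {
		c += (sum - t) + inc
	} else {
		c += (inc - t) + sum
	}
	return t, c
}

// avg adds everything up with Kahan compensation and divides once at the end.
// Only if the running sum overflows to +/-Inf does it revert to the more
// expensive incremental calculation of the mean value.
func avg(values []float64) float64 {
	var sum, c, mean float64
	incremental := false
	for i, v := range values {
		count := float64(i + 1)
		if !incremental {
			newSum, newC := kahanSumInc(v, sum, c)
			if count == 1 || !math.IsInf(newSum, 0) {
				sum, c = newSum, newC
				continue
			}
			// The sum would overflow, so switch to incremental mean calculation.
			incremental = true
			mean = sum / (count - 1)
			c /= count - 1
		}
		corrected := mean + c
		mean, c = kahanSumInc(v/count-corrected/count, mean, c)
	}
	if incremental {
		return mean + c
	}
	return (sum + c) / float64(len(values))
}

func main() {
	fmt.Println(avg([]float64{1, 2, 3, 4}))                             // 2.5
	fmt.Println(avg([]float64{math.MaxFloat64, math.MaxFloat64, 0, 0})) // ~8.99e307 rather than +Inf
}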
Signed-off-by: beorn7 --- promql/engine.go | 59 +++++++++++++++------ promql/promqltest/testdata/aggregators.test | 20 ++++++- 2 files changed, 61 insertions(+), 18 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index 06a26e377..8a0aa23f6 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -2773,15 +2773,19 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram } type groupedAggregation struct { + floatValue float64 + histogramValue *histogram.FloatHistogram + floatMean float64 + floatKahanC float64 // "Compensating value" for Kahan summation. + groupCount float64 + heap vectorByValueHeap + + // All bools together for better packing within the struct. seen bool // Was this output groups seen in the input at this timestamp. hasFloat bool // Has at least 1 float64 sample aggregated. hasHistogram bool // Has at least 1 histogram sample aggregated. - floatValue float64 - histogramValue *histogram.FloatHistogram - floatMean float64 // Mean, or "compensating value" for Kahan summation. - groupCount float64 groupAggrComplete bool // Used by LIMITK to short-cut series loop when we've reached K elem on every group. - heap vectorByValueHeap + incrementalMean bool // True after reverting to incremental calculation of the mean value. } // aggregation evaluates sum, avg, count, stdvar, stddev or quantile at one timestep on inputMatrix. @@ -2807,13 +2811,11 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix *group = groupedAggregation{ seen: true, floatValue: f, + floatMean: f, groupCount: 1, } switch op { - case parser.AVG: - group.floatMean = f - fallthrough - case parser.SUM: + case parser.AVG, parser.SUM: if h == nil { group.hasFloat = true } else { @@ -2821,7 +2823,6 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix group.hasHistogram = true } case parser.STDVAR, parser.STDDEV: - group.floatMean = f group.floatValue = 0 case parser.QUANTILE: group.heap = make(vectorByValueHeap, 1) @@ -2847,7 +2848,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix // point in copying the histogram in that case. } else { group.hasFloat = true - group.floatValue, group.floatMean = kahanSumInc(f, group.floatValue, group.floatMean) + group.floatValue, group.floatKahanC = kahanSumInc(f, group.floatValue, group.floatKahanC) } case parser.AVG: @@ -2871,6 +2872,22 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix // point in copying the histogram in that case. } else { group.hasFloat = true + if !group.incrementalMean { + newV, newC := kahanSumInc(f, group.floatValue, group.floatKahanC) + if !math.IsInf(newV, 0) { + // The sum doesn't overflow, so we propagate it to the + // group struct and continue with the regular + // calculation of the mean value. + group.floatValue, group.floatKahanC = newV, newC + break + } + // If we are here, we know that the sum _would_ overflow. So + // instead of continue to sum up, we revert to incremental + // calculation of the mean value from here on. + group.incrementalMean = true + group.floatMean = group.floatValue / (group.groupCount - 1) + group.floatKahanC /= group.groupCount - 1 + } if math.IsInf(group.floatMean, 0) { if math.IsInf(f, 0) && (group.floatMean > 0) == (f > 0) { // The `floatMean` and `s.F` values are `Inf` of the same sign. 
They @@ -2888,8 +2905,13 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix break } } - // Divide each side of the `-` by `group.groupCount` to avoid float64 overflows. - group.floatMean += f/group.groupCount - group.floatMean/group.groupCount + currentMean := group.floatMean + group.floatKahanC + group.floatMean, group.floatKahanC = kahanSumInc( + // Divide each side of the `-` by `group.groupCount` to avoid float64 overflows. + f/group.groupCount-currentMean/group.groupCount, + group.floatMean, + group.floatKahanC, + ) } case parser.GROUP: @@ -2938,10 +2960,13 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix annos.Add(annotations.NewMixedFloatsHistogramsAggWarning(e.Expr.PositionRange())) continue } - if aggr.hasHistogram { + switch { + case aggr.hasHistogram: aggr.histogramValue = aggr.histogramValue.Compact(0) - } else { - aggr.floatValue = aggr.floatMean + case aggr.incrementalMean: + aggr.floatValue = aggr.floatMean + aggr.floatKahanC + default: + aggr.floatValue = (aggr.floatValue + aggr.floatKahanC) / aggr.groupCount } case parser.COUNT: @@ -2965,7 +2990,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix if aggr.hasHistogram { aggr.histogramValue.Compact(0) } else { - aggr.floatValue += aggr.floatMean // Add Kahan summation compensating term. + aggr.floatValue += aggr.floatKahanC } default: // For other aggregations, we already have the right value. diff --git a/promql/promqltest/testdata/aggregators.test b/promql/promqltest/testdata/aggregators.test index cbb255a12..68d2e735b 100644 --- a/promql/promqltest/testdata/aggregators.test +++ b/promql/promqltest/testdata/aggregators.test @@ -503,7 +503,7 @@ eval instant at 1m avg(data{test="-big"}) eval instant at 1m avg(data{test="bigzero"}) {} 0 -# Test summing extreme values. +# Test summing and averaging extreme values. clear load 10s @@ -529,21 +529,39 @@ load 10s eval instant at 1m sum(data{test="ten"}) {} 10 +eval instant at 1m avg(data{test="ten"}) + {} 2.5 + eval instant at 1m sum by (group) (data{test="pos_inf"}) {group="1"} Inf {group="2"} Inf +eval instant at 1m avg by (group) (data{test="pos_inf"}) + {group="1"} Inf + {group="2"} Inf + eval instant at 1m sum by (group) (data{test="neg_inf"}) {group="1"} -Inf {group="2"} -Inf +eval instant at 1m avg by (group) (data{test="neg_inf"}) + {group="1"} -Inf + {group="2"} -Inf + eval instant at 1m sum(data{test="inf_inf"}) {} NaN +eval instant at 1m avg(data{test="inf_inf"}) + {} NaN + eval instant at 1m sum by (group) (data{test="nan"}) {group="1"} NaN {group="2"} NaN +eval instant at 1m avg by (group) (data{test="nan"}) + {group="1"} NaN + {group="2"} NaN + clear # Test that aggregations are deterministic. From 3a908d8e088763e6df7f7a5f74345ada30c9b0bb Mon Sep 17 00:00:00 2001 From: beorn7 Date: Thu, 4 Jul 2024 18:36:32 +0200 Subject: [PATCH 08/99] promql: Improve Kahan usage in avg_over_time The calculation of the mean value in avg_over_time is performed in an incremental fashion. This introduces additional numerical errors that even Kahan summation cannot compensate, but at least we can use the Kahan-corrected mean value when we use the intermediate mean value in the calculation. 
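Concretely, funcAvgOverTime updates the running mean in each step as mean = mean + (f.F/count - mean/count), the classical incremental mean. The Kahan compensation c carries the low-order bits that rounding has stripped from mean, so the best available estimate of the intermediate mean is mean + c. With this change, the subtracted term becomes (mean + c)/count instead of mean/count, so that corrected estimate is what feeds back into the incremental update.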
Signed-off-by: beorn7 --- promql/functions.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/promql/functions.go b/promql/functions.go index dcc2cd759..575f8302d 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -593,7 +593,8 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode continue } } - mean, c = kahanSumInc(f.F/count-mean/count, mean, c) + correctedMean := mean + c + mean, c = kahanSumInc(f.F/count-correctedMean/count, mean, c) } if math.IsInf(mean, 0) { From cff0429b1ada726887f1fa717f44b63f72d45877 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Thu, 4 Jul 2024 18:47:52 +0200 Subject: [PATCH 09/99] promql: make avg_over_time faster and more precise Same idea as for the avg aggregator before: Most of the time, there is no overflow, so we don't have to revert to the more expensive and less precise incremental calculation of the mean value. Signed-off-by: beorn7 --- promql/functions.go | 32 ++++++++++++++++++----- promql/promqltest/testdata/functions.test | 4 ++- 2 files changed, 28 insertions(+), 8 deletions(-) diff --git a/promql/functions.go b/promql/functions.go index 575f8302d..ca987545d 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -573,9 +573,28 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode return vec, nil } return aggrOverTime(vals, enh, func(s Series) float64 { - var mean, count, c float64 + var ( + sum, mean, count, kahanC float64 + incrementalMean bool + ) for _, f := range s.Floats { count++ + if !incrementalMean { + newSum, newC := kahanSumInc(f.F, sum, kahanC) + // Perform regular mean calculation as long as + // the sum doesn't overflow and (in any case) + // for the first iteration (even if we start + // with ±Inf) to not run into division-by-zero + // problems below. + if count == 1 || !math.IsInf(newSum, 0) { + sum, kahanC = newSum, newC + continue + } + // Handle overflow by reverting to incremental calculation of the mean value. + incrementalMean = true + mean = sum / (count - 1) + kahanC /= count - 1 + } if math.IsInf(mean, 0) { if math.IsInf(f.F, 0) && (mean > 0) == (f.F > 0) { // The `mean` and `f.F` values are `Inf` of the same sign. They @@ -593,14 +612,13 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode continue } } - correctedMean := mean + c - mean, c = kahanSumInc(f.F/count-correctedMean/count, mean, c) + correctedMean := mean + kahanC + mean, kahanC = kahanSumInc(f.F/count-correctedMean/count, mean, kahanC) } - - if math.IsInf(mean, 0) { - return mean + if incrementalMean { + return mean + kahanC } - return mean + c + return (sum + kahanC) / count }), nil } diff --git a/promql/promqltest/testdata/functions.test b/promql/promqltest/testdata/functions.test index 718e001c3..290beb5b9 100644 --- a/promql/promqltest/testdata/functions.test +++ b/promql/promqltest/testdata/functions.test @@ -737,7 +737,6 @@ eval instant at 1m avg_over_time(metric6c[1m]) eval instant at 1m sum_over_time(metric6c[1m])/count_over_time(metric6c[1m]) {} NaN - eval instant at 1m avg_over_time(metric7[1m]) {} NaN @@ -772,6 +771,9 @@ load 10s eval instant at 1m sum_over_time(metric[1m]) {} 2 +eval instant at 1m avg_over_time(metric[1m]) + {} 0.5 + # Tests for stddev_over_time and stdvar_over_time. 
clear load 10s From c04924bc41982dfef9ea29939c1a2c6fe56333c7 Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Wed, 24 Jul 2024 16:47:33 +0200 Subject: [PATCH 10/99] otlptranslator: Add tests for BuildCompliantName Signed-off-by: Arve Knudsen --- .../prometheus/normalize_label.go | 11 +- .../prometheus/normalize_name.go | 25 +-- .../prometheus/normalize_name_test.go | 203 ++++++++++++++++++ .../prometheus/testutils_test.go | 49 +++++ 4 files changed, 270 insertions(+), 18 deletions(-) create mode 100644 storage/remote/otlptranslator/prometheus/normalize_name_test.go create mode 100644 storage/remote/otlptranslator/prometheus/testutils_test.go diff --git a/storage/remote/otlptranslator/prometheus/normalize_label.go b/storage/remote/otlptranslator/prometheus/normalize_label.go index 6360aa976..a112b9bbc 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_label.go +++ b/storage/remote/otlptranslator/prometheus/normalize_label.go @@ -21,15 +21,14 @@ import ( "unicode" ) -// Normalizes the specified label to follow Prometheus label names standard +// Normalizes the specified label to follow Prometheus label names standard. // -// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels +// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels. // -// Labels that start with non-letter rune will be prefixed with "key_" +// Labels that start with non-letter rune will be prefixed with "key_". // -// Exception is made for double-underscores which are allowed +// An exception is made for double-underscores which are allowed. func NormalizeLabel(label string) string { - // Trivial case if len(label) == 0 { return label @@ -48,7 +47,7 @@ func NormalizeLabel(label string) string { return label } -// Return '_' for anything non-alphanumeric +// Return '_' for anything non-alphanumeric. func sanitizeRune(r rune) rune { if unicode.IsLetter(r) || unicode.IsDigit(r) { return r diff --git a/storage/remote/otlptranslator/prometheus/normalize_name.go b/storage/remote/otlptranslator/prometheus/normalize_name.go index 71bba40e4..0f472b80a 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_name.go +++ b/storage/remote/otlptranslator/prometheus/normalize_name.go @@ -76,14 +76,15 @@ var perUnitMap = map[string]string{ "y": "year", } -// BuildCompliantName builds a Prometheus-compliant metric name for the specified metric +// BuildCompliantName builds a Prometheus-compliant metric name for the specified metric. // // Metric name is prefixed with specified namespace and underscore (if any). // Namespace is not cleaned up. Make sure specified namespace follows Prometheus // naming convention. // -// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels -// and https://prometheus.io/docs/practices/naming/#metric-and-label-naming +// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels, +// https://prometheus.io/docs/practices/naming/#metric-and-label-naming +// and https://github.com/open-telemetry/opentelemetry-specification/blob/v1.33.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus. 
func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffixes bool) string { var metricName string @@ -110,7 +111,7 @@ func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffix // Build a normalized name for the specified metric func normalizeName(metric pmetric.Metric, namespace string) string { - // Split metric name in "tokens" (remove all non-alphanumeric) + // Split metric name into "tokens" (remove all non-alphanumerics) nameTokens := strings.FieldsFunc( metric.Name(), func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) }, @@ -122,9 +123,9 @@ func normalizeName(metric pmetric.Metric, namespace string) string { // Main unit // Append if not blank, doesn't contain '{}', and is not present in metric name already if len(unitTokens) > 0 { - mainUnitOtel := strings.TrimSpace(unitTokens[0]) - if mainUnitOtel != "" && !strings.ContainsAny(mainUnitOtel, "{}") { - mainUnitProm := CleanUpString(unitMapGetOrDefault(mainUnitOtel)) + mainUnitOTel := strings.TrimSpace(unitTokens[0]) + if mainUnitOTel != "" && !strings.ContainsAny(mainUnitOTel, "{}") { + mainUnitProm := CleanUpString(unitMapGetOrDefault(mainUnitOTel)) if mainUnitProm != "" && !contains(nameTokens, mainUnitProm) { nameTokens = append(nameTokens, mainUnitProm) } @@ -133,11 +134,11 @@ func normalizeName(metric pmetric.Metric, namespace string) string { // Per unit // Append if not blank, doesn't contain '{}', and is not present in metric name already if len(unitTokens) > 1 && unitTokens[1] != "" { - perUnitOtel := strings.TrimSpace(unitTokens[1]) - if perUnitOtel != "" && !strings.ContainsAny(perUnitOtel, "{}") { - perUnitProm := CleanUpString(perUnitMapGetOrDefault(perUnitOtel)) + perUnitOTel := strings.TrimSpace(unitTokens[1]) + if perUnitOTel != "" && !strings.ContainsAny(perUnitOTel, "{}") { + perUnitProm := CleanUpString(perUnitMapGetOrDefault(perUnitOTel)) if perUnitProm != "" && !contains(nameTokens, perUnitProm) { - nameTokens = append(append(nameTokens, "per"), perUnitProm) + nameTokens = append(nameTokens, "per", perUnitProm) } } } @@ -150,7 +151,7 @@ func normalizeName(metric pmetric.Metric, namespace string) string { } // Append _ratio for metrics with unit "1" - // Some Otel receivers improperly use unit "1" for counters of objects + // Some OTel receivers improperly use unit "1" for counters of objects // See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aissue+some+metric+units+don%27t+follow+otel+semantic+conventions // Until these issues have been fixed, we're appending `_ratio` for gauges ONLY // Theoretically, counters could be ratios as well, but it's absurd (for mathematical reasons) diff --git a/storage/remote/otlptranslator/prometheus/normalize_name_test.go b/storage/remote/otlptranslator/prometheus/normalize_name_test.go new file mode 100644 index 000000000..ee25bb2df --- /dev/null +++ b/storage/remote/otlptranslator/prometheus/normalize_name_test.go @@ -0,0 +1,203 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/normalize_name_test.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Copyright The OpenTelemetry Authors. + +package prometheus + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/pdata/pmetric" +) + +func TestByte(t *testing.T) { + require.Equal(t, "system_filesystem_usage_bytes", normalizeName(createGauge("system.filesystem.usage", "By"), "")) +} + +func TestByteCounter(t *testing.T) { + require.Equal(t, "system_io_bytes_total", normalizeName(createCounter("system.io", "By"), "")) + require.Equal(t, "network_transmitted_bytes_total", normalizeName(createCounter("network_transmitted_bytes_total", "By"), "")) +} + +func TestWhiteSpaces(t *testing.T) { + require.Equal(t, "system_filesystem_usage_bytes", normalizeName(createGauge("\t system.filesystem.usage ", " By\t"), "")) +} + +func TestNonStandardUnit(t *testing.T) { + require.Equal(t, "system_network_dropped", normalizeName(createGauge("system.network.dropped", "{packets}"), "")) +} + +func TestNonStandardUnitCounter(t *testing.T) { + require.Equal(t, "system_network_dropped_total", normalizeName(createCounter("system.network.dropped", "{packets}"), "")) +} + +func TestBrokenUnit(t *testing.T) { + require.Equal(t, "system_network_dropped_packets", normalizeName(createGauge("system.network.dropped", "packets"), "")) + require.Equal(t, "system_network_packets_dropped", normalizeName(createGauge("system.network.packets.dropped", "packets"), "")) + require.Equal(t, "system_network_packets", normalizeName(createGauge("system.network.packets", "packets"), "")) +} + +func TestBrokenUnitCounter(t *testing.T) { + require.Equal(t, "system_network_dropped_packets_total", normalizeName(createCounter("system.network.dropped", "packets"), "")) + require.Equal(t, "system_network_packets_dropped_total", normalizeName(createCounter("system.network.packets.dropped", "packets"), "")) + require.Equal(t, "system_network_packets_total", normalizeName(createCounter("system.network.packets", "packets"), "")) +} + +func TestRatio(t *testing.T) { + require.Equal(t, "hw_gpu_memory_utilization_ratio", normalizeName(createGauge("hw.gpu.memory.utilization", "1"), "")) + require.Equal(t, "hw_fan_speed_ratio", normalizeName(createGauge("hw.fan.speed_ratio", "1"), "")) + require.Equal(t, "objects_total", normalizeName(createCounter("objects", "1"), "")) +} + +func TestHertz(t *testing.T) { + require.Equal(t, "hw_cpu_speed_limit_hertz", normalizeName(createGauge("hw.cpu.speed_limit", "Hz"), "")) +} + +func TestPer(t *testing.T) { + require.Equal(t, "broken_metric_speed_km_per_hour", normalizeName(createGauge("broken.metric.speed", "km/h"), "")) + require.Equal(t, "astro_light_speed_limit_meters_per_second", normalizeName(createGauge("astro.light.speed_limit", "m/s"), "")) +} + +func TestPercent(t *testing.T) { + require.Equal(t, "broken_metric_success_ratio_percent", normalizeName(createGauge("broken.metric.success_ratio", "%"), "")) + require.Equal(t, "broken_metric_success_percent", normalizeName(createGauge("broken.metric.success_percent", "%"), "")) +} + +func TestEmpty(t *testing.T) { + require.Equal(t, "test_metric_no_unit", normalizeName(createGauge("test.metric.no_unit", 
""), "")) + require.Equal(t, "test_metric_spaces", normalizeName(createGauge("test.metric.spaces", " \t "), "")) +} + +func TestUnsupportedRunes(t *testing.T) { + require.Equal(t, "unsupported_metric_temperature_F", normalizeName(createGauge("unsupported.metric.temperature", "°F"), "")) + require.Equal(t, "unsupported_metric_weird", normalizeName(createGauge("unsupported.metric.weird", "+=.:,!* & #"), "")) + require.Equal(t, "unsupported_metric_redundant_test_per_C", normalizeName(createGauge("unsupported.metric.redundant", "__test $/°C"), "")) +} + +func TestOTelReceivers(t *testing.T) { + require.Equal(t, "active_directory_ds_replication_network_io_bytes_total", normalizeName(createCounter("active_directory.ds.replication.network.io", "By"), "")) + require.Equal(t, "active_directory_ds_replication_sync_object_pending_total", normalizeName(createCounter("active_directory.ds.replication.sync.object.pending", "{objects}"), "")) + require.Equal(t, "active_directory_ds_replication_object_rate_per_second", normalizeName(createGauge("active_directory.ds.replication.object.rate", "{objects}/s"), "")) + require.Equal(t, "active_directory_ds_name_cache_hit_rate_percent", normalizeName(createGauge("active_directory.ds.name_cache.hit_rate", "%"), "")) + require.Equal(t, "active_directory_ds_ldap_bind_last_successful_time_milliseconds", normalizeName(createGauge("active_directory.ds.ldap.bind.last_successful.time", "ms"), "")) + require.Equal(t, "apache_current_connections", normalizeName(createGauge("apache.current_connections", "connections"), "")) + require.Equal(t, "apache_workers_connections", normalizeName(createGauge("apache.workers", "connections"), "")) + require.Equal(t, "apache_requests_total", normalizeName(createCounter("apache.requests", "1"), "")) + require.Equal(t, "bigip_virtual_server_request_count_total", normalizeName(createCounter("bigip.virtual_server.request.count", "{requests}"), "")) + require.Equal(t, "system_cpu_utilization_ratio", normalizeName(createGauge("system.cpu.utilization", "1"), "")) + require.Equal(t, "system_disk_operation_time_seconds_total", normalizeName(createCounter("system.disk.operation_time", "s"), "")) + require.Equal(t, "system_cpu_load_average_15m_ratio", normalizeName(createGauge("system.cpu.load_average.15m", "1"), "")) + require.Equal(t, "memcached_operation_hit_ratio_percent", normalizeName(createGauge("memcached.operation_hit_ratio", "%"), "")) + require.Equal(t, "mongodbatlas_process_asserts_per_second", normalizeName(createGauge("mongodbatlas.process.asserts", "{assertions}/s"), "")) + require.Equal(t, "mongodbatlas_process_journaling_data_files_mebibytes", normalizeName(createGauge("mongodbatlas.process.journaling.data_files", "MiBy"), "")) + require.Equal(t, "mongodbatlas_process_network_io_bytes_per_second", normalizeName(createGauge("mongodbatlas.process.network.io", "By/s"), "")) + require.Equal(t, "mongodbatlas_process_oplog_rate_gibibytes_per_hour", normalizeName(createGauge("mongodbatlas.process.oplog.rate", "GiBy/h"), "")) + require.Equal(t, "mongodbatlas_process_db_query_targeting_scanned_per_returned", normalizeName(createGauge("mongodbatlas.process.db.query_targeting.scanned_per_returned", "{scanned}/{returned}"), "")) + require.Equal(t, "nginx_requests", normalizeName(createGauge("nginx.requests", "requests"), "")) + require.Equal(t, "nginx_connections_accepted", normalizeName(createGauge("nginx.connections_accepted", "connections"), "")) + require.Equal(t, "nsxt_node_memory_usage_kilobytes", 
normalizeName(createGauge("nsxt.node.memory.usage", "KBy"), "")) + require.Equal(t, "redis_latest_fork_microseconds", normalizeName(createGauge("redis.latest_fork", "us"), "")) +} + +func TestTrimPromSuffixes(t *testing.T) { + assert.Equal(t, "active_directory_ds_replication_network_io", TrimPromSuffixes("active_directory_ds_replication_network_io_bytes_total", pmetric.MetricTypeSum, "bytes")) + assert.Equal(t, "active_directory_ds_name_cache_hit_rate", TrimPromSuffixes("active_directory_ds_name_cache_hit_rate_percent", pmetric.MetricTypeGauge, "percent")) + assert.Equal(t, "active_directory_ds_ldap_bind_last_successful_time", TrimPromSuffixes("active_directory_ds_ldap_bind_last_successful_time_milliseconds", pmetric.MetricTypeGauge, "milliseconds")) + assert.Equal(t, "apache_requests", TrimPromSuffixes("apache_requests_total", pmetric.MetricTypeSum, "1")) + assert.Equal(t, "system_cpu_utilization", TrimPromSuffixes("system_cpu_utilization_ratio", pmetric.MetricTypeGauge, "ratio")) + assert.Equal(t, "mongodbatlas_process_journaling_data_files", TrimPromSuffixes("mongodbatlas_process_journaling_data_files_mebibytes", pmetric.MetricTypeGauge, "mebibytes")) + assert.Equal(t, "mongodbatlas_process_network_io", TrimPromSuffixes("mongodbatlas_process_network_io_bytes_per_second", pmetric.MetricTypeGauge, "bytes_per_second")) + assert.Equal(t, "mongodbatlas_process_oplog_rate", TrimPromSuffixes("mongodbatlas_process_oplog_rate_gibibytes_per_hour", pmetric.MetricTypeGauge, "gibibytes_per_hour")) + assert.Equal(t, "nsxt_node_memory_usage", TrimPromSuffixes("nsxt_node_memory_usage_kilobytes", pmetric.MetricTypeGauge, "kilobytes")) + assert.Equal(t, "redis_latest_fork", TrimPromSuffixes("redis_latest_fork_microseconds", pmetric.MetricTypeGauge, "microseconds")) + assert.Equal(t, "up", TrimPromSuffixes("up", pmetric.MetricTypeGauge, "")) + + // These are not necessarily valid OM units, only tested for the sake of completeness. 
+ assert.Equal(t, "active_directory_ds_replication_sync_object_pending", TrimPromSuffixes("active_directory_ds_replication_sync_object_pending_total", pmetric.MetricTypeSum, "{objects}")) + assert.Equal(t, "apache_current", TrimPromSuffixes("apache_current_connections", pmetric.MetricTypeGauge, "connections")) + assert.Equal(t, "bigip_virtual_server_request_count", TrimPromSuffixes("bigip_virtual_server_request_count_total", pmetric.MetricTypeSum, "{requests}")) + assert.Equal(t, "mongodbatlas_process_db_query_targeting_scanned_per_returned", TrimPromSuffixes("mongodbatlas_process_db_query_targeting_scanned_per_returned", pmetric.MetricTypeGauge, "{scanned}/{returned}")) + assert.Equal(t, "nginx_connections_accepted", TrimPromSuffixes("nginx_connections_accepted", pmetric.MetricTypeGauge, "connections")) + assert.Equal(t, "apache_workers", TrimPromSuffixes("apache_workers_connections", pmetric.MetricTypeGauge, "connections")) + assert.Equal(t, "nginx", TrimPromSuffixes("nginx_requests", pmetric.MetricTypeGauge, "requests")) + + // Units shouldn't be trimmed if the unit is not a direct match with the suffix, i.e, a suffix "_seconds" shouldn't be removed if unit is "sec" or "s" + assert.Equal(t, "system_cpu_load_average_15m_ratio", TrimPromSuffixes("system_cpu_load_average_15m_ratio", pmetric.MetricTypeGauge, "1")) + assert.Equal(t, "mongodbatlas_process_asserts_per_second", TrimPromSuffixes("mongodbatlas_process_asserts_per_second", pmetric.MetricTypeGauge, "{assertions}/s")) + assert.Equal(t, "memcached_operation_hit_ratio_percent", TrimPromSuffixes("memcached_operation_hit_ratio_percent", pmetric.MetricTypeGauge, "%")) + assert.Equal(t, "active_directory_ds_replication_object_rate_per_second", TrimPromSuffixes("active_directory_ds_replication_object_rate_per_second", pmetric.MetricTypeGauge, "{objects}/s")) + assert.Equal(t, "system_disk_operation_time_seconds", TrimPromSuffixes("system_disk_operation_time_seconds_total", pmetric.MetricTypeSum, "s")) +} + +func TestNamespace(t *testing.T) { + require.Equal(t, "space_test", normalizeName(createGauge("test", ""), "space")) + require.Equal(t, "space_test", normalizeName(createGauge("#test", ""), "space")) +} + +func TestCleanUpString(t *testing.T) { + require.Equal(t, "", CleanUpString("")) + require.Equal(t, "a_b", CleanUpString("a b")) + require.Equal(t, "hello_world", CleanUpString("hello, world!")) + require.Equal(t, "hello_you_2", CleanUpString("hello you 2")) + require.Equal(t, "1000", CleanUpString("$1000")) + require.Equal(t, "", CleanUpString("*+$^=)")) +} + +func TestUnitMapGetOrDefault(t *testing.T) { + require.Equal(t, "", unitMapGetOrDefault("")) + require.Equal(t, "seconds", unitMapGetOrDefault("s")) + require.Equal(t, "invalid", unitMapGetOrDefault("invalid")) +} + +func TestPerUnitMapGetOrDefault(t *testing.T) { + require.Equal(t, "", perUnitMapGetOrDefault("")) + require.Equal(t, "second", perUnitMapGetOrDefault("s")) + require.Equal(t, "invalid", perUnitMapGetOrDefault("invalid")) +} + +func TestRemoveItem(t *testing.T) { + require.Equal(t, []string{}, removeItem([]string{}, "test")) + require.Equal(t, []string{}, removeItem([]string{}, "")) + require.Equal(t, []string{"a", "b", "c"}, removeItem([]string{"a", "b", "c"}, "d")) + require.Equal(t, []string{"a", "b", "c"}, removeItem([]string{"a", "b", "c"}, "")) + require.Equal(t, []string{"a", "b"}, removeItem([]string{"a", "b", "c"}, "c")) + require.Equal(t, []string{"a", "c"}, removeItem([]string{"a", "b", "c"}, "b")) + require.Equal(t, []string{"b", "c"}, 
removeItem([]string{"a", "b", "c"}, "a")) +} + +func TestBuildCompliantNameWithNormalize(t *testing.T) { + require.Equal(t, "system_io_bytes_total", BuildCompliantName(createCounter("system.io", "By"), "", true)) + require.Equal(t, "system_network_io_bytes_total", BuildCompliantName(createCounter("network.io", "By"), "system", true)) + require.Equal(t, "_3_14_digits", BuildCompliantName(createGauge("3.14 digits", ""), "", true)) + require.Equal(t, "envoy_rule_engine_zlib_buf_error", BuildCompliantName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", true)) + require.Equal(t, "foo_bar", BuildCompliantName(createGauge(":foo::bar", ""), "", true)) + require.Equal(t, "foo_bar_total", BuildCompliantName(createCounter(":foo::bar", ""), "", true)) + // Gauges with unit 1 are considered ratios. + require.Equal(t, "foo_bar_ratio", BuildCompliantName(createGauge("foo.bar", "1"), "", true)) + // Slashes in units are converted. + require.Equal(t, "system_io_foo_per_bar_total", BuildCompliantName(createCounter("system.io", "foo/bar"), "", true)) +} + +func TestBuildCompliantNameWithoutSuffixes(t *testing.T) { + require.Equal(t, "system_io", BuildCompliantName(createCounter("system.io", "By"), "", false)) + require.Equal(t, "system_network_io", BuildCompliantName(createCounter("network.io", "By"), "system", false)) + require.Equal(t, "system_network_I_O", BuildCompliantName(createCounter("network (I/O)", "By"), "system", false)) + require.Equal(t, "_3_14_digits", BuildCompliantName(createGauge("3.14 digits", "By"), "", false)) + require.Equal(t, "envoy__rule_engine_zlib_buf_error", BuildCompliantName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", false)) + require.Equal(t, ":foo::bar", BuildCompliantName(createGauge(":foo::bar", ""), "", false)) + require.Equal(t, ":foo::bar", BuildCompliantName(createCounter(":foo::bar", ""), "", false)) +} diff --git a/storage/remote/otlptranslator/prometheus/testutils_test.go b/storage/remote/otlptranslator/prometheus/testutils_test.go new file mode 100644 index 000000000..363328c57 --- /dev/null +++ b/storage/remote/otlptranslator/prometheus/testutils_test.go @@ -0,0 +1,49 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/testutils_test.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Copyright The OpenTelemetry Authors. 
+ +package prometheus + +import ( + "go.opentelemetry.io/collector/pdata/pmetric" +) + +var ilm pmetric.ScopeMetrics + +func init() { + + metrics := pmetric.NewMetrics() + resourceMetrics := metrics.ResourceMetrics().AppendEmpty() + ilm = resourceMetrics.ScopeMetrics().AppendEmpty() + +} + +// Returns a new Metric of type "Gauge" with specified name and unit +func createGauge(name string, unit string) pmetric.Metric { + gauge := ilm.Metrics().AppendEmpty() + gauge.SetName(name) + gauge.SetUnit(unit) + gauge.SetEmptyGauge() + return gauge +} + +// Returns a new Metric of type Monotonic Sum with specified name and unit +func createCounter(name string, unit string) pmetric.Metric { + counter := ilm.Metrics().AppendEmpty() + counter.SetEmptySum().SetIsMonotonic(true) + counter.SetName(name) + counter.SetUnit(unit) + return counter +} From a4a5994f69d9af423ec886b2c0e20a05453ac832 Mon Sep 17 00:00:00 2001 From: Callum Styan Date: Mon, 22 Jul 2024 15:07:12 -0700 Subject: [PATCH 11/99] clarify that 1.0 will eventually be deprecated, it is not yet deprecated Signed-off-by: Callum Styan --- config/config.go | 5 +++-- .../examples/remote_storage/example_write_adapter/README.md | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/config/config.go b/config/config.go index c924e3098..173689d6a 100644 --- a/config/config.go +++ b/config/config.go @@ -1085,8 +1085,9 @@ func (m RemoteWriteProtoMsgs) String() string { } var ( - // RemoteWriteProtoMsgV1 represents the deprecated `prometheus.WriteRequest` protobuf - // message introduced in the https://prometheus.io/docs/specs/remote_write_spec/. + // RemoteWriteProtoMsgV1 represents the `prometheus.WriteRequest` protobuf + // message introduced in the https://prometheus.io/docs/specs/remote_write_spec/, + // which will eventually be deprecated. // // NOTE: This string is used for both HTTP header values and config value, so don't change // this reference. 
diff --git a/documentation/examples/remote_storage/example_write_adapter/README.md b/documentation/examples/remote_storage/example_write_adapter/README.md index 739cf3be3..968d2b25c 100644 --- a/documentation/examples/remote_storage/example_write_adapter/README.md +++ b/documentation/examples/remote_storage/example_write_adapter/README.md @@ -19,7 +19,7 @@ remote_write: protobuf_message: "io.prometheus.write.v2.Request" ``` -or for deprecated Remote Write 1.0 message: +or for the eventually deprecated Remote Write 1.0 message: ```yaml remote_write: From 7b5897a46d3afe25912e97b73b89e599db66fde8 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Thu, 25 Jul 2024 17:51:29 +0100 Subject: [PATCH 12/99] Prepare release 2.54.0-rc.0 (#14498) Signed-off-by: Bryan Boreham --- CHANGELOG.md | 41 ++++++++++++++++++-- VERSION | 2 +- web/ui/module/codemirror-promql/package.json | 4 +- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 14 +++---- web/ui/package.json | 2 +- web/ui/react-app/package.json | 4 +- 7 files changed, 52 insertions(+), 17 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e7314d041..02ffc5e4b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,9 +1,44 @@ # Changelog -## unreleased +## 2.54.0-rc.0 / 2024-07-19 -* [FEATURE] Remote-Write: Add sender and receiver support for [Remote Write 2.0-rc.2](https://prometheus.io/docs/specs/remote_write_spec_2_0/) specification #14395 #14427 #14444 -* [ENHANCEMENT] Remote-Write: 1.x messages against Remote Write 2.x Receivers will have now correct values for `prometheus_storage__failed_total` in case of partial errors #14444 +Release 2.54 brings a release candidate of a major new version of [Remote Write: 2.0](https://prometheus.io/docs/specs/remote_write_spec_2_0/). +This is experimental at this time and may still change. +Remote-write v2 is enabled by default, but can be disabled via feature-flag `web.remote-write-receiver.accepted-protobuf-messages`. + +* [CHANGE] Remote-Write: `highest_timestamp_in_seconds` and `queue_highest_sent_timestamp_seconds` metrics now initialized to 0. #14437 +* [CHANGE] API: Split warnings from info annotations in API response. #14327 +* [FEATURE] Remote-Write: Version 2.0 experimental, plus metadata in WAL via feature flag `metadata-wal-records` (defaults on). #14395,#14427,#14444 +* [FEATURE] PromQL: add limitk() and limit_ratio() aggregation operators. #12503 +* [ENHANCEMENT] PromQL: Accept underscores in literal numbers, e.g. 1_000_000 for 1 million. #12821 +* [ENHANCEMENT] PromQL: float literal numbers and durations are now interchangeable (experimental). Example: `time() - my_timestamp > 10m`. #9138 +* [ENHANCEMENT] PromQL: use Kahan summation for sum(). #14074,#14362 +* [ENHANCEMENT] PromQL (experimental native histograms): Optimize `histogram_count` and `histogram_sum` functions. #14097 +* [ENHANCEMENT] TSDB: Better support for out-of-order experimental native histogram samples. #14438 +* [ENHANCEMENT] TSDB: Optimise seek within index. #14393 +* [ENHANCEMENT] TSDB: Optimise deletion of stale series. #14307 +* [ENHANCEMENT] TSDB: Reduce locking to optimise adding and removing series. #13286,#14286 +* [ENHANCEMENT] Regexps: Optimize patterns with multiple prefixes. #13843,#14368 +* [ENHANCEMENT] Regexps: Optimize patterns containing multiple literal strings. #14173 +* [ENHANCEMENT] AWS SD: expose Primary IPv6 addresses as __meta_ec2_primary_ipv6_addresses. #14156 +* [ENHANCEMENT] Docker SD: add MatchFirstNetwork for containers with multiple networks. 
#10490 +* [ENHANCEMENT] OpenStack SD: Use `flavor.original_name` if available. #14312 +* [ENHANCEMENT] UI (experimental native histograms): more accurate representation. #13680,#14430 +* [ENHANCEMENT] Agent: `out_of_order_time_window` config option now applies to agent. #14094 +* [ENHANCEMENT] Notifier: Send any outstanding Alertmanager notifications when shutting down. #14290 +* [ENHANCEMENT] Rules: Add label-matcher support to Rules API. #10194 +* [ENHANCEMENT] HTTP API: Add url to message logged on error while sending response. #14209 +* [BUGFIX] CLI: escape `|` characters when generating docs. #14420 +* [BUGFIX] PromQL (experimental native histograms): Fix some binary operators between native histogram values. #14454 +* [BUGFIX] TSDB: LabelNames API could fail during compaction. #14279 +* [BUGFIX] TSDB: Fix rare issue where pending OOO read can be left dangling if creating querier fails. #14341 +* [BUGFIX] TSDB: fix check for context cancellation in LabelNamesFor. #14302 +* [BUGFIX] Rules: Fix rare panic on reload. #14366 +* [BUGFIX] Config: In YAML marshalling, do not output a regexp field if it was never set. #14004 +* [BUGFIX] Remote-Write: reject samples with future timestamps. #14304 +* [BUGFIX] Remote-Write: Fix data corruption in remote write if max_sample_age is applied. #14078 +* [BUGFIX] Notifier: Fix Alertmanager discovery not updating under heavy load. #14174 +* [BUGFIX] Regexes: some Unicode characters were not matched by case-insensitive comparison. #14170,#14299 ## 2.53.1 / 2024-07-10 diff --git a/VERSION b/VERSION index f419e2c6f..69539c388 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.53.1 +2.54.0-rc.0 diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index ba924346f..02c1d2286 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - "version": "0.53.1", + "version": "0.54.0-rc.0", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,7 +29,7 @@ }, "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "0.53.1", + "@prometheus-io/lezer-promql": "0.54.0-rc.0", "lru-cache": "^7.18.3" }, "devDependencies": { diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index cbd03ae2b..af2fcae67 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/lezer-promql", - "version": "0.53.1", + "version": "0.54.0-rc.0", "description": "lezer-based PromQL grammar", "main": "dist/index.cjs", "type": "module", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 62ac34e43..17bb0f272 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -1,12 +1,12 @@ { "name": "prometheus-io", - "version": "0.53.1", + "version": "0.54.0-rc.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "prometheus-io", - "version": "0.53.1", + "version": "0.54.0-rc.0", "workspaces": [ "react-app", "module/*" @@ -30,10 +30,10 @@ }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", - "version": "0.53.1", + "version": "0.54.0-rc.0", "license": "Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.53.1", + "@prometheus-io/lezer-promql": "0.54.0-rc.0", 
"lru-cache": "^7.18.3" }, "devDependencies": { @@ -69,7 +69,7 @@ }, "module/lezer-promql": { "name": "@prometheus-io/lezer-promql", - "version": "0.53.1", + "version": "0.54.0-rc.0", "license": "Apache-2.0", "devDependencies": { "@lezer/generator": "^1.7.0", @@ -19332,7 +19332,7 @@ }, "react-app": { "name": "@prometheus-io/app", - "version": "0.53.1", + "version": "0.54.0-rc.0", "dependencies": { "@codemirror/autocomplete": "^6.17.0", "@codemirror/commands": "^6.6.0", @@ -19350,7 +19350,7 @@ "@lezer/lr": "^1.4.1", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.53.1", + "@prometheus-io/codemirror-promql": "0.54.0-rc.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^9.0.6", diff --git a/web/ui/package.json b/web/ui/package.json index 693a73dec..80e8d815f 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -28,5 +28,5 @@ "ts-jest": "^29.2.2", "typescript": "^4.9.5" }, - "version": "0.53.1" + "version": "0.54.0-rc.0" } diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index c8002433a..df90049ce 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/app", - "version": "0.53.1", + "version": "0.54.0-rc.0", "private": true, "dependencies": { "@codemirror/autocomplete": "^6.17.0", @@ -19,7 +19,7 @@ "@lezer/lr": "^1.4.1", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.53.1", + "@prometheus-io/codemirror-promql": "0.54.0-rc.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^9.0.6", From 4fb2183437728b5107b27323ff1520de0fa21203 Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Fri, 26 Jul 2024 11:21:58 +0200 Subject: [PATCH 13/99] Test a couple more cases without suffix gen Signed-off-by: Arve Knudsen --- storage/remote/otlptranslator/prometheus/normalize_name_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/storage/remote/otlptranslator/prometheus/normalize_name_test.go b/storage/remote/otlptranslator/prometheus/normalize_name_test.go index ee25bb2df..07b9b0a78 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_name_test.go +++ b/storage/remote/otlptranslator/prometheus/normalize_name_test.go @@ -200,4 +200,6 @@ func TestBuildCompliantNameWithoutSuffixes(t *testing.T) { require.Equal(t, "envoy__rule_engine_zlib_buf_error", BuildCompliantName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", false)) require.Equal(t, ":foo::bar", BuildCompliantName(createGauge(":foo::bar", ""), "", false)) require.Equal(t, ":foo::bar", BuildCompliantName(createCounter(":foo::bar", ""), "", false)) + require.Equal(t, "foo_bar", BuildCompliantName(createGauge("foo.bar", "1"), "", false)) + require.Equal(t, "system_io", BuildCompliantName(createCounter("system.io", "foo/bar"), "", false)) } From 9caba4be7d0f9e4bf2b7945a65ded197ba7acdc1 Mon Sep 17 00:00:00 2001 From: Sergey Date: Fri, 26 Jul 2024 15:32:11 +0300 Subject: [PATCH 14/99] chore: use HumanizeDuration and ConvertToFloat from prometheus/common Signed-off-by: Sergey --- template/template.go | 45 +++++--------------------------------------- 1 file changed, 5 insertions(+), 40 deletions(-) diff --git a/template/template.go b/template/template.go index dbe1607cf..c507dbe74 100644 --- a/template/template.go +++ b/template/template.go @@ -23,7 +23,6 @@ import ( "net" "net/url" "sort" - "strconv" "strings" text_template "text/template" "time" @@ -106,25 +105,6 @@ func query(ctx context.Context, q string, 
ts time.Time, queryFn QueryFunc) (quer return result, nil } -func convertToFloat(i interface{}) (float64, error) { - switch v := i.(type) { - case float64: - return v, nil - case string: - return strconv.ParseFloat(v, 64) - case int: - return float64(v), nil - case uint: - return float64(v), nil - case int64: - return float64(v), nil - case uint64: - return float64(v), nil - default: - return 0, fmt.Errorf("can't convert %T to float", v) - } -} - // Expander executes templates in text or HTML mode with a common set of Prometheus template functions. type Expander struct { text string @@ -219,7 +199,7 @@ func NewTemplateExpander( return host }, "humanize": func(i interface{}) (string, error) { - v, err := convertToFloat(i) + v, err := common_templates.ConvertToFloat(i) if err != nil { return "", err } @@ -248,7 +228,7 @@ func NewTemplateExpander( return fmt.Sprintf("%.4g%s", v, prefix), nil }, "humanize1024": func(i interface{}) (string, error) { - v, err := convertToFloat(i) + v, err := common_templates.ConvertToFloat(i) if err != nil { return "", err } @@ -267,30 +247,15 @@ func NewTemplateExpander( }, "humanizeDuration": common_templates.HumanizeDuration, "humanizePercentage": func(i interface{}) (string, error) { - v, err := convertToFloat(i) + v, err := common_templates.ConvertToFloat(i) if err != nil { return "", err } return fmt.Sprintf("%.4g%%", v*100), nil }, - "humanizeTimestamp": func(i interface{}) (string, error) { - v, err := convertToFloat(i) - if err != nil { - return "", err - } - - tm, err := floatToTime(v) - switch { - case errors.Is(err, errNaNOrInf): - return fmt.Sprintf("%.4g", v), nil - case err != nil: - return "", err - } - - return fmt.Sprint(tm), nil - }, + "humanizeTimestamp": common_templates.HumanizeTimestamp, "toTime": func(i interface{}) (*time.Time, error) { - v, err := convertToFloat(i) + v, err := common_templates.ConvertToFloat(i) if err != nil { return nil, err } From d186caead514dd81bf7f4d57629089b39ff2635a Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 29 Jul 2024 14:41:10 +0100 Subject: [PATCH 15/99] Merge pull request #14496 from bboreham/fix-nil-primary (#14509) [BUGFIX] Storage: errors from a single secondary querier should be warnings. This is a backport of #14496 to release-2.54 branch. #13434 introduced an unwanted change in behaviour: if there was no primary querier and a single secondary querier, the secondary would be treated like a primary. This PR restores the previous behaviour, that all secondary queriers report errors as warnings. In order to test this behaviour, I changed `TestMergeQuerierWithSecondaries_ErrorHandling` so it now calls `NewMergeQuerier` rather than creating the internal data structure directly. This in turn required all the data types to change, so I merged `mockGenericQuerier` into `mockQuerier`. Also replaced `unwrapMockGenericQuerier` with a visitor pattern. While I was there, I addressed the comment from https://github.com/prometheus/prometheus/pull/13434#pullrequestreview-2191058921 to short-circuit the merge of single querier with any number of no-op or nil queriers. 
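To make the intended behaviour concrete, a hypothetical caller-side sketch (imports shown, package clause omitted; failing stands in for any Querier whose Select returns an error, and the expectations in the comments restate the behaviour described above rather than code added by this change):

import (
	"context"
	"fmt"

	"github.com/prometheus/prometheus/storage"
)

func demoSecondaryErrorsAreWarnings(ctx context.Context, failing storage.Querier) {
	// One secondary and no primaries: the secondary must still be wrapped as a
	// secondary querier, so its failure surfaces as a warning, not a hard error.
	q := storage.NewMergeQuerier(nil, []storage.Querier{failing}, storage.ChainedSeriesMerge)
	ss := q.Select(ctx, true, nil)
	for ss.Next() {
		// Consume series contributed by healthy queriers, if any.
	}
	fmt.Println(ss.Err())      // expected: nil, the query itself does not fail
	fmt.Println(ss.Warnings()) // expected: contains the secondary's error as a warning
}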
Signed-off-by: Bryan Boreham --- storage/merge.go | 50 ++++--- storage/merge_test.go | 318 ++++++++++++++++++++++-------------------- 2 files changed, 204 insertions(+), 164 deletions(-) diff --git a/storage/merge.go b/storage/merge.go index 194494b6a..2424b26ab 100644 --- a/storage/merge.go +++ b/storage/merge.go @@ -45,25 +45,24 @@ type mergeGenericQuerier struct { // // In case of overlaps between the data given by primaries' and secondaries' Selects, merge function will be used. func NewMergeQuerier(primaries, secondaries []Querier, mergeFn VerticalSeriesMergeFunc) Querier { + primaries = filterQueriers(primaries) + secondaries = filterQueriers(secondaries) + switch { - case len(primaries)+len(secondaries) == 0: + case len(primaries) == 0 && len(secondaries) == 0: return noopQuerier{} case len(primaries) == 1 && len(secondaries) == 0: return primaries[0] case len(primaries) == 0 && len(secondaries) == 1: - return secondaries[0] + return &querierAdapter{newSecondaryQuerierFrom(secondaries[0])} } queriers := make([]genericQuerier, 0, len(primaries)+len(secondaries)) for _, q := range primaries { - if _, ok := q.(noopQuerier); !ok && q != nil { - queriers = append(queriers, newGenericQuerierFrom(q)) - } + queriers = append(queriers, newGenericQuerierFrom(q)) } for _, q := range secondaries { - if _, ok := q.(noopQuerier); !ok && q != nil { - queriers = append(queriers, newSecondaryQuerierFrom(q)) - } + queriers = append(queriers, newSecondaryQuerierFrom(q)) } concurrentSelect := false @@ -77,31 +76,40 @@ func NewMergeQuerier(primaries, secondaries []Querier, mergeFn VerticalSeriesMer }} } +func filterQueriers(qs []Querier) []Querier { + ret := make([]Querier, 0, len(qs)) + for _, q := range qs { + if _, ok := q.(noopQuerier); !ok && q != nil { + ret = append(ret, q) + } + } + return ret +} + // NewMergeChunkQuerier returns a new Chunk Querier that merges results of given primary and secondary chunk queriers. // See NewFanout commentary to learn more about primary vs secondary differences. // // In case of overlaps between the data given by primaries' and secondaries' Selects, merge function will be used. // TODO(bwplotka): Currently merge will compact overlapping chunks with bigger chunk, without limit. 
Split it: https://github.com/prometheus/tsdb/issues/670 func NewMergeChunkQuerier(primaries, secondaries []ChunkQuerier, mergeFn VerticalChunkSeriesMergeFunc) ChunkQuerier { + primaries = filterChunkQueriers(primaries) + secondaries = filterChunkQueriers(secondaries) + switch { case len(primaries) == 0 && len(secondaries) == 0: return noopChunkQuerier{} case len(primaries) == 1 && len(secondaries) == 0: return primaries[0] case len(primaries) == 0 && len(secondaries) == 1: - return secondaries[0] + return &chunkQuerierAdapter{newSecondaryQuerierFromChunk(secondaries[0])} } queriers := make([]genericQuerier, 0, len(primaries)+len(secondaries)) for _, q := range primaries { - if _, ok := q.(noopChunkQuerier); !ok && q != nil { - queriers = append(queriers, newGenericQuerierFromChunk(q)) - } + queriers = append(queriers, newGenericQuerierFromChunk(q)) } - for _, querier := range secondaries { - if _, ok := querier.(noopChunkQuerier); !ok && querier != nil { - queriers = append(queriers, newSecondaryQuerierFromChunk(querier)) - } + for _, q := range secondaries { + queriers = append(queriers, newSecondaryQuerierFromChunk(q)) } concurrentSelect := false @@ -115,6 +123,16 @@ func NewMergeChunkQuerier(primaries, secondaries []ChunkQuerier, mergeFn Vertica }} } +func filterChunkQueriers(qs []ChunkQuerier) []ChunkQuerier { + ret := make([]ChunkQuerier, 0, len(qs)) + for _, q := range qs { + if _, ok := q.(noopChunkQuerier); !ok && q != nil { + ret = append(ret, q) + } + } + return ret +} + // Select returns a set of series that matches the given label matchers. func (q *mergeGenericQuerier) Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet { seriesSets := make([]genericSeriesSet, 0, len(q.queriers)) diff --git a/storage/merge_test.go b/storage/merge_test.go index 7619af3c1..b145743c8 100644 --- a/storage/merge_test.go +++ b/storage/merge_test.go @@ -912,9 +912,23 @@ func TestConcatenatingChunkIterator(t *testing.T) { } type mockQuerier struct { - LabelQuerier + mtx sync.Mutex - toReturn []Series + toReturn []Series // Response for Select. + + closed bool + labelNamesCalls int + labelNamesRequested []labelNameRequest + sortedSeriesRequested []bool + + resp []string // Response for LabelNames and LabelValues; turned into Select response if toReturn is not supplied. + warnings annotations.Annotations + err error +} + +type labelNameRequest struct { + name string + matchers []*labels.Matcher } type seriesByLabel []Series @@ -924,13 +938,47 @@ func (a seriesByLabel) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a seriesByLabel) Less(i, j int) bool { return labels.Compare(a[i].Labels(), a[j].Labels()) < 0 } func (m *mockQuerier) Select(_ context.Context, sortSeries bool, _ *SelectHints, _ ...*labels.Matcher) SeriesSet { - cpy := make([]Series, len(m.toReturn)) - copy(cpy, m.toReturn) + m.mtx.Lock() + defer m.mtx.Unlock() + m.sortedSeriesRequested = append(m.sortedSeriesRequested, sortSeries) + + var ret []Series + if len(m.toReturn) > 0 { + ret = make([]Series, len(m.toReturn)) + copy(ret, m.toReturn) + } else if len(m.resp) > 0 { + ret = make([]Series, 0, len(m.resp)) + for _, l := range m.resp { + ret = append(ret, NewListSeries(labels.FromStrings("test", l), nil)) + } + } if sortSeries { - sort.Sort(seriesByLabel(cpy)) + sort.Sort(seriesByLabel(ret)) } - return NewMockSeriesSet(cpy...) 
+ return &mockSeriesSet{idx: -1, series: ret, warnings: m.warnings, err: m.err} +} + +func (m *mockQuerier) LabelValues(_ context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + m.mtx.Lock() + m.labelNamesRequested = append(m.labelNamesRequested, labelNameRequest{ + name: name, + matchers: matchers, + }) + m.mtx.Unlock() + return m.resp, m.warnings, m.err +} + +func (m *mockQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { + m.mtx.Lock() + m.labelNamesCalls++ + m.mtx.Unlock() + return m.resp, m.warnings, m.err +} + +func (m *mockQuerier) Close() error { + m.closed = true + return nil } type mockChunkQuerier struct { @@ -960,6 +1008,9 @@ func (m *mockChunkQuerier) Select(_ context.Context, sortSeries bool, _ *SelectH type mockSeriesSet struct { idx int series []Series + + warnings annotations.Annotations + err error } func NewMockSeriesSet(series ...Series) SeriesSet { @@ -970,15 +1021,18 @@ func NewMockSeriesSet(series ...Series) SeriesSet { } func (m *mockSeriesSet) Next() bool { + if m.err != nil { + return false + } m.idx++ return m.idx < len(m.series) } func (m *mockSeriesSet) At() Series { return m.series[m.idx] } -func (m *mockSeriesSet) Err() error { return nil } +func (m *mockSeriesSet) Err() error { return m.err } -func (m *mockSeriesSet) Warnings() annotations.Annotations { return nil } +func (m *mockSeriesSet) Warnings() annotations.Annotations { return m.warnings } type mockChunkSeriesSet struct { idx int @@ -1336,105 +1390,44 @@ func BenchmarkMergeSeriesSet(b *testing.B) { } } -type mockGenericQuerier struct { - mtx sync.Mutex - - closed bool - labelNamesCalls int - labelNamesRequested []labelNameRequest - sortedSeriesRequested []bool - - resp []string - warnings annotations.Annotations - err error -} - -type labelNameRequest struct { - name string - matchers []*labels.Matcher -} - -func (m *mockGenericQuerier) Select(_ context.Context, b bool, _ *SelectHints, _ ...*labels.Matcher) genericSeriesSet { - m.mtx.Lock() - m.sortedSeriesRequested = append(m.sortedSeriesRequested, b) - m.mtx.Unlock() - return &mockGenericSeriesSet{resp: m.resp, warnings: m.warnings, err: m.err} -} - -func (m *mockGenericQuerier) LabelValues(_ context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { - m.mtx.Lock() - m.labelNamesRequested = append(m.labelNamesRequested, labelNameRequest{ - name: name, - matchers: matchers, - }) - m.mtx.Unlock() - return m.resp, m.warnings, m.err -} - -func (m *mockGenericQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { - m.mtx.Lock() - m.labelNamesCalls++ - m.mtx.Unlock() - return m.resp, m.warnings, m.err -} - -func (m *mockGenericQuerier) Close() error { - m.closed = true - return nil -} - -type mockGenericSeriesSet struct { - resp []string - warnings annotations.Annotations - err error - - curr int -} - -func (m *mockGenericSeriesSet) Next() bool { - if m.err != nil { - return false +func visitMockQueriers(t *testing.T, qr Querier, f func(t *testing.T, q *mockQuerier)) int { + count := 0 + switch x := qr.(type) { + case *mockQuerier: + count++ + f(t, x) + case *querierAdapter: + count += visitMockQueriersInGenericQuerier(t, x.genericQuerier, f) } - if m.curr >= len(m.resp) { - return false + return count +} + +func visitMockQueriersInGenericQuerier(t *testing.T, g genericQuerier, f func(t *testing.T, q 
*mockQuerier)) int { + count := 0 + switch x := g.(type) { + case *mergeGenericQuerier: + for _, q := range x.queriers { + count += visitMockQueriersInGenericQuerier(t, q, f) + } + case *genericQuerierAdapter: + // Visitor for chunkQuerier not implemented. + count += visitMockQueriers(t, x.q, f) + case *secondaryQuerier: + count += visitMockQueriersInGenericQuerier(t, x.genericQuerier, f) } - m.curr++ - return true + return count } -func (m *mockGenericSeriesSet) Err() error { return m.err } -func (m *mockGenericSeriesSet) Warnings() annotations.Annotations { return m.warnings } - -func (m *mockGenericSeriesSet) At() Labels { - return mockLabels(m.resp[m.curr-1]) -} - -type mockLabels string - -func (l mockLabels) Labels() labels.Labels { - return labels.FromStrings("test", string(l)) -} - -func unwrapMockGenericQuerier(t *testing.T, qr genericQuerier) *mockGenericQuerier { - m, ok := qr.(*mockGenericQuerier) - if !ok { - s, ok := qr.(*secondaryQuerier) - require.True(t, ok, "expected secondaryQuerier got something else") - m, ok = s.genericQuerier.(*mockGenericQuerier) - require.True(t, ok, "expected mockGenericQuerier got something else") - } - return m -} - -func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) { +func TestMergeQuerierWithSecondaries_ErrorHandling(t *testing.T) { var ( errStorage = errors.New("storage error") warnStorage = errors.New("storage warning") ctx = context.Background() ) for _, tcase := range []struct { - name string - queriers []genericQuerier + name string + primaries []Querier + secondaries []Querier expectedSelectsSeries []labels.Labels expectedLabels []string @@ -1443,10 +1436,8 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) { expectedErrs [4]error }{ { - // NewMergeQuerier will not create a mergeGenericQuerier - // with just one querier inside, but we can test it anyway. 
- name: "one successful primary querier", - queriers: []genericQuerier{&mockGenericQuerier{resp: []string{"a", "b"}, warnings: nil, err: nil}}, + name: "one successful primary querier", + primaries: []Querier{&mockQuerier{resp: []string{"a", "b"}, warnings: nil, err: nil}}, expectedSelectsSeries: []labels.Labels{ labels.FromStrings("test", "a"), labels.FromStrings("test", "b"), @@ -1455,9 +1446,9 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) { }, { name: "multiple successful primary queriers", - queriers: []genericQuerier{ - &mockGenericQuerier{resp: []string{"a", "b"}, warnings: nil, err: nil}, - &mockGenericQuerier{resp: []string{"b", "c"}, warnings: nil, err: nil}, + primaries: []Querier{ + &mockQuerier{resp: []string{"a", "b"}, warnings: nil, err: nil}, + &mockQuerier{resp: []string{"b", "c"}, warnings: nil, err: nil}, }, expectedSelectsSeries: []labels.Labels{ labels.FromStrings("test", "a"), @@ -1468,15 +1459,17 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) { }, { name: "one failed primary querier", - queriers: []genericQuerier{&mockGenericQuerier{warnings: nil, err: errStorage}}, + primaries: []Querier{&mockQuerier{warnings: nil, err: errStorage}}, expectedErrs: [4]error{errStorage, errStorage, errStorage, errStorage}, }, { name: "one successful primary querier with successful secondaries", - queriers: []genericQuerier{ - &mockGenericQuerier{resp: []string{"a", "b"}, warnings: nil, err: nil}, - &secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"b"}, warnings: nil, err: nil}}, - &secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"c"}, warnings: nil, err: nil}}, + primaries: []Querier{ + &mockQuerier{resp: []string{"a", "b"}, warnings: nil, err: nil}, + }, + secondaries: []Querier{ + &mockQuerier{resp: []string{"b"}, warnings: nil, err: nil}, + &mockQuerier{resp: []string{"c"}, warnings: nil, err: nil}, }, expectedSelectsSeries: []labels.Labels{ labels.FromStrings("test", "a"), @@ -1487,10 +1480,12 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) { }, { name: "one successful primary querier with empty response and successful secondaries", - queriers: []genericQuerier{ - &mockGenericQuerier{resp: []string{}, warnings: nil, err: nil}, - &secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"b"}, warnings: nil, err: nil}}, - &secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"c"}, warnings: nil, err: nil}}, + primaries: []Querier{ + &mockQuerier{resp: []string{}, warnings: nil, err: nil}, + }, + secondaries: []Querier{ + &mockQuerier{resp: []string{"b"}, warnings: nil, err: nil}, + &mockQuerier{resp: []string{"c"}, warnings: nil, err: nil}, }, expectedSelectsSeries: []labels.Labels{ labels.FromStrings("test", "b"), @@ -1500,19 +1495,42 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) { }, { name: "one failed primary querier with successful secondaries", - queriers: []genericQuerier{ - &mockGenericQuerier{warnings: nil, err: errStorage}, - &secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"b"}, warnings: nil, err: nil}}, - &secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"c"}, warnings: nil, err: nil}}, + primaries: []Querier{ + &mockQuerier{warnings: nil, err: errStorage}, + }, + secondaries: []Querier{ + &mockQuerier{resp: []string{"b"}, warnings: nil, err: nil}, + &mockQuerier{resp: []string{"c"}, warnings: nil, err: nil}, }, expectedErrs: [4]error{errStorage, 
errStorage, errStorage, errStorage}, }, + { + name: "nil primary querier with failed secondary", + primaries: nil, + secondaries: []Querier{ + &mockQuerier{resp: []string{"b"}, warnings: nil, err: errStorage}, + }, + expectedLabels: []string{}, + expectedWarnings: annotations.New().Add(errStorage), + }, + { + name: "nil primary querier with two failed secondaries", + primaries: nil, + secondaries: []Querier{ + &mockQuerier{resp: []string{"b"}, warnings: nil, err: errStorage}, + &mockQuerier{resp: []string{"c"}, warnings: nil, err: errStorage}, + }, + expectedLabels: []string{}, + expectedWarnings: annotations.New().Add(errStorage), + }, { name: "one successful primary querier with failed secondaries", - queriers: []genericQuerier{ - &mockGenericQuerier{resp: []string{"a"}, warnings: nil, err: nil}, - &secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"b"}, warnings: nil, err: errStorage}}, - &secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"c"}, warnings: nil, err: errStorage}}, + primaries: []Querier{ + &mockQuerier{resp: []string{"a"}, warnings: nil, err: nil}, + }, + secondaries: []Querier{ + &mockQuerier{resp: []string{"b"}, warnings: nil, err: errStorage}, + &mockQuerier{resp: []string{"c"}, warnings: nil, err: errStorage}, }, expectedSelectsSeries: []labels.Labels{ labels.FromStrings("test", "a"), @@ -1522,9 +1540,11 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) { }, { name: "successful queriers with warnings", - queriers: []genericQuerier{ - &mockGenericQuerier{resp: []string{"a"}, warnings: annotations.New().Add(warnStorage), err: nil}, - &secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"b"}, warnings: annotations.New().Add(warnStorage), err: nil}}, + primaries: []Querier{ + &mockQuerier{resp: []string{"a"}, warnings: annotations.New().Add(warnStorage), err: nil}, + }, + secondaries: []Querier{ + &mockQuerier{resp: []string{"b"}, warnings: annotations.New().Add(warnStorage), err: nil}, }, expectedSelectsSeries: []labels.Labels{ labels.FromStrings("test", "a"), @@ -1535,10 +1555,7 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) { }, } { t.Run(tcase.name, func(t *testing.T) { - q := &mergeGenericQuerier{ - queriers: tcase.queriers, - mergeFn: func(l ...Labels) Labels { return l[0] }, - } + q := NewMergeQuerier(tcase.primaries, tcase.secondaries, func(s ...Series) Series { return s[0] }) t.Run("Select", func(t *testing.T) { res := q.Select(context.Background(), false, nil) @@ -1551,65 +1568,70 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) { require.ErrorIs(t, res.Err(), tcase.expectedErrs[0], "expected error doesn't match") require.Equal(t, tcase.expectedSelectsSeries, lbls) - for _, qr := range q.queriers { - m := unwrapMockGenericQuerier(t, qr) - // mergeGenericQuerier forces all Selects to be sorted. - require.Equal(t, []bool{true}, m.sortedSeriesRequested) - } + n := visitMockQueriers(t, q, func(t *testing.T, m *mockQuerier) { + // Single queries should be unsorted; merged queries sorted. + exp := len(tcase.primaries)+len(tcase.secondaries) > 1 + require.Equal(t, []bool{exp}, m.sortedSeriesRequested) + }) + // Check we visited all queriers. 
+ require.Equal(t, len(tcase.primaries)+len(tcase.secondaries), n) }) t.Run("LabelNames", func(t *testing.T) { res, w, err := q.LabelNames(ctx, nil) require.Subset(t, tcase.expectedWarnings, w) require.ErrorIs(t, err, tcase.expectedErrs[1], "expected error doesn't match") - require.Equal(t, tcase.expectedLabels, res) + requireEqualSlice(t, tcase.expectedLabels, res) if err != nil { return } - for _, qr := range q.queriers { - m := unwrapMockGenericQuerier(t, qr) - + visitMockQueriers(t, q, func(t *testing.T, m *mockQuerier) { require.Equal(t, 1, m.labelNamesCalls) - } + }) }) t.Run("LabelValues", func(t *testing.T) { res, w, err := q.LabelValues(ctx, "test", nil) require.Subset(t, tcase.expectedWarnings, w) require.ErrorIs(t, err, tcase.expectedErrs[2], "expected error doesn't match") - require.Equal(t, tcase.expectedLabels, res) + requireEqualSlice(t, tcase.expectedLabels, res) if err != nil { return } - for _, qr := range q.queriers { - m := unwrapMockGenericQuerier(t, qr) - + visitMockQueriers(t, q, func(t *testing.T, m *mockQuerier) { require.Equal(t, []labelNameRequest{{name: "test"}}, m.labelNamesRequested) - } + }) }) t.Run("LabelValuesWithMatchers", func(t *testing.T) { matcher := labels.MustNewMatcher(labels.MatchEqual, "otherLabel", "someValue") res, w, err := q.LabelValues(ctx, "test2", nil, matcher) require.Subset(t, tcase.expectedWarnings, w) require.ErrorIs(t, err, tcase.expectedErrs[3], "expected error doesn't match") - require.Equal(t, tcase.expectedLabels, res) + requireEqualSlice(t, tcase.expectedLabels, res) if err != nil { return } - for _, qr := range q.queriers { - m := unwrapMockGenericQuerier(t, qr) - + visitMockQueriers(t, q, func(t *testing.T, m *mockQuerier) { require.Equal(t, []labelNameRequest{ {name: "test"}, {name: "test2", matchers: []*labels.Matcher{matcher}}, }, m.labelNamesRequested) - } + }) }) }) } } +// Check slice but ignore difference between nil and empty. +func requireEqualSlice[T any](t require.TestingT, a, b []T, msgAndArgs ...interface{}) { + if len(a) == 0 { + require.Empty(t, b, msgAndArgs...) + } else { + require.Equal(t, a, b, msgAndArgs...) 
+ } +} + type errIterator struct { err error } From 03963b9ba059af04a50f869578519ff159bf2b14 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Tue, 30 Jul 2024 10:11:16 +0100 Subject: [PATCH 16/99] Merge pull request #14515 from prometheus/revert-13777-remoteread2 (#14524) Revert "Chunked remote read: close the querier earlier" Signed-off-by: Bryan Boreham --- storage/remote/read_handler.go | 53 ++++++++++++++-------------------- 1 file changed, 21 insertions(+), 32 deletions(-) diff --git a/storage/remote/read_handler.go b/storage/remote/read_handler.go index 2a00ce897..ffc64c9c3 100644 --- a/storage/remote/read_handler.go +++ b/storage/remote/read_handler.go @@ -202,16 +202,34 @@ func (h *readHandler) remoteReadStreamedXORChunks(ctx context.Context, w http.Re return err } - chunks := h.getChunkSeriesSet(ctx, query, filteredMatchers) - if err := chunks.Err(); err != nil { + querier, err := h.queryable.ChunkQuerier(query.StartTimestampMs, query.EndTimestampMs) + if err != nil { return err } + defer func() { + if err := querier.Close(); err != nil { + level.Warn(h.logger).Log("msg", "Error on chunk querier close", "err", err.Error()) + } + }() + + var hints *storage.SelectHints + if query.Hints != nil { + hints = &storage.SelectHints{ + Start: query.Hints.StartMs, + End: query.Hints.EndMs, + Step: query.Hints.StepMs, + Func: query.Hints.Func, + Grouping: query.Hints.Grouping, + Range: query.Hints.RangeMs, + By: query.Hints.By, + } + } ws, err := StreamChunkedReadResponses( NewChunkedWriter(w, f), int64(i), // The streaming API has to provide the series sorted. - chunks, + querier.Select(ctx, true, hints, filteredMatchers...), sortedExternalLabels, h.remoteReadMaxBytesInFrame, h.marshalPool, @@ -236,35 +254,6 @@ func (h *readHandler) remoteReadStreamedXORChunks(ctx context.Context, w http.Re } } -// getChunkSeriesSet executes a query to retrieve a ChunkSeriesSet, -// encapsulating the operation in its own function to ensure timely release of -// the querier resources. -func (h *readHandler) getChunkSeriesSet(ctx context.Context, query *prompb.Query, filteredMatchers []*labels.Matcher) storage.ChunkSeriesSet { - querier, err := h.queryable.ChunkQuerier(query.StartTimestampMs, query.EndTimestampMs) - if err != nil { - return storage.ErrChunkSeriesSet(err) - } - defer func() { - if err := querier.Close(); err != nil { - level.Warn(h.logger).Log("msg", "Error on chunk querier close", "err", err.Error()) - } - }() - - var hints *storage.SelectHints - if query.Hints != nil { - hints = &storage.SelectHints{ - Start: query.Hints.StartMs, - End: query.Hints.EndMs, - Step: query.Hints.StepMs, - Func: query.Hints.Func, - Grouping: query.Hints.Grouping, - Range: query.Hints.RangeMs, - By: query.Hints.By, - } - } - return querier.Select(ctx, true, hints, filteredMatchers...) -} - // filterExtLabelsFromMatchers change equality matchers which match external labels // to a matcher that looks for an empty label, // as that label should not be present in the storage. 
From 2898d5d715738776bf8dd31d424a9b297380a2f3 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Tue, 30 Jul 2024 10:15:23 +0100 Subject: [PATCH 17/99] Add #14515 to CHANGELOG Signed-off-by: Bryan Boreham --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 02ffc5e4b..115055d12 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -39,6 +39,7 @@ Remote-write v2 is enabled by default, but can be disabled via feature-flag `web * [BUGFIX] Remote-Write: Fix data corruption in remote write if max_sample_age is applied. #14078 * [BUGFIX] Notifier: Fix Alertmanager discovery not updating under heavy load. #14174 * [BUGFIX] Regexes: some Unicode characters were not matched by case-insensitive comparison. #14170,#14299 +* [BUGFIX] Remote-Read: Resolve occasional segmentation fault on query. #14515 ## 2.53.1 / 2024-07-10 From d90c5a71d7fabf13c00925a4f7a4011355a94681 Mon Sep 17 00:00:00 2001 From: Owen Williams Date: Thu, 1 Aug 2024 10:07:08 -0400 Subject: [PATCH 18/99] support quoting in grouping label lists Signed-off-by: Owen Williams --- promql/parser/generated_parser.y | 12 +- promql/parser/generated_parser.y.go | 785 ++++++++++++++-------------- promql/parser/lex.go | 13 - promql/parser/parse_test.go | 46 ++ promql/parser/printer.go | 14 +- promql/parser/printer_test.go | 11 + 6 files changed, 480 insertions(+), 401 deletions(-) diff --git a/promql/parser/generated_parser.y b/promql/parser/generated_parser.y index b8e6aa373..da24be0c4 100644 --- a/promql/parser/generated_parser.y +++ b/promql/parser/generated_parser.y @@ -23,6 +23,8 @@ import ( "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/promql/parser/posrange" + + "github.com/prometheus/common/model" ) %} @@ -360,11 +362,19 @@ grouping_label_list: grouping_label : maybe_label { - if !isLabel($1.Val) { + if !model.LabelName($1.Val).IsValid() { yylex.(*parser).unexpected("grouping opts", "label") } $$ = $1 } + | STRING { + if !model.LabelName(yylex.(*parser).unquoteString($1.Val)).IsValid() { + yylex.(*parser).unexpected("grouping opts", "label") + } + $$ = $1 + $$.Pos++ + $$.Val = yylex.(*parser).unquoteString($$.Val) + } | error { yylex.(*parser).unexpected("grouping opts", "label"); $$ = Item{} } ; diff --git a/promql/parser/generated_parser.y.go b/promql/parser/generated_parser.y.go index 62786052e..22231f73e 100644 --- a/promql/parser/generated_parser.y.go +++ b/promql/parser/generated_parser.y.go @@ -12,6 +12,8 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/promql/parser/posrange" + + "github.com/prometheus/common/model" ) type yySymType struct { @@ -249,290 +251,293 @@ var yyExca = [...]int16{ 1, -1, -2, 0, -1, 37, - 1, 137, - 10, 137, - 24, 137, + 1, 138, + 10, 138, + 24, 138, -2, 0, -1, 61, - 2, 180, - 15, 180, - 79, 180, - 85, 180, - -2, 101, - -1, 62, 2, 181, 15, 181, 79, 181, 85, 181, -2, 102, - -1, 63, + -1, 62, 2, 182, 15, 182, 79, 182, 85, 182, - -2, 104, - -1, 64, + -2, 103, + -1, 63, 2, 183, 15, 183, 79, 183, 85, 183, -2, 105, - -1, 65, + -1, 64, 2, 184, 15, 184, 79, 184, 85, 184, -2, 106, - -1, 66, + -1, 65, 2, 185, 15, 185, 79, 185, 85, 185, - -2, 111, - -1, 67, + -2, 107, + -1, 66, 2, 186, 15, 186, 79, 186, 85, 186, - -2, 113, - -1, 68, + -2, 112, + -1, 67, 2, 187, 15, 187, 79, 187, 85, 187, - -2, 115, - -1, 69, + -2, 114, + -1, 68, 2, 188, 15, 188, 79, 188, 85, 188, -2, 116, - -1, 70, + -1, 69, 2, 189, 
15, 189, 79, 189, 85, 189, -2, 117, - -1, 71, + -1, 70, 2, 190, 15, 190, 79, 190, 85, 190, -2, 118, - -1, 72, + -1, 71, 2, 191, 15, 191, 79, 191, 85, 191, -2, 119, - -1, 73, + -1, 72, 2, 192, 15, 192, 79, 192, 85, 192, - -2, 123, - -1, 74, + -2, 120, + -1, 73, 2, 193, 15, 193, 79, 193, 85, 193, -2, 124, + -1, 74, + 2, 194, + 15, 194, + 79, 194, + 85, 194, + -2, 125, -1, 200, - 9, 242, - 12, 242, - 13, 242, - 18, 242, - 19, 242, - 25, 242, - 41, 242, - 47, 242, - 48, 242, - 51, 242, - 57, 242, - 62, 242, - 63, 242, - 64, 242, - 65, 242, - 66, 242, - 67, 242, - 68, 242, - 69, 242, - 70, 242, - 71, 242, - 72, 242, - 73, 242, - 74, 242, - 75, 242, - 79, 242, - 83, 242, - 85, 242, - 88, 242, - 89, 242, + 9, 243, + 12, 243, + 13, 243, + 18, 243, + 19, 243, + 25, 243, + 41, 243, + 47, 243, + 48, 243, + 51, 243, + 57, 243, + 62, 243, + 63, 243, + 64, 243, + 65, 243, + 66, 243, + 67, 243, + 68, 243, + 69, 243, + 70, 243, + 71, 243, + 72, 243, + 73, 243, + 74, 243, + 75, 243, + 79, 243, + 83, 243, + 85, 243, + 88, 243, + 89, 243, -2, 0, -1, 201, - 9, 242, - 12, 242, - 13, 242, - 18, 242, - 19, 242, - 25, 242, - 41, 242, - 47, 242, - 48, 242, - 51, 242, - 57, 242, - 62, 242, - 63, 242, - 64, 242, - 65, 242, - 66, 242, - 67, 242, - 68, 242, - 69, 242, - 70, 242, - 71, 242, - 72, 242, - 73, 242, - 74, 242, - 75, 242, - 79, 242, - 83, 242, - 85, 242, - 88, 242, - 89, 242, + 9, 243, + 12, 243, + 13, 243, + 18, 243, + 19, 243, + 25, 243, + 41, 243, + 47, 243, + 48, 243, + 51, 243, + 57, 243, + 62, 243, + 63, 243, + 64, 243, + 65, 243, + 66, 243, + 67, 243, + 68, 243, + 69, 243, + 70, 243, + 71, 243, + 72, 243, + 73, 243, + 74, 243, + 75, 243, + 79, 243, + 83, 243, + 85, 243, + 88, 243, + 89, 243, -2, 0, } const yyPrivate = 57344 -const yyLast = 763 +const yyLast = 799 var yyAct = [...]int16{ - 155, 333, 331, 275, 338, 152, 226, 39, 192, 44, - 290, 289, 156, 118, 82, 178, 106, 55, 109, 105, - 53, 77, 133, 56, 110, 108, 22, 54, 356, 6, - 172, 107, 60, 57, 345, 346, 347, 348, 111, 198, - 328, 199, 200, 201, 327, 154, 303, 355, 266, 75, - 354, 151, 160, 128, 259, 18, 19, 160, 55, 20, - 301, 101, 159, 104, 113, 76, 114, 159, 54, 258, - 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, - 71, 72, 73, 74, 161, 112, 269, 13, 103, 161, - 292, 24, 115, 30, 309, 265, 31, 32, 332, 267, - 162, 270, 109, 223, 323, 162, 150, 222, 110, 308, - 301, 263, 310, 149, 161, 163, 307, 271, 264, 173, - 167, 170, 221, 322, 166, 2, 3, 4, 5, 194, - 162, 157, 158, 179, 262, 180, 184, 197, 165, 186, - 196, 195, 202, 203, 204, 205, 206, 207, 208, 209, - 210, 211, 212, 213, 214, 215, 216, 129, 188, 121, - 217, 121, 219, 220, 55, 38, 218, 53, 77, 119, - 56, 119, 339, 22, 54, 182, 169, 260, 298, 117, - 57, 187, 122, 297, 122, 181, 183, 160, 295, 168, - 261, 180, 111, 77, 164, 55, 75, 159, 296, 357, - 7, 55, 18, 19, 268, 54, 20, 294, 35, 287, - 288, 54, 76, 291, 321, 320, 319, 61, 62, 63, + 155, 334, 332, 276, 339, 152, 226, 39, 192, 44, + 291, 290, 156, 118, 82, 178, 229, 107, 106, 346, + 347, 348, 349, 109, 108, 198, 239, 199, 133, 110, + 105, 60, 245, 121, 6, 329, 325, 111, 328, 228, + 200, 201, 160, 119, 304, 267, 293, 128, 260, 160, + 151, 261, 159, 302, 358, 311, 122, 55, 89, 159, + 196, 241, 242, 259, 113, 243, 114, 54, 98, 99, + 302, 112, 101, 256, 104, 88, 230, 232, 234, 235, + 236, 244, 246, 249, 250, 251, 252, 253, 257, 258, + 160, 115, 231, 233, 237, 238, 240, 247, 248, 103, + 159, 109, 254, 255, 324, 150, 357, 110, 333, 218, + 111, 340, 310, 149, 77, 163, 7, 105, 35, 173, + 167, 170, 161, 323, 165, 356, 166, 309, 355, 194, + 2, 3, 4, 5, 
308, 322, 184, 197, 162, 186, + 321, 195, 202, 203, 204, 205, 206, 207, 208, 209, + 210, 211, 212, 213, 214, 215, 216, 229, 129, 101, + 217, 104, 219, 220, 190, 266, 270, 239, 160, 121, + 268, 193, 264, 245, 55, 196, 154, 225, 159, 119, + 228, 271, 188, 160, 54, 161, 103, 117, 265, 84, + 262, 299, 122, 159, 320, 263, 298, 272, 10, 83, + 161, 162, 241, 242, 269, 187, 243, 185, 79, 288, + 289, 297, 319, 292, 256, 161, 162, 230, 232, 234, + 235, 236, 244, 246, 249, 250, 251, 252, 253, 257, + 258, 162, 294, 231, 233, 237, 238, 240, 247, 248, + 318, 317, 316, 254, 255, 180, 315, 134, 135, 136, + 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, + 147, 148, 157, 158, 169, 105, 314, 296, 300, 301, + 303, 223, 305, 313, 55, 222, 179, 168, 180, 84, + 306, 307, 177, 125, 54, 182, 295, 176, 124, 83, + 221, 312, 87, 89, 8, 181, 183, 81, 37, 86, + 175, 123, 36, 98, 99, 326, 327, 101, 102, 104, + 88, 127, 331, 126, 50, 336, 337, 338, 182, 335, + 78, 1, 342, 341, 344, 343, 49, 48, 181, 183, + 350, 351, 47, 55, 103, 352, 53, 77, 164, 56, + 46, 354, 22, 54, 59, 55, 172, 9, 9, 57, + 132, 45, 43, 130, 171, 54, 359, 42, 131, 41, + 40, 51, 191, 353, 273, 75, 85, 189, 224, 80, + 345, 18, 19, 120, 153, 20, 58, 227, 52, 116, + 0, 76, 0, 0, 0, 0, 61, 62, 63, 64, + 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, + 0, 0, 0, 13, 0, 0, 0, 24, 0, 30, + 0, 0, 31, 32, 55, 38, 0, 53, 77, 0, + 56, 275, 0, 22, 54, 0, 0, 0, 274, 0, + 57, 0, 278, 279, 277, 284, 286, 283, 285, 280, + 281, 282, 287, 0, 0, 0, 75, 0, 0, 0, + 0, 0, 18, 19, 0, 0, 20, 0, 0, 0, + 0, 0, 76, 0, 0, 0, 0, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, - 74, 182, 293, 318, 13, 160, 317, 316, 24, 315, - 30, 181, 183, 31, 32, 159, 134, 135, 136, 137, - 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, - 148, 314, 313, 55, 105, 84, 84, 299, 300, 302, - 86, 304, 177, 54, 190, 83, 83, 176, 160, 305, - 306, 193, 125, 185, 81, 196, 10, 124, 159, 312, - 175, 311, 89, 50, 8, 36, 79, 228, 37, 78, - 123, 1, 98, 99, 325, 326, 101, 238, 104, 88, - 161, 330, 49, 244, 335, 336, 337, 324, 334, 48, - 47, 341, 340, 343, 342, 127, 162, 126, 59, 349, - 350, 9, 9, 103, 351, 46, 132, 45, 43, 130, - 353, 171, 240, 241, 42, 131, 242, 41, 40, 51, - 191, 352, 272, 85, 255, 358, 189, 229, 231, 233, - 234, 235, 243, 245, 248, 249, 250, 251, 252, 256, - 257, 224, 80, 230, 232, 236, 237, 239, 246, 247, - 344, 120, 55, 253, 254, 53, 77, 153, 56, 274, - 58, 22, 54, 227, 52, 116, 273, 0, 57, 0, - 277, 278, 276, 283, 285, 282, 284, 279, 280, 281, - 286, 0, 0, 0, 75, 0, 0, 0, 0, 0, - 18, 19, 0, 0, 20, 0, 0, 0, 0, 0, - 76, 0, 0, 0, 0, 61, 62, 63, 64, 65, - 66, 67, 68, 69, 70, 71, 72, 73, 74, 228, - 0, 0, 13, 0, 0, 0, 24, 0, 30, 238, - 329, 31, 32, 0, 0, 244, 0, 0, 0, 225, - 0, 277, 278, 276, 283, 285, 282, 284, 279, 280, - 281, 286, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 240, 241, 0, 0, 242, 0, - 0, 0, 17, 77, 0, 105, 255, 0, 22, 229, - 231, 233, 234, 235, 243, 245, 248, 249, 250, 251, - 252, 256, 257, 0, 0, 230, 232, 236, 237, 239, - 246, 247, 87, 89, 0, 253, 254, 18, 19, 0, - 0, 20, 0, 98, 99, 17, 35, 101, 102, 104, - 88, 22, 11, 12, 14, 15, 16, 21, 23, 25, - 26, 27, 28, 29, 33, 34, 0, 0, 0, 13, - 0, 0, 0, 24, 103, 30, 0, 0, 31, 32, - 18, 19, 0, 0, 20, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 11, 12, 14, 15, 16, - 21, 23, 25, 26, 27, 28, 29, 33, 34, 105, - 0, 0, 13, 0, 0, 0, 24, 174, 30, 0, - 0, 31, 32, 0, 0, 0, 0, 0, 105, 0, - 0, 0, 0, 0, 0, 0, 87, 89, 90, 0, - 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, - 0, 101, 102, 104, 88, 87, 89, 90, 0, 91, - 92, 93, 94, 95, 96, 97, 98, 99, 100, 0, - 101, 102, 
104, 88, 105, 0, 0, 0, 103, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 105, 0, 0, 0, 103, 0, 0, - 0, 87, 89, 90, 0, 91, 92, 93, 0, 95, + 74, 0, 0, 0, 13, 0, 0, 0, 24, 0, + 30, 0, 55, 31, 32, 53, 77, 0, 56, 330, + 0, 22, 54, 0, 0, 0, 0, 0, 57, 0, + 278, 279, 277, 284, 286, 283, 285, 280, 281, 282, + 287, 0, 0, 0, 75, 0, 0, 0, 0, 0, + 18, 19, 0, 0, 20, 0, 0, 0, 17, 77, + 76, 0, 0, 0, 22, 61, 62, 63, 64, 65, + 66, 67, 68, 69, 70, 71, 72, 73, 74, 0, + 0, 0, 13, 0, 0, 0, 24, 0, 30, 0, + 0, 31, 32, 18, 19, 0, 0, 20, 0, 0, + 0, 17, 35, 0, 0, 0, 0, 22, 11, 12, + 14, 15, 16, 21, 23, 25, 26, 27, 28, 29, + 33, 34, 0, 0, 0, 13, 0, 0, 0, 24, + 0, 30, 0, 0, 31, 32, 18, 19, 0, 0, + 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 11, 12, 14, 15, 16, 21, 23, 25, 26, + 27, 28, 29, 33, 34, 105, 0, 0, 13, 0, + 0, 0, 24, 174, 30, 0, 0, 31, 32, 0, + 0, 0, 0, 0, 105, 0, 0, 0, 0, 0, + 0, 0, 87, 89, 90, 0, 91, 92, 93, 94, + 95, 96, 97, 98, 99, 100, 0, 101, 102, 104, + 88, 87, 89, 90, 0, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 0, 101, 102, 104, 88, - 87, 89, 90, 0, 91, 92, 0, 0, 95, 96, - 0, 98, 99, 100, 0, 101, 102, 104, 88, 0, - 0, 0, 0, 103, 0, 0, 0, 0, 0, 0, + 105, 0, 0, 0, 103, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 105, + 0, 0, 0, 103, 0, 0, 0, 87, 89, 90, + 0, 91, 92, 93, 0, 95, 96, 97, 98, 99, + 100, 0, 101, 102, 104, 88, 87, 89, 90, 0, + 91, 92, 0, 0, 95, 96, 0, 98, 99, 100, + 0, 101, 102, 104, 88, 0, 0, 0, 0, 103, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 103, + 0, 0, 0, 0, 0, 0, 0, 0, 103, } var yyPact = [...]int16{ - 27, 190, 533, 533, 155, 490, -1000, -1000, -1000, 195, + 32, 106, 569, 569, 405, 526, -1000, -1000, -1000, 105, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 264, -1000, 268, -1000, 614, + -1000, -1000, -1000, -1000, -1000, 277, -1000, 297, -1000, 650, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 23, 177, -1000, -1000, 373, -1000, 373, 180, + -1000, -1000, 22, 95, -1000, -1000, 483, -1000, 483, 101, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, 159, -1000, -1000, - 280, -1000, -1000, 323, -1000, 29, -1000, -56, -56, -56, - -56, -56, -56, -56, -56, -56, -56, -56, -56, -56, - -56, -56, -56, 49, 43, 192, 177, -61, -1000, 174, - 174, 8, -1000, 595, 5, -1000, 270, -1000, -1000, 131, - 187, -1000, -1000, -1000, 263, -1000, 156, -1000, 269, 373, - -1000, -43, -38, -1000, 373, 373, 373, 373, 373, 373, - 373, 373, 373, 373, 373, 373, 373, 373, 373, -1000, - 254, -1000, -1000, 151, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, 226, 226, 101, -1000, -1000, -1000, -1000, 447, -1000, - -1000, 47, -1000, 614, -1000, -1000, 157, -1000, 109, -1000, - -1000, -1000, -1000, -1000, 93, -1000, -1000, -1000, -1000, -1000, - 22, 73, 60, -1000, -1000, -1000, 372, 250, 174, 174, - 174, 174, 5, 5, 491, 491, 491, 679, 660, 491, - 491, 679, 5, 5, 491, 5, 250, -1000, 68, -1000, - -1000, -1000, 186, -1000, 176, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, 167, -1000, -1000, + 281, -1000, -1000, 309, -1000, 23, -1000, -50, -50, -50, + -50, -50, -50, -50, -50, -50, -50, -50, -50, -50, + -50, -50, -50, 48, 174, 336, 95, -56, -1000, 262, + 262, 324, -1000, 631, 103, -1000, 280, -1000, -1000, 274, + 241, -1000, -1000, -1000, 187, -1000, 180, -1000, 159, 483, + -1000, -57, -40, -1000, 483, 483, 483, 483, 483, 483, + 483, 483, 483, 483, 483, 483, 
483, 483, 483, -1000, + 165, -1000, -1000, 94, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, 40, 40, 269, -1000, -1000, -1000, -1000, 155, -1000, + -1000, 41, -1000, 650, -1000, -1000, 31, -1000, 170, -1000, + -1000, -1000, -1000, -1000, 163, -1000, -1000, -1000, -1000, -1000, + 19, 144, 140, -1000, -1000, -1000, 404, 16, 262, 262, + 262, 262, 103, 103, 251, 251, 251, 715, 696, 251, + 251, 715, 103, 103, 251, 103, 16, -1000, 24, -1000, + -1000, -1000, 265, -1000, 189, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 373, - -1000, -1000, -1000, -1000, -1000, -1000, 91, 91, 20, 91, - 124, 124, 92, 95, -1000, -1000, 285, 283, 256, 255, - 233, 231, 230, 227, 210, 209, 208, -1000, -1000, -1000, - -1000, -1000, -1000, 102, -1000, -1000, -1000, 295, -1000, 614, - -1000, -1000, -1000, 91, -1000, 18, 14, 443, -1000, -1000, - -1000, 41, 48, 226, 226, 226, 158, 158, 41, 158, - 41, -58, -1000, -1000, -1000, -1000, -1000, 91, 91, -1000, - -1000, -1000, 91, -1000, -1000, -1000, -1000, -1000, -1000, 226, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 26, -1000, 178, -1000, -1000, -1000, -1000, + 483, -1000, -1000, -1000, -1000, -1000, -1000, 34, 34, 18, + 34, 44, 44, 110, 38, -1000, -1000, 285, 267, 260, + 240, 236, 235, 234, 206, 188, 134, 129, -1000, -1000, + -1000, -1000, -1000, -1000, 102, -1000, -1000, -1000, 14, -1000, + 650, -1000, -1000, -1000, 34, -1000, 12, 9, 482, -1000, + -1000, -1000, 51, 81, 40, 40, 40, 97, 97, 51, + 97, 51, -73, -1000, -1000, -1000, -1000, -1000, 34, 34, + -1000, -1000, -1000, 34, -1000, -1000, -1000, -1000, -1000, -1000, + 40, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, 104, -1000, 33, -1000, -1000, -1000, -1000, } var yyPgo = [...]int16{ - 0, 395, 13, 394, 6, 15, 393, 328, 390, 387, - 381, 380, 286, 294, 372, 14, 371, 10, 11, 356, - 353, 8, 352, 3, 4, 351, 2, 1, 0, 350, - 12, 5, 349, 348, 16, 157, 347, 345, 7, 344, - 341, 31, 339, 32, 338, 9, 337, 336, 335, 320, - 319, 312, 293, 301, 295, + 0, 379, 13, 378, 6, 15, 377, 344, 376, 374, + 373, 370, 198, 294, 369, 14, 368, 10, 11, 367, + 366, 8, 364, 3, 4, 363, 2, 1, 0, 362, + 12, 5, 361, 360, 18, 158, 359, 358, 7, 357, + 354, 17, 353, 31, 352, 9, 351, 350, 340, 332, + 327, 326, 314, 321, 302, } var yyR1 = [...]int8{ @@ -542,25 +547,25 @@ var yyR1 = [...]int8{ 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 35, 37, 37, 47, 47, 42, 42, 42, 42, 17, 17, 17, 17, 16, 16, 16, 4, 4, - 39, 41, 41, 40, 40, 40, 48, 46, 46, 46, - 32, 32, 32, 9, 9, 44, 50, 50, 50, 50, - 50, 50, 51, 52, 52, 52, 43, 43, 43, 1, - 1, 1, 2, 2, 2, 2, 2, 2, 2, 13, - 13, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 4, 39, 41, 41, 40, 40, 40, 48, 46, 46, + 46, 32, 32, 32, 9, 9, 44, 50, 50, 50, + 50, 50, 50, 51, 52, 52, 52, 43, 43, 43, + 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, + 13, 13, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 12, 12, 12, 12, 14, - 14, 14, 15, 15, 15, 15, 54, 20, 20, 20, - 20, 19, 19, 19, 19, 19, 19, 19, 19, 19, - 29, 29, 29, 21, 21, 21, 21, 22, 22, 22, - 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, - 23, 24, 24, 25, 25, 25, 11, 11, 11, 11, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 6, 6, 6, 6, 6, 6, + 7, 7, 7, 7, 7, 7, 12, 12, 12, 12, + 14, 14, 14, 15, 15, 15, 15, 54, 20, 20, + 20, 20, 19, 19, 19, 19, 19, 19, 19, 19, + 19, 29, 29, 29, 21, 21, 21, 21, 22, 22, + 22, 23, 23, 
23, 23, 23, 23, 23, 23, 23, + 23, 23, 24, 24, 25, 25, 25, 11, 11, 11, + 11, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 8, 8, 5, 5, 5, 5, 45, - 45, 28, 28, 30, 30, 31, 31, 27, 26, 26, - 49, 10, 18, 18, + 6, 6, 6, 6, 8, 8, 5, 5, 5, 5, + 45, 45, 28, 28, 30, 30, 31, 31, 27, 26, + 26, 49, 10, 18, 18, } var yyR2 = [...]int8{ @@ -570,25 +575,25 @@ var yyR2 = [...]int8{ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 1, 0, 1, 3, 3, 1, 1, 3, 3, 3, 4, 2, 1, 3, 1, 2, 1, 1, - 2, 3, 2, 3, 1, 2, 3, 3, 4, 3, - 3, 5, 3, 1, 1, 4, 6, 5, 6, 5, - 4, 3, 2, 2, 1, 1, 3, 4, 2, 3, - 1, 2, 3, 3, 1, 3, 3, 2, 1, 2, + 1, 2, 3, 2, 3, 1, 2, 3, 3, 4, + 3, 3, 5, 3, 1, 1, 4, 6, 5, 6, + 5, 4, 3, 2, 2, 1, 1, 3, 4, 2, + 3, 1, 2, 3, 3, 1, 3, 3, 2, 1, + 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 3, 4, 2, 0, 3, - 1, 2, 3, 3, 2, 1, 2, 0, 3, 2, - 1, 1, 3, 1, 3, 4, 1, 3, 5, 5, - 1, 1, 1, 4, 3, 3, 2, 3, 1, 2, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 4, 3, 3, 1, 2, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 3, 4, 2, 0, + 3, 1, 2, 3, 3, 2, 1, 2, 0, 3, + 2, 1, 1, 3, 1, 3, 4, 1, 3, 5, + 5, 1, 1, 1, 4, 3, 3, 2, 3, 1, + 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 4, 3, 3, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 2, 2, 1, 1, 1, 2, 1, - 1, 1, 0, 1, + 1, 1, 1, 1, 2, 2, 1, 1, 1, 2, + 1, 1, 1, 0, 1, } var yyChk = [...]int16{ @@ -614,59 +619,59 @@ var yyChk = [...]int16{ 5, -29, -21, 12, -28, -30, 16, -38, 82, 84, 80, 81, -38, -38, -38, -38, -38, -38, -38, -38, -38, -38, -38, -38, -38, -38, -38, -45, 15, -28, - -28, 21, 6, 2, -16, 22, -4, -6, 2, 62, - 78, 63, 79, 64, 65, 66, 80, 81, 12, 82, - 47, 48, 51, 67, 18, 68, 83, 84, 69, 70, - 71, 72, 73, 88, 89, 59, 74, 75, 22, 7, - 20, -2, 25, 2, 25, 2, 26, 26, -30, 26, - 41, 57, -22, 24, 17, -23, 30, 28, 29, 35, - 36, 37, 33, 31, 34, 32, 38, -17, -17, -18, - -17, -18, 22, -45, 21, 2, 22, 7, 2, -38, - -27, 19, -27, 26, -27, -21, -21, 24, 17, 2, - 17, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 21, 2, 22, -4, -27, 26, 26, 17, - -23, -26, 57, -27, -31, -28, -28, -28, -24, 14, - -24, -26, -24, -26, -11, 92, 93, 94, 95, -27, - -27, -27, -25, -28, 24, 21, 2, 21, -28, + -28, 21, 6, 2, -16, 22, -4, -6, 25, 2, + 62, 78, 63, 79, 64, 65, 66, 80, 81, 12, + 82, 47, 48, 51, 67, 18, 68, 83, 84, 69, + 70, 71, 72, 73, 88, 89, 59, 74, 75, 22, + 7, 20, -2, 25, 2, 25, 2, 26, 26, -30, + 26, 41, 57, -22, 24, 17, -23, 30, 28, 29, + 35, 36, 37, 33, 31, 34, 32, 38, -17, -17, + -18, -17, -18, 22, -45, 21, 2, 22, 7, 2, + -38, -27, 19, -27, 26, -27, -21, -21, 24, 17, + 2, 17, 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 21, 2, 22, -4, -27, 26, 26, + 17, -23, -26, 57, -27, -31, -28, -28, -28, -24, + 14, -24, -26, -24, -26, -11, 92, 93, 94, 95, + -27, -27, -27, -25, -28, 24, 21, 2, 21, -28, } var yyDef = [...]int16{ - 0, -2, 128, 128, 0, 0, 7, 6, 1, 128, - 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, - 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, - 120, 121, 122, 123, 124, 0, 2, -2, 3, 4, + 0, -2, 129, 129, 0, 0, 7, 6, 1, 129, + 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, + 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, + 121, 122, 123, 124, 125, 0, 2, -2, 3, 4, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, - 18, 19, 0, 107, 229, 230, 0, 240, 0, 84, - 85, -2, -2, -2, -2, -2, -2, -2, -2, -2, - -2, -2, -2, -2, -2, 223, 224, 0, 5, 99, - 0, 127, 130, 0, 135, 136, 140, 43, 43, 43, + 18, 19, 0, 
108, 230, 231, 0, 241, 0, 85, + 86, -2, -2, -2, -2, -2, -2, -2, -2, -2, + -2, -2, -2, -2, -2, 224, 225, 0, 5, 100, + 0, 128, 131, 0, 136, 137, 141, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 0, 0, 0, 0, 22, 23, 0, - 0, 0, 60, 0, 82, 83, 0, 88, 90, 0, - 94, 98, 241, 125, 0, 131, 0, 134, 139, 0, + 0, 0, 61, 0, 83, 84, 0, 89, 91, 0, + 95, 99, 242, 126, 0, 132, 0, 135, 140, 0, 42, 47, 48, 44, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 67, - 0, 69, 70, 0, 72, 235, 236, 73, 74, 231, - 232, 0, 0, 0, 81, 20, 21, 24, 0, 54, - 25, 0, 62, 64, 66, 86, 0, 91, 0, 97, - 225, 226, 227, 228, 0, 126, 129, 132, 133, 138, - 141, 143, 146, 150, 151, 152, 0, 26, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 68, + 0, 70, 71, 0, 73, 236, 237, 74, 75, 232, + 233, 0, 0, 0, 82, 20, 21, 24, 0, 54, + 25, 0, 63, 65, 67, 87, 0, 92, 0, 98, + 226, 227, 228, 229, 0, 127, 130, 133, 134, 139, + 142, 144, 147, 151, 152, 153, 0, 26, 0, 0, -2, -2, 27, 28, 29, 30, 31, 32, 33, 34, - 35, 36, 37, 38, 39, 40, 41, 68, 0, 233, - 234, 75, 0, 80, 0, 53, 56, 58, 59, 194, + 35, 36, 37, 38, 39, 40, 41, 69, 0, 234, + 235, 76, 0, 81, 0, 53, 56, 58, 59, 60, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, - 215, 216, 217, 218, 219, 220, 221, 222, 61, 65, - 87, 89, 92, 96, 93, 95, 0, 0, 0, 0, - 0, 0, 0, 0, 156, 158, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 45, 46, 49, - 243, 50, 71, 0, 77, 79, 51, 0, 57, 63, - 142, 237, 144, 0, 147, 0, 0, 0, 154, 159, - 155, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 76, 78, 52, 55, 145, 0, 0, 153, - 157, 160, 0, 239, 161, 162, 163, 164, 165, 0, - 166, 167, 168, 169, 170, 176, 177, 178, 179, 148, - 149, 238, 0, 174, 0, 172, 175, 171, 173, + 215, 216, 217, 218, 219, 220, 221, 222, 223, 62, + 66, 88, 90, 93, 97, 94, 96, 0, 0, 0, + 0, 0, 0, 0, 0, 157, 159, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 45, 46, + 49, 244, 50, 72, 0, 78, 80, 51, 0, 57, + 64, 143, 238, 145, 0, 148, 0, 0, 0, 155, + 160, 156, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 77, 79, 52, 55, 146, 0, 0, + 154, 158, 161, 0, 240, 162, 163, 164, 165, 166, + 0, 167, 168, 169, 170, 171, 177, 178, 179, 180, + 149, 150, 239, 0, 175, 0, 173, 176, 172, 174, } var yyTok1 = [...]int8{ @@ -1249,18 +1254,28 @@ yydefault: case 58: yyDollar = yyS[yypt-1 : yypt+1] { - if !isLabel(yyDollar[1].item.Val) { + if !model.LabelName(yyDollar[1].item.Val).IsValid() { yylex.(*parser).unexpected("grouping opts", "label") } yyVAL.item = yyDollar[1].item } case 59: + yyDollar = yyS[yypt-1 : yypt+1] + { + if !model.LabelName(yylex.(*parser).unquoteString(yyDollar[1].item.Val)).IsValid() { + yylex.(*parser).unexpected("grouping opts", "label") + } + yyVAL.item = yyDollar[1].item + yyVAL.item.Pos++ + yyVAL.item.Val = yylex.(*parser).unquoteString(yyVAL.item.Val) + } + case 60: yyDollar = yyS[yypt-1 : yypt+1] { yylex.(*parser).unexpected("grouping opts", "label") yyVAL.item = Item{} } - case 60: + case 61: yyDollar = yyS[yypt-2 : yypt+1] { fn, exist := getFunction(yyDollar[1].item.Val, yylex.(*parser).functions) @@ -1279,38 +1294,38 @@ yydefault: }, } } - case 61: + case 62: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = yyDollar[2].node } - case 62: + case 63: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.node = Expressions{} } - case 63: + case 64: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = append(yyDollar[1].node.(Expressions), yyDollar[3].node.(Expr)) } - case 64: + case 65: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.node = Expressions{yyDollar[1].node.(Expr)} } - case 65: + case 66: yyDollar = yyS[yypt-2 : yypt+1] { 
yylex.(*parser).addParseErrf(yyDollar[2].item.PositionRange(), "trailing commas not allowed in function call args") yyVAL.node = yyDollar[1].node } - case 66: + case 67: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = &ParenExpr{Expr: yyDollar[2].node.(Expr), PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[3].item)} } - case 67: + case 68: yyDollar = yyS[yypt-3 : yypt+1] { numLit, _ := yyDollar[3].node.(*NumberLiteral) @@ -1318,7 +1333,7 @@ yydefault: yylex.(*parser).addOffset(yyDollar[1].node, dur) yyVAL.node = yyDollar[1].node } - case 68: + case 69: yyDollar = yyS[yypt-4 : yypt+1] { numLit, _ := yyDollar[4].node.(*NumberLiteral) @@ -1326,31 +1341,31 @@ yydefault: yylex.(*parser).addOffset(yyDollar[1].node, -dur) yyVAL.node = yyDollar[1].node } - case 69: + case 70: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).unexpected("offset", "number or duration") yyVAL.node = yyDollar[1].node } - case 70: + case 71: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).setTimestamp(yyDollar[1].node, yyDollar[3].float) yyVAL.node = yyDollar[1].node } - case 71: + case 72: yyDollar = yyS[yypt-5 : yypt+1] { yylex.(*parser).setAtModifierPreprocessor(yyDollar[1].node, yyDollar[3].item) yyVAL.node = yyDollar[1].node } - case 72: + case 73: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).unexpected("@", "timestamp") yyVAL.node = yyDollar[1].node } - case 75: + case 76: yyDollar = yyS[yypt-4 : yypt+1] { var errMsg string @@ -1375,7 +1390,7 @@ yydefault: EndPos: yylex.(*parser).lastClosing, } } - case 76: + case 77: yyDollar = yyS[yypt-6 : yypt+1] { numLitRange, _ := yyDollar[3].node.(*NumberLiteral) @@ -1387,7 +1402,7 @@ yydefault: EndPos: yyDollar[6].item.Pos + 1, } } - case 77: + case 78: yyDollar = yyS[yypt-5 : yypt+1] { numLitRange, _ := yyDollar[3].node.(*NumberLiteral) @@ -1398,31 +1413,31 @@ yydefault: EndPos: yyDollar[5].item.Pos + 1, } } - case 78: + case 79: yyDollar = yyS[yypt-6 : yypt+1] { yylex.(*parser).unexpected("subquery selector", "\"]\"") yyVAL.node = yyDollar[1].node } - case 79: + case 80: yyDollar = yyS[yypt-5 : yypt+1] { yylex.(*parser).unexpected("subquery selector", "number or duration or \"]\"") yyVAL.node = yyDollar[1].node } - case 80: + case 81: yyDollar = yyS[yypt-4 : yypt+1] { yylex.(*parser).unexpected("subquery or range", "\":\" or \"]\"") yyVAL.node = yyDollar[1].node } - case 81: + case 82: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).unexpected("subquery selector", "number or duration") yyVAL.node = yyDollar[1].node } - case 82: + case 83: yyDollar = yyS[yypt-2 : yypt+1] { if nl, ok := yyDollar[2].node.(*NumberLiteral); ok { @@ -1435,7 +1450,7 @@ yydefault: yyVAL.node = &UnaryExpr{Op: yyDollar[1].item.Typ, Expr: yyDollar[2].node.(Expr), StartPos: yyDollar[1].item.Pos} } } - case 83: + case 84: yyDollar = yyS[yypt-2 : yypt+1] { vs := yyDollar[2].node.(*VectorSelector) @@ -1444,7 +1459,7 @@ yydefault: yylex.(*parser).assembleVectorSelector(vs) yyVAL.node = vs } - case 84: + case 85: yyDollar = yyS[yypt-1 : yypt+1] { vs := &VectorSelector{ @@ -1455,14 +1470,14 @@ yydefault: yylex.(*parser).assembleVectorSelector(vs) yyVAL.node = vs } - case 85: + case 86: yyDollar = yyS[yypt-1 : yypt+1] { vs := yyDollar[1].node.(*VectorSelector) yylex.(*parser).assembleVectorSelector(vs) yyVAL.node = vs } - case 86: + case 87: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = &VectorSelector{ @@ -1470,7 +1485,7 @@ yydefault: PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[3].item), } } - case 87: + case 88: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.node = &VectorSelector{ @@ -1478,7 
+1493,7 @@ yydefault: PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[4].item), } } - case 88: + case 89: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.node = &VectorSelector{ @@ -1486,7 +1501,7 @@ yydefault: PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[2].item), } } - case 89: + case 90: yyDollar = yyS[yypt-3 : yypt+1] { if yyDollar[1].matchers != nil { @@ -1495,38 +1510,32 @@ yydefault: yyVAL.matchers = yyDollar[1].matchers } } - case 90: + case 91: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.matchers = []*labels.Matcher{yyDollar[1].matcher} } - case 91: + case 92: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("label matching", "\",\" or \"}\"") yyVAL.matchers = yyDollar[1].matchers } - case 92: - yyDollar = yyS[yypt-3 : yypt+1] - { - yyVAL.matcher = yylex.(*parser).newLabelMatcher(yyDollar[1].item, yyDollar[2].item, yyDollar[3].item) - } case 93: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.matcher = yylex.(*parser).newLabelMatcher(yyDollar[1].item, yyDollar[2].item, yyDollar[3].item) } case 94: + yyDollar = yyS[yypt-3 : yypt+1] + { + yyVAL.matcher = yylex.(*parser).newLabelMatcher(yyDollar[1].item, yyDollar[2].item, yyDollar[3].item) + } + case 95: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.matcher = yylex.(*parser).newMetricNameMatcher(yyDollar[1].item) } - case 95: - yyDollar = yyS[yypt-3 : yypt+1] - { - yylex.(*parser).unexpected("label matching", "string") - yyVAL.matcher = nil - } case 96: yyDollar = yyS[yypt-3 : yypt+1] { @@ -1534,89 +1543,95 @@ yydefault: yyVAL.matcher = nil } case 97: + yyDollar = yyS[yypt-3 : yypt+1] + { + yylex.(*parser).unexpected("label matching", "string") + yyVAL.matcher = nil + } + case 98: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("label matching", "label matching operator") yyVAL.matcher = nil } - case 98: + case 99: yyDollar = yyS[yypt-1 : yypt+1] { yylex.(*parser).unexpected("label matching", "identifier or \"}\"") yyVAL.matcher = nil } - case 99: + case 100: yyDollar = yyS[yypt-2 : yypt+1] { b := labels.NewBuilder(yyDollar[2].labels) b.Set(labels.MetricName, yyDollar[1].item.Val) yyVAL.labels = b.Labels() } - case 100: + case 101: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.labels = yyDollar[1].labels } - case 125: - yyDollar = yyS[yypt-3 : yypt+1] - { - yyVAL.labels = labels.New(yyDollar[2].lblList...) - } case 126: - yyDollar = yyS[yypt-4 : yypt+1] + yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.labels = labels.New(yyDollar[2].lblList...) } case 127: + yyDollar = yyS[yypt-4 : yypt+1] + { + yyVAL.labels = labels.New(yyDollar[2].lblList...) 
+ } + case 128: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.labels = labels.New() } - case 128: + case 129: yyDollar = yyS[yypt-0 : yypt+1] { yyVAL.labels = labels.New() } - case 129: + case 130: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.lblList = append(yyDollar[1].lblList, yyDollar[3].label) } - case 130: + case 131: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.lblList = []labels.Label{yyDollar[1].label} } - case 131: + case 132: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("label set", "\",\" or \"}\"") yyVAL.lblList = yyDollar[1].lblList } - case 132: + case 133: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.label = labels.Label{Name: yyDollar[1].item.Val, Value: yylex.(*parser).unquoteString(yyDollar[3].item.Val)} } - case 133: + case 134: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).unexpected("label set", "string") yyVAL.label = labels.Label{} } - case 134: + case 135: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("label set", "\"=\"") yyVAL.label = labels.Label{} } - case 135: + case 136: yyDollar = yyS[yypt-1 : yypt+1] { yylex.(*parser).unexpected("label set", "identifier or \"}\"") yyVAL.label = labels.Label{} } - case 136: + case 137: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).generatedParserResult = &seriesDescription{ @@ -1624,33 +1639,33 @@ yydefault: values: yyDollar[2].series, } } - case 137: + case 138: yyDollar = yyS[yypt-0 : yypt+1] { yyVAL.series = []SequenceValue{} } - case 138: + case 139: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = append(yyDollar[1].series, yyDollar[3].series...) } - case 139: + case 140: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.series = yyDollar[1].series } - case 140: + case 141: yyDollar = yyS[yypt-1 : yypt+1] { yylex.(*parser).unexpected("series values", "") yyVAL.series = nil } - case 141: + case 142: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.series = []SequenceValue{{Omitted: true}} } - case 142: + case 143: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1658,12 +1673,12 @@ yydefault: yyVAL.series = append(yyVAL.series, SequenceValue{Omitted: true}) } } - case 143: + case 144: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.series = []SequenceValue{{Value: yyDollar[1].float}} } - case 144: + case 145: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1672,7 +1687,7 @@ yydefault: yyVAL.series = append(yyVAL.series, SequenceValue{Value: yyDollar[1].float}) } } - case 145: + case 146: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1682,12 +1697,12 @@ yydefault: yyDollar[1].float += yyDollar[2].float } } - case 146: + case 147: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.series = []SequenceValue{{Histogram: yyDollar[1].histogram}} } - case 147: + case 148: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1697,7 +1712,7 @@ yydefault: //$1 += $2 } } - case 148: + case 149: yyDollar = yyS[yypt-5 : yypt+1] { val, err := yylex.(*parser).histogramsIncreaseSeries(yyDollar[1].histogram, yyDollar[3].histogram, yyDollar[5].uint) @@ -1706,7 +1721,7 @@ yydefault: } yyVAL.series = val } - case 149: + case 150: yyDollar = yyS[yypt-5 : yypt+1] { val, err := yylex.(*parser).histogramsDecreaseSeries(yyDollar[1].histogram, yyDollar[3].histogram, yyDollar[5].uint) @@ -1715,7 +1730,7 @@ yydefault: } yyVAL.series = val } - case 150: + case 151: yyDollar = yyS[yypt-1 : yypt+1] { if yyDollar[1].item.Val != "stale" { @@ -1723,130 +1738,130 @@ yydefault: } yyVAL.float = math.Float64frombits(value.StaleNaN) } - case 153: - yyDollar = yyS[yypt-4 : yypt+1] - { - 
yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors) - } case 154: - yyDollar = yyS[yypt-3 : yypt+1] + yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors) } case 155: yyDollar = yyS[yypt-3 : yypt+1] { - m := yylex.(*parser).newMap() - yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m) + yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors) } case 156: - yyDollar = yyS[yypt-2 : yypt+1] + yyDollar = yyS[yypt-3 : yypt+1] { m := yylex.(*parser).newMap() yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m) } case 157: + yyDollar = yyS[yypt-2 : yypt+1] + { + m := yylex.(*parser).newMap() + yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m) + } + case 158: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = *(yylex.(*parser).mergeMaps(&yyDollar[1].descriptors, &yyDollar[3].descriptors)) } - case 158: + case 159: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.descriptors = yyDollar[1].descriptors } - case 159: + case 160: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("histogram description", "histogram description key, e.g. buckets:[5 10 7]") } - case 160: - yyDollar = yyS[yypt-3 : yypt+1] - { - yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["schema"] = yyDollar[3].int - } case 161: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["sum"] = yyDollar[3].float + yyVAL.descriptors["schema"] = yyDollar[3].int } case 162: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["count"] = yyDollar[3].float + yyVAL.descriptors["sum"] = yyDollar[3].float } case 163: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["z_bucket"] = yyDollar[3].float + yyVAL.descriptors["count"] = yyDollar[3].float } case 164: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["z_bucket_w"] = yyDollar[3].float + yyVAL.descriptors["z_bucket"] = yyDollar[3].float } case 165: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["custom_values"] = yyDollar[3].bucket_set + yyVAL.descriptors["z_bucket_w"] = yyDollar[3].float } case 166: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["buckets"] = yyDollar[3].bucket_set + yyVAL.descriptors["custom_values"] = yyDollar[3].bucket_set } case 167: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["offset"] = yyDollar[3].int + yyVAL.descriptors["buckets"] = yyDollar[3].bucket_set } case 168: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["n_buckets"] = yyDollar[3].bucket_set + yyVAL.descriptors["offset"] = yyDollar[3].int } case 169: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["n_offset"] = yyDollar[3].int + yyVAL.descriptors["n_buckets"] = yyDollar[3].bucket_set } case 170: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["counter_reset_hint"] = yyDollar[3].item + yyVAL.descriptors["n_offset"] = yyDollar[3].int } case 171: - yyDollar = yyS[yypt-4 : yypt+1] + yyDollar = yyS[yypt-3 : yypt+1] { - yyVAL.bucket_set = yyDollar[2].bucket_set + yyVAL.descriptors = yylex.(*parser).newMap() + yyVAL.descriptors["counter_reset_hint"] = 
yyDollar[3].item } case 172: - yyDollar = yyS[yypt-3 : yypt+1] + yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.bucket_set = yyDollar[2].bucket_set } case 173: yyDollar = yyS[yypt-3 : yypt+1] { - yyVAL.bucket_set = append(yyDollar[1].bucket_set, yyDollar[3].float) + yyVAL.bucket_set = yyDollar[2].bucket_set } case 174: + yyDollar = yyS[yypt-3 : yypt+1] + { + yyVAL.bucket_set = append(yyDollar[1].bucket_set, yyDollar[3].float) + } + case 175: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.bucket_set = []float64{yyDollar[1].float} } - case 229: + case 230: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.node = &NumberLiteral{ @@ -1854,7 +1869,7 @@ yydefault: PosRange: yyDollar[1].item.PositionRange(), } } - case 230: + case 231: yyDollar = yyS[yypt-1 : yypt+1] { var err error @@ -1868,12 +1883,12 @@ yydefault: PosRange: yyDollar[1].item.PositionRange(), } } - case 231: + case 232: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.float = yylex.(*parser).number(yyDollar[1].item.Val) } - case 232: + case 233: yyDollar = yyS[yypt-1 : yypt+1] { var err error @@ -1884,17 +1899,17 @@ yydefault: } yyVAL.float = dur.Seconds() } - case 233: + case 234: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.float = yyDollar[2].float } - case 234: + case 235: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.float = -yyDollar[2].float } - case 237: + case 238: yyDollar = yyS[yypt-1 : yypt+1] { var err error @@ -1903,17 +1918,17 @@ yydefault: yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid repetition in series values: %s", err) } } - case 238: + case 239: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.int = -int64(yyDollar[2].uint) } - case 239: + case 240: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.int = int64(yyDollar[1].uint) } - case 240: + case 241: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.node = &StringLiteral{ @@ -1921,7 +1936,7 @@ yydefault: PosRange: yyDollar[1].item.PositionRange(), } } - case 241: + case 242: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.item = Item{ @@ -1930,7 +1945,7 @@ yydefault: Val: yylex.(*parser).unquoteString(yyDollar[1].item.Val), } } - case 242: + case 243: yyDollar = yyS[yypt-0 : yypt+1] { yyVAL.strings = nil diff --git a/promql/parser/lex.go b/promql/parser/lex.go index 99b4b4644..9b88ab556 100644 --- a/promql/parser/lex.go +++ b/promql/parser/lex.go @@ -1059,16 +1059,3 @@ func isDigit(r rune) bool { func isAlpha(r rune) bool { return r == '_' || ('a' <= r && r <= 'z') || ('A' <= r && r <= 'Z') } - -// isLabel reports whether the string can be used as label. 
-func isLabel(s string) bool { - if len(s) == 0 || !isAlpha(rune(s[0])) { - return false - } - for _, c := range s[1:] { - if !isAlphaNumeric(c) { - return false - } - } - return true -} diff --git a/promql/parser/parse_test.go b/promql/parser/parse_test.go index 3c679e5b0..37748323c 100644 --- a/promql/parser/parse_test.go +++ b/promql/parser/parse_test.go @@ -2397,6 +2397,51 @@ var testExpr = []struct { }, }, }, + { + input: `sum by ("foo")({"some.metric"})`, + expected: &AggregateExpr{ + Op: SUM, + Expr: &VectorSelector{ + LabelMatchers: []*labels.Matcher{ + MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "some.metric"), + }, + PosRange: posrange.PositionRange{ + Start: 15, + End: 30, + }, + }, + Grouping: []string{"foo"}, + PosRange: posrange.PositionRange{ + Start: 0, + End: 31, + }, + }, + }, + { + input: `sum by ("foo)(some_metric{})`, + fail: true, + errMsg: "unterminated quoted string", + }, + { + input: `sum by ("foo", bar, 'baz')({"some.metric"})`, + expected: &AggregateExpr{ + Op: SUM, + Expr: &VectorSelector{ + LabelMatchers: []*labels.Matcher{ + MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "some.metric"), + }, + PosRange: posrange.PositionRange{ + Start: 27, + End: 42, + }, + }, + Grouping: []string{"foo", "bar", "baz"}, + PosRange: posrange.PositionRange{ + Start: 0, + End: 43, + }, + }, + }, { input: "avg by (foo)(some_metric)", expected: &AggregateExpr{ @@ -3844,6 +3889,7 @@ func readable(s string) string { } func TestParseExpressions(t *testing.T) { + model.NameValidationScheme = model.UTF8Validation for _, test := range testExpr { t.Run(readable(test.input), func(t *testing.T) { expr, err := ParseExpr(test.input) diff --git a/promql/parser/printer.go b/promql/parser/printer.go index f3bdefdeb..5613956f7 100644 --- a/promql/parser/printer.go +++ b/promql/parser/printer.go @@ -77,14 +77,24 @@ func (node *AggregateExpr) getAggOpStr() string { switch { case node.Without: - aggrString += fmt.Sprintf(" without (%s) ", strings.Join(node.Grouping, ", ")) + aggrString += fmt.Sprintf(" without (%s) ", joinLabels(node.Grouping)) case len(node.Grouping) > 0: - aggrString += fmt.Sprintf(" by (%s) ", strings.Join(node.Grouping, ", ")) + aggrString += fmt.Sprintf(" by (%s) ", joinLabels(node.Grouping)) } return aggrString } +func joinLabels(ss []string) string { + for i, s := range ss { + // If the label is already quoted, don't quote it again. 
+ if s[0] != '"' && s[0] != '\'' && s[0] != '`' && !model.IsValidLegacyMetricName(model.LabelValue(s)) { + ss[i] = fmt.Sprintf("\"%s\"", s) + } + } + return strings.Join(ss, ", ") +} + func (node *BinaryExpr) String() string { returnBool := "" if node.ReturnBool { diff --git a/promql/parser/printer_test.go b/promql/parser/printer_test.go index d2e301a88..0a557ad59 100644 --- a/promql/parser/printer_test.go +++ b/promql/parser/printer_test.go @@ -16,6 +16,7 @@ package parser import ( "testing" + "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/labels" @@ -44,6 +45,14 @@ func TestExprString(t *testing.T) { in: `sum without(instance) (task:errors:rate10s{job="s"})`, out: `sum without (instance) (task:errors:rate10s{job="s"})`, }, + { + in: `sum by("foo.bar") (task:errors:rate10s{job="s"})`, + out: `sum by ("foo.bar") (task:errors:rate10s{job="s"})`, + }, + { + in: `sum without("foo.bar") (task:errors:rate10s{job="s"})`, + out: `sum without ("foo.bar") (task:errors:rate10s{job="s"})`, + }, { in: `topk(5, task:errors:rate10s{job="s"})`, }, @@ -157,6 +166,8 @@ func TestExprString(t *testing.T) { }, } + model.NameValidationScheme = model.UTF8Validation + for _, test := range inputs { expr, err := ParseExpr(test.in) require.NoError(t, err) From 2bcac1b77b7c98fe5b97ea325790c95daa6328db Mon Sep 17 00:00:00 2001 From: Callum Styan Date: Thu, 1 Aug 2024 12:32:49 -0700 Subject: [PATCH 19/99] change comment that allows timestamps to be optional Signed-off-by: Callum Styan --- prompb/io/prometheus/write/v2/types.proto | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/prompb/io/prometheus/write/v2/types.proto b/prompb/io/prometheus/write/v2/types.proto index 0cc7b8bc4..ff6c4936b 100644 --- a/prompb/io/prometheus/write/v2/types.proto +++ b/prompb/io/prometheus/write/v2/types.proto @@ -107,15 +107,10 @@ message Exemplar { // value represents an exact example value. This can be useful when the exemplar // is attached to a histogram, which only gives an estimated value through buckets. double value = 2; - // timestamp represents an optional timestamp of the sample in ms. + // timestamp represents the timestamp of the exemplar in ms. // // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go // for conversion from/to time.Time to Prometheus timestamp. - // - // Note that the "optional" keyword is omitted due to - // https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields - // Zero value means value not set. If you need to use exactly zero value for - // the timestamp, use 1 millisecond before or after. int64 timestamp = 3; } From 6ca5b9cd40c5bd4e64fe3b281463a73f1035141b Mon Sep 17 00:00:00 2001 From: Callum Styan Date: Thu, 1 Aug 2024 12:41:55 -0700 Subject: [PATCH 20/99] regenerate pb files with new comment Signed-off-by: Callum Styan --- prompb/io/prometheus/write/v2/types.pb.go | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/prompb/io/prometheus/write/v2/types.pb.go b/prompb/io/prometheus/write/v2/types.pb.go index d6ea8398f..3420d20e2 100644 --- a/prompb/io/prometheus/write/v2/types.pb.go +++ b/prompb/io/prometheus/write/v2/types.pb.go @@ -302,15 +302,10 @@ type Exemplar struct { // value represents an exact example value. This can be useful when the exemplar // is attached to a histogram, which only gives an estimated value through buckets. 
Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` - // timestamp represents an optional timestamp of the sample in ms. + // timestamp represents the timestamp of the exemplar in ms. // // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go // for conversion from/to time.Time to Prometheus timestamp. - // - // Note that the "optional" keyword is omitted due to - // https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields - // Zero value means value not set. If you need to use exactly zero value for - // the timestamp, use 1 millisecond before or after. Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` From cb6f9ff08867ce25b57c0295b984f84ca64ba546 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 1 Aug 2024 23:20:40 +0000 Subject: [PATCH 21/99] build(deps): bump github.com/ionos-cloud/sdk-go/v6 from 6.1.11 to 6.2.0 Bumps [github.com/ionos-cloud/sdk-go/v6](https://github.com/ionos-cloud/sdk-go) from 6.1.11 to 6.2.0. - [Release notes](https://github.com/ionos-cloud/sdk-go/releases) - [Changelog](https://github.com/ionos-cloud/sdk-go/blob/master/docs/CHANGELOG.md) - [Commits](https://github.com/ionos-cloud/sdk-go/compare/v6.1.11...v6.2.0) --- updated-dependencies: - dependency-name: github.com/ionos-cloud/sdk-go/v6 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index c74452cd5..815b00ae9 100644 --- a/go.mod +++ b/go.mod @@ -39,7 +39,7 @@ require ( github.com/hashicorp/consul/api v1.29.2 github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 github.com/hetznercloud/hcloud-go/v2 v2.12.0 - github.com/ionos-cloud/sdk-go/v6 v6.1.11 + github.com/ionos-cloud/sdk-go/v6 v6.2.0 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.17.9 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b diff --git a/go.sum b/go.sum index d96710c17..ce2d1026a 100644 --- a/go.sum +++ b/go.sum @@ -423,8 +423,8 @@ github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/ionos-cloud/sdk-go/v6 v6.1.11 h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEaes38Kzh8= -github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= +github.com/ionos-cloud/sdk-go/v6 v6.2.0 h1:qX7gachC0wJSmFfVRnd+DHmz9AStvVraKcwQ/JokIB4= +github.com/ionos-cloud/sdk-go/v6 v6.2.0/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww= github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= From 8d61a3e325d424efaec2a051553f2ff8a9b1430d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 1 Aug 2024 23:20:57 +0000 Subject: [PATCH 22/99] 
build(deps): bump google.golang.org/api from 0.189.0 to 0.190.0 Bumps [google.golang.org/api](https://github.com/googleapis/google-api-go-client) from 0.189.0 to 0.190.0. - [Release notes](https://github.com/googleapis/google-api-go-client/releases) - [Changelog](https://github.com/googleapis/google-api-go-client/blob/main/CHANGES.md) - [Commits](https://github.com/googleapis/google-api-go-client/compare/v0.189.0...v0.190.0) --- updated-dependencies: - dependency-name: google.golang.org/api dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 12 ++++++------ go.sum | 24 ++++++++++++------------ 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/go.mod b/go.mod index c74452cd5..b0880982d 100644 --- a/go.mod +++ b/go.mod @@ -82,8 +82,8 @@ require ( golang.org/x/text v0.16.0 golang.org/x/time v0.5.0 golang.org/x/tools v0.23.0 - google.golang.org/api v0.189.0 - google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d + google.golang.org/api v0.190.0 + google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f google.golang.org/grpc v1.65.0 google.golang.org/protobuf v1.34.2 gopkg.in/yaml.v2 v2.4.0 @@ -96,7 +96,7 @@ require ( ) require ( - cloud.google.com/go/auth v0.7.2 // indirect + cloud.google.com/go/auth v0.7.3 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.3 // indirect cloud.google.com/go/compute/metadata v0.5.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect @@ -140,9 +140,9 @@ require ( github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/s2a-go v0.1.7 // indirect + github.com/google/s2a-go v0.1.8 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.5 // indirect + github.com/googleapis/gax-go/v2 v2.13.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect github.com/hashicorp/cronexpr v1.1.2 // indirect @@ -191,7 +191,7 @@ require ( golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect golang.org/x/mod v0.19.0 // indirect golang.org/x/term v0.22.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240722135656-d784300faade // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gotest.tools/v3 v3.0.3 // indirect diff --git a/go.sum b/go.sum index d96710c17..12b137ad4 100644 --- a/go.sum +++ b/go.sum @@ -12,8 +12,8 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go/auth v0.7.2 h1:uiha352VrCDMXg+yoBtaD0tUF4Kv9vrtrWPYXwutnDE= -cloud.google.com/go/auth v0.7.2/go.mod h1:VEc4p5NNxycWQTMQEDQF0bd6aTMb6VgYDXEwiJJQAbs= +cloud.google.com/go/auth v0.7.3 h1:98Vr+5jMaCZ5NZk6e/uBgf60phTk/XN84r8QEWB9yjY= +cloud.google.com/go/auth v0.7.3/go.mod h1:HJtWUx1P5eqjy/f6Iq5KeytNpbAcGolPhOgyop2LlzA= cloud.google.com/go/auth/oauth2adapt v0.2.3 h1:MlxF+Pd3OmSudg/b1yZ5lJwoXCEaeedAguodky1PcKI= cloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod 
h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= @@ -322,8 +322,8 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da h1:xRmpO92tb8y+Z85iUOMOicpCfaYcv7o3Cg3wKrIpg8g= github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= -github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= +github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= +github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -332,8 +332,8 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfF github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.12.5 h1:8gw9KZK8TiVKB6q3zHY3SBzLnrGp6HQjyfYBYGmXdxA= -github.com/googleapis/gax-go/v2 v2.12.5/go.mod h1:BUDKcWo+RaKq5SC9vVYL0wLADa3VcfswbOMMRmB9H3E= +github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= +github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= github.com/gophercloud/gophercloud v1.14.0 h1:Bt9zQDhPrbd4qX7EILGmy+i7GP35cc+AAL2+wIJpUE8= github.com/gophercloud/gophercloud v1.14.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -1047,8 +1047,8 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.189.0 h1:equMo30LypAkdkLMBqfeIqtyAnlyig1JSZArl4XPwdI= -google.golang.org/api v0.189.0/go.mod h1:FLWGJKb0hb+pU2j+rJqwbnsF+ym+fQs73rbJ+KAUgy8= +google.golang.org/api v0.190.0 h1:ASM+IhLY1zljNdLu19W1jTmU6A+gMk6M46Wlur61s+Q= +google.golang.org/api v0.190.0/go.mod h1:QIr6I9iedBLnfqoD6L6Vze1UvS5Hzj5r2aUBOaZnLHo= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1085,10 +1085,10 @@ google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto 
v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d h1:kHjw/5UfflP/L5EbledDrcG4C2597RtymmGRZvHiCuY= -google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d/go.mod h1:mw8MG/Qz5wfgYr6VqVCiZcHe/GJEfI+oGGDCohaVgB0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240722135656-d784300faade h1:oCRSWfwGXQsqlVdErcyTt4A93Y8fo0/9D4b1gnI++qo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240722135656-d784300faade/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f h1:b1Ln/PG8orm0SsBbHZWke8dDp2lrCD4jSmfglFpTZbk= +google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f/go.mod h1:AHT0dDg3SoMOgZGnZk29b5xTbPHMoEC8qthmBLJCpys= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf h1:liao9UHurZLtiEwBgT9LMOnKYsHze6eA6w1KQCMVN2Q= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= From 5aeb5061f9e7abc4744b570c323964c6c06df593 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 1 Aug 2024 23:46:27 +0000 Subject: [PATCH 23/99] build(deps): bump actions/setup-go from 5.0.1 to 5.0.2 Bumps [actions/setup-go](https://github.com/actions/setup-go) from 5.0.1 to 5.0.2. - [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/cdcb36043654635271a94b9a6d1392de5bb323a7...0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a5a09a98f..c97d090b3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -75,7 +75,7 @@ jobs: runs-on: windows-latest steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version: 1.22.x - run: | @@ -162,7 +162,7 @@ jobs: - name: Checkout repository uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - name: Install Go - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: cache: false go-version: 1.22.x @@ -175,7 +175,7 @@ jobs: - name: Checkout repository uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - name: Install Go - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version: 1.22.x - name: Install snmp_exporter/generator dependencies From 8867959c5493127b4f7add2e7a20c054bab61c57 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 1 Aug 2024 23:46:29 +0000 Subject: [PATCH 24/99] build(deps): bump ossf/scorecard-action from 2.3.3 to 2.4.0 Bumps [ossf/scorecard-action](https://github.com/ossf/scorecard-action) from 2.3.3 to 2.4.0. - [Release notes](https://github.com/ossf/scorecard-action/releases) - [Changelog](https://github.com/ossf/scorecard-action/blob/main/RELEASE.md) - [Commits](https://github.com/ossf/scorecard-action/compare/dc50aa9510b46c811795eb24b2f1ba02a914e534...62b2cac7ed8198b15735ed49ab1e5cf35480ba46) --- updated-dependencies: - dependency-name: ossf/scorecard-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/scorecards.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index c82fa87a1..a6eecccc9 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -26,7 +26,7 @@ jobs: persist-credentials: false - name: "Run analysis" - uses: ossf/scorecard-action@dc50aa9510b46c811795eb24b2f1ba02a914e534 # tag=v2.3.3 + uses: ossf/scorecard-action@62b2cac7ed8198b15735ed49ab1e5cf35480ba46 # tag=v2.4.0 with: results_file: results.sarif results_format: sarif From 9bc42f4400d2be308054f91b14a1402b4485d0d0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 1 Aug 2024 23:46:33 +0000 Subject: [PATCH 25/99] build(deps): bump bufbuild/buf-setup-action from 1.34.0 to 1.35.1 Bumps [bufbuild/buf-setup-action](https://github.com/bufbuild/buf-setup-action) from 1.34.0 to 1.35.1. - [Release notes](https://github.com/bufbuild/buf-setup-action/releases) - [Commits](https://github.com/bufbuild/buf-setup-action/compare/35c243d7f2a909b1d4e40399b348a7fdab27d78d...aceb106d2419c4cff48863df90161d92decb8591) --- updated-dependencies: - dependency-name: bufbuild/buf-setup-action dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/buf-lint.yml | 2 +- .github/workflows/buf.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/buf-lint.yml b/.github/workflows/buf-lint.yml index cbfeb2ba5..9f60a2336 100644 --- a/.github/workflows/buf-lint.yml +++ b/.github/workflows/buf-lint.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - uses: bufbuild/buf-setup-action@35c243d7f2a909b1d4e40399b348a7fdab27d78d # v1.34.0 + - uses: bufbuild/buf-setup-action@aceb106d2419c4cff48863df90161d92decb8591 # v1.35.1 with: github_token: ${{ secrets.GITHUB_TOKEN }} - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1 diff --git a/.github/workflows/buf.yml b/.github/workflows/buf.yml index 8b964ef24..1856fb95e 100644 --- a/.github/workflows/buf.yml +++ b/.github/workflows/buf.yml @@ -13,7 +13,7 @@ jobs: if: github.repository_owner == 'prometheus' steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - uses: bufbuild/buf-setup-action@35c243d7f2a909b1d4e40399b348a7fdab27d78d # v1.34.0 + - uses: bufbuild/buf-setup-action@aceb106d2419c4cff48863df90161d92decb8591 # v1.35.1 with: github_token: ${{ secrets.GITHUB_TOKEN }} - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1 From 918ce1f8c234170f9a5c7eff537496bda9665be6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 1 Aug 2024 23:46:41 +0000 Subject: [PATCH 26/99] build(deps): bump github/codeql-action from 3.25.11 to 3.25.15 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.25.11 to 3.25.15. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/b611370bb5703a7efb587f9d136a52ea24c5c38c...afb54ba388a7dca6ecae48f608c4ff05ff4cc77a) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/codeql-analysis.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 12ffc659c..2e1bd3024 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -27,12 +27,12 @@ jobs: uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - name: Initialize CodeQL - uses: github/codeql-action/init@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11 + uses: github/codeql-action/init@afb54ba388a7dca6ecae48f608c4ff05ff4cc77a # v3.25.15 with: languages: ${{ matrix.language }} - name: Autobuild - uses: github/codeql-action/autobuild@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11 + uses: github/codeql-action/autobuild@afb54ba388a7dca6ecae48f608c4ff05ff4cc77a # v3.25.15 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11 + uses: github/codeql-action/analyze@afb54ba388a7dca6ecae48f608c4ff05ff4cc77a # v3.25.15 diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index c82fa87a1..40e867510 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -45,6 +45,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@b611370bb5703a7efb587f9d136a52ea24c5c38c # tag=v3.25.11 + uses: github/codeql-action/upload-sarif@afb54ba388a7dca6ecae48f608c4ff05ff4cc77a # tag=v3.25.15 with: sarif_file: results.sarif From 3cad51bcb2e9cdacf8e0ffb9b6ebf1f8296546e8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 1 Aug 2024 23:46:44 +0000 Subject: [PATCH 27/99] build(deps): bump golangci/golangci-lint-action from 6.0.1 to 6.1.0 Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 6.0.1 to 6.1.0. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/a4f60bb28d35aeee14e6880718e0c85ff1882e64...aaa42aa0628b4ae2578232a66b541047968fac86) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a5a09a98f..b08e7389b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -182,7 +182,7 @@ jobs: run: sudo apt-get update && sudo apt-get -y install libsnmp-dev if: github.repository == 'prometheus/snmp_exporter' - name: Lint - uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1 + uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 # v6.1.0 with: args: --verbose # Make sure to sync this with Makefile.common and scripts/golangci-lint.yml. From f6f911db751fb4011ff1298c43a5c5bf262df55f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 1 Aug 2024 23:46:48 +0000 Subject: [PATCH 28/99] build(deps): bump actions/upload-artifact from 4.3.3 to 4.3.4 Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4.3.3 to 4.3.4. 
- [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/65462800fd760344b1a7b4382951275a0abb4808...0b2256b8c012f0828dc542b3febcab082c67f72b) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/fuzzing.yml | 2 +- .github/workflows/scorecards.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/fuzzing.yml b/.github/workflows/fuzzing.yml index dc510e596..f3953cb2a 100644 --- a/.github/workflows/fuzzing.yml +++ b/.github/workflows/fuzzing.yml @@ -21,7 +21,7 @@ jobs: fuzz-seconds: 600 dry-run: false - name: Upload Crash - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 + uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 if: failure() && steps.build.outcome == 'success' with: name: artifacts diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index c82fa87a1..6e16660ac 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -37,7 +37,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF # format to the repository Actions tab. - name: "Upload artifact" - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # tag=v4.3.3 + uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # tag=v4.3.4 with: name: SARIF file path: results.sarif From 7d086171fc3c12af970aa3fb43ca280c23a4a205 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 1 Aug 2024 23:54:16 +0000 Subject: [PATCH 29/99] build(deps): bump golangci/golangci-lint-action in /scripts Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 6.0.1 to 6.1.0. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/a4f60bb28d35aeee14e6880718e0c85ff1882e64...aaa42aa0628b4ae2578232a66b541047968fac86) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- scripts/golangci-lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/golangci-lint.yml b/scripts/golangci-lint.yml index 83ae3906c..937f2e21d 100644 --- a/scripts/golangci-lint.yml +++ b/scripts/golangci-lint.yml @@ -33,7 +33,7 @@ jobs: run: sudo apt-get update && sudo apt-get -y install libsnmp-dev if: github.repository == 'prometheus/snmp_exporter' - name: Lint - uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1 + uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 # v6.1.0 with: args: --verbose version: v1.59.1 From 4553d1baa62da853e7e6e4841f9a376af47b327e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 1 Aug 2024 23:54:21 +0000 Subject: [PATCH 30/99] build(deps): bump actions/setup-go from 5.0.1 to 5.0.2 in /scripts Bumps [actions/setup-go](https://github.com/actions/setup-go) from 5.0.1 to 5.0.2. 
- [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/cdcb36043654635271a94b9a6d1392de5bb323a7...0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- scripts/golangci-lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/golangci-lint.yml b/scripts/golangci-lint.yml index 83ae3906c..90e3ed79a 100644 --- a/scripts/golangci-lint.yml +++ b/scripts/golangci-lint.yml @@ -26,7 +26,7 @@ jobs: - name: Checkout repository uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Install Go - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version: 1.22.x - name: Install snmp_exporter/generator dependencies From a5666dded5d12582923626d68f9ec5fffb0a7efe Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 2 Aug 2024 10:06:52 +0200 Subject: [PATCH 31/99] build(deps): bump @codemirror/view from 6.28.3 to 6.29.1 in /web/ui (#14564) Bumps [@codemirror/view](https://github.com/codemirror/view) from 6.28.3 to 6.29.1. - [Changelog](https://github.com/codemirror/view/blob/main/CHANGELOG.md) - [Commits](https://github.com/codemirror/view/compare/6.28.3...6.29.1) --- updated-dependencies: - dependency-name: "@codemirror/view" dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/module/codemirror-promql/package.json | 2 +- web/ui/package-lock.json | 10 +++++----- web/ui/react-app/package.json | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index ba924346f..51463e08a 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -37,7 +37,7 @@ "@codemirror/language": "^6.10.2", "@codemirror/lint": "^6.8.1", "@codemirror/state": "^6.3.3", - "@codemirror/view": "^6.28.3", + "@codemirror/view": "^6.29.1", "@lezer/common": "^1.2.1", "@lezer/highlight": "^1.2.0", "@lezer/lr": "^1.4.1", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 2028c3402..c866cf35e 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -41,7 +41,7 @@ "@codemirror/language": "^6.10.2", "@codemirror/lint": "^6.8.1", "@codemirror/state": "^6.3.3", - "@codemirror/view": "^6.28.3", + "@codemirror/view": "^6.29.1", "@lezer/common": "^1.2.1", "@lezer/highlight": "^1.2.0", "@lezer/lr": "^1.4.1", @@ -2093,9 +2093,9 @@ "integrity": "sha512-QkEyUiLhsJoZkbumGZlswmAhA7CBU02Wrz7zvH4SrcifbsqwlXShVXg65f3v/ts57W3dqyamEriMhij1Z3Zz4A==" }, "node_modules/@codemirror/view": { - "version": "6.28.3", - "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.28.3.tgz", - "integrity": "sha512-QVqP+ko078/h9yrW+u5grX3rQhC+BkGKADRrlDaJznfPngJOv5zObiVf0+SgAWhL/Yt0nvZ+10rO3L+gU5IbFw==", + "version": "6.29.1", + "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.29.1.tgz", + "integrity": "sha512-7r+DlO/QFwPqKp73uq5mmrS4TuLPUVotbNOKYzN3OLP5ScrOVXcm4g13/48b6ZXGhdmzMinzFYqH0vo+qihIkQ==", "dependencies": { "@codemirror/state": "^6.4.0", "style-mod": "^4.1.0", @@ 
-19340,7 +19340,7 @@ "@codemirror/lint": "^6.8.1", "@codemirror/search": "^6.5.6", "@codemirror/state": "^6.3.3", - "@codemirror/view": "^6.28.3", + "@codemirror/view": "^6.29.1", "@forevolve/bootstrap-dark": "^4.0.2", "@fortawesome/fontawesome-svg-core": "6.5.2", "@fortawesome/free-solid-svg-icons": "6.5.2", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index c8002433a..a1b15b8f7 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -9,7 +9,7 @@ "@codemirror/lint": "^6.8.1", "@codemirror/search": "^6.5.6", "@codemirror/state": "^6.3.3", - "@codemirror/view": "^6.28.3", + "@codemirror/view": "^6.29.1", "@forevolve/bootstrap-dark": "^4.0.2", "@fortawesome/fontawesome-svg-core": "6.5.2", "@fortawesome/free-solid-svg-icons": "6.5.2", From 5681ec50e1fc1e591c4dcc63eb343478ea2082ee Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 2 Aug 2024 11:47:04 +0200 Subject: [PATCH 32/99] build(deps): bump @lezer/lr from 1.4.1 to 1.4.2 in /web/ui (#14560) Bumps [@lezer/lr](https://github.com/lezer-parser/lr) from 1.4.1 to 1.4.2. - [Changelog](https://github.com/lezer-parser/lr/blob/main/CHANGELOG.md) - [Commits](https://github.com/lezer-parser/lr/compare/1.4.1...1.4.2) --- updated-dependencies: - dependency-name: "@lezer/lr" dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/module/codemirror-promql/package.json | 2 +- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 12 ++++++------ web/ui/react-app/package.json | 2 +- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index 51463e08a..40053865b 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -40,7 +40,7 @@ "@codemirror/view": "^6.29.1", "@lezer/common": "^1.2.1", "@lezer/highlight": "^1.2.0", - "@lezer/lr": "^1.4.1", + "@lezer/lr": "^1.4.2", "isomorphic-fetch": "^3.0.0", "nock": "^13.5.4" }, diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index 43a5c44fa..8195c5f1e 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -32,7 +32,7 @@ "devDependencies": { "@lezer/generator": "^1.7.1", "@lezer/highlight": "^1.2.0", - "@lezer/lr": "^1.4.1" + "@lezer/lr": "^1.4.2" }, "peerDependencies": { "@lezer/highlight": "^1.1.2", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index c866cf35e..6a759a8bc 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -44,7 +44,7 @@ "@codemirror/view": "^6.29.1", "@lezer/common": "^1.2.1", "@lezer/highlight": "^1.2.0", - "@lezer/lr": "^1.4.1", + "@lezer/lr": "^1.4.2", "isomorphic-fetch": "^3.0.0", "nock": "^13.5.4" }, @@ -74,7 +74,7 @@ "devDependencies": { "@lezer/generator": "^1.7.1", "@lezer/highlight": "^1.2.0", - "@lezer/lr": "^1.4.1" + "@lezer/lr": "^1.4.2" }, "peerDependencies": { "@lezer/highlight": "^1.1.2", @@ -3391,9 +3391,9 @@ } }, "node_modules/@lezer/lr": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.4.1.tgz", - "integrity": "sha512-CHsKq8DMKBf9b3yXPDIU4DbH+ZJd/sJdYOW2llbW/HudP5u0VS6Bfq1hLYfgU7uAYGFIyGGQIsSOXGPEErZiJw==", + "version": "1.4.2", + "resolved": 
"https://registry.npmjs.org/@lezer/lr/-/lr-1.4.2.tgz", + "integrity": "sha512-pu0K1jCIdnQ12aWNaAVU5bzi7Bd1w54J3ECgANPmYLtQKP0HBj2cE/5coBD66MT10xbtIuUr7tg0Shbsvk0mDA==", "dependencies": { "@lezer/common": "^1.0.0" } @@ -19347,7 +19347,7 @@ "@fortawesome/react-fontawesome": "0.2.0", "@lezer/common": "^1.2.1", "@lezer/highlight": "^1.2.0", - "@lezer/lr": "^1.4.1", + "@lezer/lr": "^1.4.2", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", "@prometheus-io/codemirror-promql": "0.53.1", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index a1b15b8f7..5e3c684ba 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -16,7 +16,7 @@ "@fortawesome/react-fontawesome": "0.2.0", "@lezer/common": "^1.2.1", "@lezer/highlight": "^1.2.0", - "@lezer/lr": "^1.4.1", + "@lezer/lr": "^1.4.2", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", "@prometheus-io/codemirror-promql": "0.53.1", From 77d111e5013f386d8cac403d31aff17e1a351cf8 Mon Sep 17 00:00:00 2001 From: suntala Date: Fri, 2 Aug 2024 14:25:22 +0200 Subject: [PATCH 33/99] Fix links to feature flags page Signed-off-by: suntala --- docs/querying/functions.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/querying/functions.md b/docs/querying/functions.md index de65e693d..ee81328b5 100644 --- a/docs/querying/functions.md +++ b/docs/querying/functions.md @@ -617,7 +617,7 @@ Like `sort`, `sort_desc` only affects the results of instant queries, as range q ## `sort_by_label()` -**This function has to be enabled via the [feature flag](../feature_flags/) `--enable-feature=promql-experimental-functions`.** +**This function has to be enabled via the [feature flag](../feature_flags.md) `--enable-feature=promql-experimental-functions`.** `sort_by_label(v instant-vector, label string, ...)` returns vector elements sorted by their label values and sample value in case of label values being equal, in ascending order. @@ -627,7 +627,7 @@ This function uses [natural sort order](https://en.wikipedia.org/wiki/Natural_so ## `sort_by_label_desc()` -**This function has to be enabled via the [feature flag](../feature_flags/) `--enable-feature=promql-experimental-functions`.** +**This function has to be enabled via the [feature flag](../feature_flags.md) `--enable-feature=promql-experimental-functions`.** Same as `sort_by_label`, but sorts in descending order. @@ -676,7 +676,7 @@ over time and return an instant vector with per-series aggregation results: * `last_over_time(range-vector)`: the most recent point value in the specified interval. * `present_over_time(range-vector)`: the value 1 for any series in the specified interval. -If the [feature flag](../feature_flags/) +If the [feature flag](../feature_flags.md) `--enable-feature=promql-experimental-functions` is set, the following additional functions are available: From 099d209a2688fca7adbb66bc9da3556c2cb58132 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 3 Aug 2024 13:20:29 +0000 Subject: [PATCH 34/99] build(deps): bump braces from 3.0.2 to 3.0.3 in /web/ui Bumps [braces](https://github.com/micromatch/braces) from 3.0.2 to 3.0.3. - [Changelog](https://github.com/micromatch/braces/blob/master/CHANGELOG.md) - [Commits](https://github.com/micromatch/braces/compare/3.0.2...3.0.3) --- updated-dependencies: - dependency-name: braces dependency-type: indirect ... 
Signed-off-by: dependabot[bot] --- web/ui/package-lock.json | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 6a759a8bc..641d4b388 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -5639,10 +5639,11 @@ } }, "node_modules/braces": { - "version": "3.0.2", - "license": "MIT", + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", "dependencies": { - "fill-range": "^7.0.1" + "fill-range": "^7.1.1" }, "engines": { "node": ">=8" @@ -8368,8 +8369,9 @@ } }, "node_modules/fill-range": { - "version": "7.0.1", - "license": "MIT", + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", "dependencies": { "to-regex-range": "^5.0.1" }, @@ -9548,7 +9550,8 @@ }, "node_modules/is-number": { "version": "7.0.0", - "license": "MIT", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", "engines": { "node": ">=0.12.0" } @@ -17969,7 +17972,8 @@ }, "node_modules/to-regex-range": { "version": "5.0.1", - "license": "MIT", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", "dependencies": { "is-number": "^7.0.0" }, From 6b9c386d143bc3aa2f5075917f03b787d4ff9982 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 4 Aug 2024 15:58:51 +0000 Subject: [PATCH 35/99] build(deps): bump follow-redirects from 1.15.2 to 1.15.6 in /web/ui Bumps [follow-redirects](https://github.com/follow-redirects/follow-redirects) from 1.15.2 to 1.15.6. - [Release notes](https://github.com/follow-redirects/follow-redirects/releases) - [Commits](https://github.com/follow-redirects/follow-redirects/compare/v1.15.2...v1.15.6) --- updated-dependencies: - dependency-name: follow-redirects dependency-type: indirect ... Signed-off-by: dependabot[bot] --- web/ui/package-lock.json | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 641d4b388..d477bb20a 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -8458,14 +8458,15 @@ "license": "ISC" }, "node_modules/follow-redirects": { - "version": "1.15.2", + "version": "1.15.6", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.6.tgz", + "integrity": "sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA==", "funding": [ { "type": "individual", "url": "https://github.com/sponsors/RubenVerborgh" } ], - "license": "MIT", "engines": { "node": ">=4.0" }, From 12dd9991645a17bf7fba4395b19081a1d80d6a30 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 4 Aug 2024 15:58:56 +0000 Subject: [PATCH 36/99] build(deps-dev): bump ejs from 3.1.8 to 3.1.10 in /web/ui Bumps [ejs](https://github.com/mde/ejs) from 3.1.8 to 3.1.10. 
- [Release notes](https://github.com/mde/ejs/releases) - [Commits](https://github.com/mde/ejs/compare/v3.1.8...v3.1.10) --- updated-dependencies: - dependency-name: ejs dependency-type: indirect ... Signed-off-by: dependabot[bot] --- web/ui/package-lock.json | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 641d4b388..00c24c9cf 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -7113,9 +7113,10 @@ "license": "MIT" }, "node_modules/ejs": { - "version": "3.1.8", + "version": "3.1.10", + "resolved": "https://registry.npmjs.org/ejs/-/ejs-3.1.10.tgz", + "integrity": "sha512-UeJmFfOrAQS8OJWPZ4qtgHyWExa088/MtK5UEyoJGFH67cDEXkZSviOiKRCZ4Xij0zxI3JECgYs3oKx+AizQBA==", "dev": true, - "license": "Apache-2.0", "dependencies": { "jake": "^10.8.5" }, From 983d0bd5cb86e904cfaf6dde96d8b7ba30ea69f8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 4 Aug 2024 16:00:36 +0000 Subject: [PATCH 37/99] build(deps-dev): bump express from 4.18.1 to 4.19.2 in /web/ui Bumps [express](https://github.com/expressjs/express) from 4.18.1 to 4.19.2. - [Release notes](https://github.com/expressjs/express/releases) - [Changelog](https://github.com/expressjs/express/blob/master/History.md) - [Commits](https://github.com/expressjs/express/compare/4.18.1...4.19.2) --- updated-dependencies: - dependency-name: express dependency-type: indirect ... Signed-off-by: dependabot[bot] --- web/ui/package-lock.json | 68 ++++++++++++++++++++++++---------------- 1 file changed, 41 insertions(+), 27 deletions(-) diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 641d4b388..0bd199720 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -5541,20 +5541,21 @@ "license": "MIT" }, "node_modules/body-parser": { - "version": "1.20.0", + "version": "1.20.2", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.2.tgz", + "integrity": "sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==", "dev": true, - "license": "MIT", "dependencies": { "bytes": "3.1.2", - "content-type": "~1.0.4", + "content-type": "~1.0.5", "debug": "2.6.9", "depd": "2.0.0", "destroy": "1.2.0", "http-errors": "2.0.0", "iconv-lite": "0.4.24", "on-finished": "2.4.1", - "qs": "6.10.3", - "raw-body": "2.5.1", + "qs": "6.11.0", + "raw-body": "2.5.2", "type-is": "~1.6.18", "unpipe": "1.0.0" }, @@ -5565,24 +5566,27 @@ }, "node_modules/body-parser/node_modules/bytes": { "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", "dev": true, - "license": "MIT", "engines": { "node": ">= 0.8" } }, "node_modules/body-parser/node_modules/debug": { "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", "dev": true, - "license": "MIT", "dependencies": { "ms": "2.0.0" } }, "node_modules/body-parser/node_modules/iconv-lite": { "version": "0.4.24", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", "dev": true, - "license": "MIT", "dependencies": { "safer-buffer": ">= 2.1.2 < 3" }, @@ -5592,8 +5596,9 @@ }, 
"node_modules/body-parser/node_modules/ms": { "version": "2.0.0", - "dev": true, - "license": "MIT" + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "dev": true }, "node_modules/bonjour-service": { "version": "1.0.14", @@ -6215,9 +6220,10 @@ "license": "MIT" }, "node_modules/content-type": { - "version": "1.0.4", + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", + "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", "dev": true, - "license": "MIT", "engines": { "node": ">= 0.6" } @@ -6231,9 +6237,10 @@ } }, "node_modules/cookie": { - "version": "0.5.0", + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.6.0.tgz", + "integrity": "sha512-U71cyTamuh1CRNCfpGY6to28lxvNwPG4Guz/EVjgf3Jmzv0vlDp1atT9eS5dDjMYHucpHbWns6Lwf3BKz6svdw==", "dev": true, - "license": "MIT", "engines": { "node": ">= 0.6" } @@ -8148,16 +8155,17 @@ "license": "MIT" }, "node_modules/express": { - "version": "4.18.1", + "version": "4.19.2", + "resolved": "https://registry.npmjs.org/express/-/express-4.19.2.tgz", + "integrity": "sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==", "dev": true, - "license": "MIT", "dependencies": { "accepts": "~1.3.8", "array-flatten": "1.1.1", - "body-parser": "1.20.0", + "body-parser": "1.20.2", "content-disposition": "0.5.4", "content-type": "~1.0.4", - "cookie": "0.5.0", + "cookie": "0.6.0", "cookie-signature": "1.0.6", "debug": "2.6.9", "depd": "2.0.0", @@ -8173,7 +8181,7 @@ "parseurl": "~1.3.3", "path-to-regexp": "0.1.7", "proxy-addr": "~2.0.7", - "qs": "6.10.3", + "qs": "6.11.0", "range-parser": "~1.2.1", "safe-buffer": "5.2.1", "send": "0.18.0", @@ -12362,8 +12370,9 @@ }, "node_modules/media-typer": { "version": "0.3.0", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", + "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", "dev": true, - "license": "MIT", "engines": { "node": ">= 0.6" } @@ -14787,9 +14796,10 @@ } }, "node_modules/qs": { - "version": "6.10.3", + "version": "6.11.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.0.tgz", + "integrity": "sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==", "dev": true, - "license": "BSD-3-Clause", "dependencies": { "side-channel": "^1.0.4" }, @@ -14877,9 +14887,10 @@ } }, "node_modules/raw-body": { - "version": "2.5.1", + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz", + "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==", "dev": true, - "license": "MIT", "dependencies": { "bytes": "3.1.2", "http-errors": "2.0.0", @@ -14892,16 +14903,18 @@ }, "node_modules/raw-body/node_modules/bytes": { "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", "dev": true, - "license": "MIT", "engines": { "node": ">= 0.8" } }, "node_modules/raw-body/node_modules/iconv-lite": { "version": "0.4.24", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": 
"sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", "dev": true, - "license": "MIT", "dependencies": { "safer-buffer": ">= 2.1.2 < 3" }, @@ -18195,8 +18208,9 @@ }, "node_modules/type-is": { "version": "1.6.18", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", + "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", "dev": true, - "license": "MIT", "dependencies": { "media-typer": "0.3.0", "mime-types": "~2.1.24" From 58aa1887476e3e2489eccea5706741f0a5bb3e26 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 4 Aug 2024 16:52:21 +0000 Subject: [PATCH 38/99] build(deps): bump github.com/Azure/azure-sdk-for-go/sdk/azidentity Bumps [github.com/Azure/azure-sdk-for-go/sdk/azidentity](https://github.com/Azure/azure-sdk-for-go) from 1.5.2 to 1.6.0. - [Release notes](https://github.com/Azure/azure-sdk-for-go/releases) - [Changelog](https://github.com/Azure/azure-sdk-for-go/blob/main/documentation/release.md) - [Commits](https://github.com/Azure/azure-sdk-for-go/compare/sdk/internal/v1.5.2...sdk/azcore/v1.6.0) --- updated-dependencies: - dependency-name: github.com/Azure/azure-sdk-for-go/sdk/azidentity dependency-type: indirect ... Signed-off-by: dependabot[bot] --- documentation/examples/remote_storage/go.mod | 5 ++--- documentation/examples/remote_storage/go.sum | 15 ++++++--------- 2 files changed, 8 insertions(+), 12 deletions(-) diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index 4c41a6606..35dca85a0 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -16,8 +16,8 @@ require ( require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect github.com/aws/aws-sdk-go v1.53.16 // indirect @@ -36,7 +36,6 @@ require ( github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.17.8 // indirect - github.com/kr/text v0.2.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index 9898d75d7..4c420092f 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -1,9 +1,9 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 h1:sUFnFjzDUie80h24I7mrKtwCKgLY9L8h5Tp2x9+TWqk= 
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0/go.mod h1:52JbnQTp15qg5mRkMBHwp0j0ZFwHJ42Sx3zVV5RE9p0= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 h1:U2rTu3Ef+7w9FHKIAXM6ZyqF3UOWJZ12zIm8zECAFfg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA= @@ -39,7 +39,6 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc= github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -50,8 +49,6 @@ github.com/digitalocean/godo v1.117.0 h1:WVlTe09melDYTd7VCVyvHcNWbgB+uI1O115+5LO github.com/digitalocean/godo v1.117.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= -github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= @@ -279,8 +276,8 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c h1:6GEA48LnonkYZhQ654v7QTIP5uBTbCEVm49oIhif5lc= github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c/go.mod h1:FcNs5wa7M9yV8IlxlB/05s5oy9vULUIlu/tZsviRIT8= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27 h1:yGAraK1uUjlhSXgNMIy8o/J4LFNcy7yeipBqt9N9mVg= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27/go.mod 
h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= From bded8530357d40166f70952ebc3b9803db9e63e4 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Sat, 3 Aug 2024 22:55:42 +0100 Subject: [PATCH 39/99] [Test] TSDB: TestOOOCompaction with samples added after compaction starts Test fails due to bug. Signed-off-by: Bryan Boreham --- tsdb/db.go | 8 ++++++++ tsdb/db_test.go | 44 ++++++++++++++++++++++++++++++-------------- 2 files changed, 38 insertions(+), 14 deletions(-) diff --git a/tsdb/db.go b/tsdb/db.go index 090d6fcf0..87870a847 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -1295,6 +1295,9 @@ func (db *DB) CompactOOOHead(ctx context.Context) error { return db.compactOOOHead(ctx) } +// Callback for testing. +var compactOOOHeadTestingCallback func() + func (db *DB) compactOOOHead(ctx context.Context) error { if !db.oooWasEnabled.Load() { return nil @@ -1304,6 +1307,11 @@ func (db *DB) compactOOOHead(ctx context.Context) error { return fmt.Errorf("get ooo compaction head: %w", err) } + if compactOOOHeadTestingCallback != nil { + compactOOOHeadTestingCallback() + compactOOOHeadTestingCallback = nil + } + ulids, err := db.compactOOO(db.dir, oooHead) if err != nil { return fmt.Errorf("compact ooo head: %w", err) diff --git a/tsdb/db_test.go b/tsdb/db_test.go index c0edafe08..3dae9a5d1 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -4497,12 +4497,15 @@ func TestMetadataAssertInMemoryData(t *testing.T) { func TestOOOCompaction(t *testing.T) { for name, scenario := range sampleTypeScenarios { t.Run(name, func(t *testing.T) { - testOOOCompaction(t, scenario) + testOOOCompaction(t, scenario, false) + }) + t.Run(name+"+extra", func(t *testing.T) { + testOOOCompaction(t, scenario, true) }) } } -func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) { +func testOOOCompaction(t *testing.T, scenario sampleTypeScenario, addExtraSamples bool) { dir := t.TempDir() ctx := context.Background() @@ -4533,7 +4536,7 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) { } // Add an in-order samples. - addSample(250, 350) + addSample(250, 300) // Verify that the in-memory ooo chunk is empty. checkEmptyOOOChunk := func(lbls labels.Labels) { @@ -4547,15 +4550,17 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) { // Add ooo samples that creates multiple chunks. // 90 to 300 spans across 3 block ranges: [0, 120), [120, 240), [240, 360) - addSample(90, 310) + addSample(90, 300) // Adding same samples to create overlapping chunks. // Since the active chunk won't start at 90 again, all the new // chunks will have different time ranges than the previous chunks. - addSample(90, 310) + addSample(90, 300) + + var highest int64 = 300 verifyDBSamples := func() { var series1Samples, series2Samples []chunks.Sample - for _, r := range [][2]int64{{90, 119}, {120, 239}, {240, 350}} { + for _, r := range [][2]int64{{90, 119}, {120, 239}, {240, highest}} { fromMins, toMins := r[0], r[1] for min := fromMins; min <= toMins; min++ { ts := min * time.Minute.Milliseconds() @@ -4583,7 +4588,7 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) { require.NoError(t, err) require.False(t, created) require.Positive(t, ms.ooo.oooHeadChunk.chunk.NumSamples()) - require.Len(t, ms.ooo.oooMmappedChunks, 14) // 7 original, 7 duplicate. + require.Len(t, ms.ooo.oooMmappedChunks, 13) // 7 original, 6 duplicate. 
} checkNonEmptyOOOChunk(series1) checkNonEmptyOOOChunk(series2) @@ -4601,6 +4606,15 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) { require.NoError(t, err) require.Greater(t, f.Size(), int64(100)) + if addExtraSamples { + compactOOOHeadTestingCallback = func() { + addSample(90, 120) // Back in time, to generate a new OOO chunk. + addSample(300, 330) // Now some samples after the previous highest timestamp. + addSample(300, 330) // Repeat to generate an OOO chunk at these timestamps. + } + highest = 330 + } + // OOO compaction happens here. require.NoError(t, db.CompactOOOHead(ctx)) @@ -4616,11 +4630,13 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) { require.Equal(t, "00000001", files[0].Name()) f, err = files[0].Info() require.NoError(t, err) - require.Equal(t, int64(0), f.Size()) - // OOO stuff should not be present in the Head now. - checkEmptyOOOChunk(series1) - checkEmptyOOOChunk(series2) + if !addExtraSamples { + require.Equal(t, int64(0), f.Size()) + // OOO stuff should not be present in the Head now. + checkEmptyOOOChunk(series1) + checkEmptyOOOChunk(series2) + } verifySamples := func(block *Block, fromMins, toMins int64) { series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1) @@ -4645,7 +4661,7 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) { // Checking for expected data in the blocks. verifySamples(db.Blocks()[0], 90, 119) verifySamples(db.Blocks()[1], 120, 239) - verifySamples(db.Blocks()[2], 240, 310) + verifySamples(db.Blocks()[2], 240, 299) // There should be a single m-map file. mmapDir := mmappedChunksDir(db.head.opts.ChunkDirRoot) @@ -4658,7 +4674,7 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) { err = db.CompactHead(NewRangeHead(db.head, 250*time.Minute.Milliseconds(), 350*time.Minute.Milliseconds())) require.NoError(t, err) require.Len(t, db.Blocks(), 4) // [0, 120), [120, 240), [240, 360), [250, 351) - verifySamples(db.Blocks()[3], 250, 350) + verifySamples(db.Blocks()[3], 250, highest) verifyDBSamples() // Blocks created out of normal and OOO head now. But not merged. @@ -4675,7 +4691,7 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) { require.Len(t, db.Blocks(), 3) // [0, 120), [120, 240), [240, 360) verifySamples(db.Blocks()[0], 90, 119) verifySamples(db.Blocks()[1], 120, 239) - verifySamples(db.Blocks()[2], 240, 350) // Merged block. + verifySamples(db.Blocks()[2], 240, highest) // Merged block. verifyDBSamples() // Final state. Blocks from normal and OOO head are merged. } From 015638c4b6b8f913ea5165f0f5d26950937f1e69 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Sat, 3 Aug 2024 23:24:39 +0100 Subject: [PATCH 40/99] [BUGFIX] TSDB: Exclude OOO chunks mapped after compaction starts Otherwise the writer can end up with invalid chunks. Signed-off-by: Bryan Boreham --- tsdb/head_read.go | 5 ++++- tsdb/ooo_head.go | 2 +- tsdb/ooo_head_read.go | 16 +++++++++------- tsdb/ooo_head_read_test.go | 6 +++--- 4 files changed, 17 insertions(+), 12 deletions(-) diff --git a/tsdb/head_read.go b/tsdb/head_read.go index 9ba8785ad..87564ae3c 100644 --- a/tsdb/head_read.go +++ b/tsdb/head_read.go @@ -467,7 +467,7 @@ func (s *memSeries) chunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDi // amongst all the chunks in the OOOHead. // This function is not thread safe unless the caller holds a lock. // The caller must ensure that s.ooo is not nil. 
-func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMapper, mint, maxt int64) (*mergedOOOChunks, error) { +func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMapper, mint, maxt int64, maxMmapRef chunks.ChunkDiskMapperRef) (*mergedOOOChunks, error) { _, cid := chunks.HeadChunkRef(meta.Ref).Unpack() // ix represents the index of chunk in the s.mmappedChunks slice. The chunk meta's are @@ -490,6 +490,9 @@ func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMappe tmpChks := make([]chunkMetaAndChunkDiskMapperRef, 0, len(s.ooo.oooMmappedChunks)+1) for i, c := range s.ooo.oooMmappedChunks { + if maxMmapRef != 0 && c.ref > maxMmapRef { + break + } if c.OverlapsClosedInterval(mint, maxt) { tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{ meta: chunks.Meta{ diff --git a/tsdb/ooo_head.go b/tsdb/ooo_head.go index b2556d62e..01b5bff63 100644 --- a/tsdb/ooo_head.go +++ b/tsdb/ooo_head.go @@ -201,7 +201,7 @@ func (oh *OOORangeHead) Index() (IndexReader, error) { } func (oh *OOORangeHead) Chunks() (ChunkReader, error) { - return NewOOOHeadChunkReader(oh.head, oh.mint, oh.maxt, oh.isoState), nil + return NewOOOHeadChunkReader(oh.head, oh.mint, oh.maxt, oh.isoState, 0), nil } func (oh *OOORangeHead) Tombstones() (tombstones.Reader, error) { diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go index a35276af5..9d5b9d644 100644 --- a/tsdb/ooo_head_read.go +++ b/tsdb/ooo_head_read.go @@ -243,14 +243,16 @@ type OOOHeadChunkReader struct { head *Head mint, maxt int64 isoState *oooIsolationState + maxMmapRef chunks.ChunkDiskMapperRef } -func NewOOOHeadChunkReader(head *Head, mint, maxt int64, isoState *oooIsolationState) *OOOHeadChunkReader { +func NewOOOHeadChunkReader(head *Head, mint, maxt int64, isoState *oooIsolationState, maxMmapRef chunks.ChunkDiskMapperRef) *OOOHeadChunkReader { return &OOOHeadChunkReader{ - head: head, - mint: mint, - maxt: maxt, - isoState: isoState, + head: head, + mint: mint, + maxt: maxt, + isoState: isoState, + maxMmapRef: maxMmapRef, } } @@ -269,7 +271,7 @@ func (cr OOOHeadChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, s.Unlock() return nil, nil, storage.ErrNotFound } - mc, err := s.oooMergedChunks(meta, cr.head.chunkDiskMapper, cr.mint, cr.maxt) + mc, err := s.oooMergedChunks(meta, cr.head.chunkDiskMapper, cr.mint, cr.maxt, cr.maxMmapRef) s.Unlock() if err != nil { return nil, nil, err @@ -386,7 +388,7 @@ func (ch *OOOCompactionHead) Index() (IndexReader, error) { } func (ch *OOOCompactionHead) Chunks() (ChunkReader, error) { - return NewOOOHeadChunkReader(ch.oooIR.head, ch.oooIR.mint, ch.oooIR.maxt, nil), nil + return NewOOOHeadChunkReader(ch.oooIR.head, ch.oooIR.mint, ch.oooIR.maxt, nil, ch.lastMmapRef), nil } func (ch *OOOCompactionHead) Tombstones() (tombstones.Reader, error) { diff --git a/tsdb/ooo_head_read_test.go b/tsdb/ooo_head_read_test.go index 7ecd355b5..8cc3f1dde 100644 --- a/tsdb/ooo_head_read_test.go +++ b/tsdb/ooo_head_read_test.go @@ -481,7 +481,7 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) { t.Run("Getting a non existing chunk fails with not found error", func(t *testing.T) { db := newTestDBWithOpts(t, opts) - cr := NewOOOHeadChunkReader(db.head, 0, 1000, nil) + cr := NewOOOHeadChunkReader(db.head, 0, 1000, nil, 0) defer cr.Close() c, iterable, err := cr.ChunkOrIterable(chunks.Meta{ Ref: 0x1000000, Chunk: chunkenc.Chunk(nil), MinTime: 100, MaxTime: 300, @@ -839,7 +839,7 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, 
scenario sampleTypeScenario) { require.NoError(t, err) require.Equal(t, len(tc.expChunksSamples), len(chks)) - cr := NewOOOHeadChunkReader(db.head, tc.queryMinT, tc.queryMaxT, nil) + cr := NewOOOHeadChunkReader(db.head, tc.queryMinT, tc.queryMaxT, nil, 0) defer cr.Close() for i := 0; i < len(chks); i++ { c, iterable, err := cr.ChunkOrIterable(chks[i]) @@ -1013,7 +1013,7 @@ func testOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding( } require.NoError(t, app.Commit()) - cr := NewOOOHeadChunkReader(db.head, tc.queryMinT, tc.queryMaxT, nil) + cr := NewOOOHeadChunkReader(db.head, tc.queryMinT, tc.queryMaxT, nil, 0) defer cr.Close() for i := 0; i < len(chks); i++ { c, iterable, err := cr.ChunkOrIterable(chks[i]) From 9e43ad2e3710b8051c0c3b7bc70353cb65bd197f Mon Sep 17 00:00:00 2001 From: machine424 Date: Mon, 8 Jul 2024 12:15:37 +0200 Subject: [PATCH 41/99] chore(remote_write): clean up as watcher.go is part of wlog now Signed-off-by: machine424 --- tsdb/wlog/watcher.go | 92 +++++++++----------------------------------- 1 file changed, 18 insertions(+), 74 deletions(-) diff --git a/tsdb/wlog/watcher.go b/tsdb/wlog/watcher.go index bc7a144e6..9a02f3de4 100644 --- a/tsdb/wlog/watcher.go +++ b/tsdb/wlog/watcher.go @@ -20,7 +20,6 @@ import ( "math" "os" "path/filepath" - "slices" "strconv" "strings" "time" @@ -265,9 +264,9 @@ func (w *Watcher) loop() { // Run the watcher, which will tail the WAL until the quit channel is closed // or an error case is hit. func (w *Watcher) Run() error { - _, lastSegment, err := w.firstAndLast() + _, lastSegment, err := Segments(w.walDir) if err != nil { - return fmt.Errorf("wal.Segments: %w", err) + return fmt.Errorf("Segments: %w", err) } // We want to ensure this is false across iterations since @@ -318,57 +317,20 @@ func (w *Watcher) Run() error { // findSegmentForIndex finds the first segment greater than or equal to index. func (w *Watcher) findSegmentForIndex(index int) (int, error) { - refs, err := w.segments(w.walDir) + refs, err := listSegments(w.walDir) if err != nil { return -1, err } for _, r := range refs { - if r >= index { - return r, nil + if r.index >= index { + return r.index, nil } } return -1, errors.New("failed to find segment for index") } -func (w *Watcher) firstAndLast() (int, int, error) { - refs, err := w.segments(w.walDir) - if err != nil { - return -1, -1, err - } - - if len(refs) == 0 { - return -1, -1, nil - } - return refs[0], refs[len(refs)-1], nil -} - -// Copied from tsdb/wlog/wlog.go so we do not have to open a WAL. -// Plan is to move WAL watcher to TSDB and dedupe these implementations. -func (w *Watcher) segments(dir string) ([]int, error) { - files, err := os.ReadDir(dir) - if err != nil { - return nil, err - } - - var refs []int - for _, f := range files { - k, err := strconv.Atoi(f.Name()) - if err != nil { - continue - } - refs = append(refs, k) - } - slices.Sort(refs) - for i := 0; i < len(refs)-1; i++ { - if refs[i]+1 != refs[i+1] { - return nil, errors.New("segments are not sequential") - } - } - return refs, nil -} - func (w *Watcher) readAndHandleError(r *LiveReader, segmentNum int, tail bool, size int64) error { err := w.readSegment(r, segmentNum, tail) @@ -447,35 +409,17 @@ func (w *Watcher) watch(segmentNum int, tail bool) error { // Currently doing a garbage collect, try again later. } + // if a newer segment is produced, read the current one until the end and move on. 
case <-segmentTicker.C: - _, last, err := w.firstAndLast() + _, last, err := Segments(w.walDir) if err != nil { - return fmt.Errorf("segments: %w", err) + return fmt.Errorf("Segments: %w", err) } - // Check if new segments exists. - if last <= segmentNum { - continue + if last > segmentNum { + return w.readAndHandleError(reader, segmentNum, tail, size) } - err = w.readSegment(reader, segmentNum, tail) - - // Ignore errors reading to end of segment whilst replaying the WAL. - if !tail { - switch { - case err != nil && !errors.Is(err, io.EOF): - level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "err", err) - case reader.Offset() != size: - level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", reader.Offset(), "size", size) - } - return nil - } - - // Otherwise, when we are tailing, non-EOFs are fatal. - if err != nil && !errors.Is(err, io.EOF) { - return err - } - - return nil + continue // we haven't read due to a notification in quite some time, try reading anyways case <-readTicker.C: @@ -484,7 +428,7 @@ func (w *Watcher) watch(segmentNum int, tail bool) error { if err != nil { return err } - // still want to reset the ticker so we don't read too often + // reset the ticker so we don't read too often readTicker.Reset(readTimeout) case <-w.readNotify: @@ -492,7 +436,7 @@ func (w *Watcher) watch(segmentNum int, tail bool) error { if err != nil { return err } - // still want to reset the ticker so we don't read too often + // reset the ticker so we don't read too often readTicker.Reset(readTimeout) } } @@ -731,17 +675,17 @@ func (w *Watcher) readCheckpoint(checkpointDir string, readFn segmentReadFn) err } // Ensure we read the whole contents of every segment in the checkpoint dir. 
- segs, err := w.segments(checkpointDir) + segs, err := listSegments(checkpointDir) if err != nil { return fmt.Errorf("Unable to get segments checkpoint dir: %w", err) } - for _, seg := range segs { - size, err := getSegmentSize(checkpointDir, seg) + for _, segRef := range segs { + size, err := getSegmentSize(checkpointDir, segRef.index) if err != nil { return fmt.Errorf("getSegmentSize: %w", err) } - sr, err := OpenReadSegment(SegmentName(checkpointDir, seg)) + sr, err := OpenReadSegment(SegmentName(checkpointDir, segRef.index)) if err != nil { return fmt.Errorf("unable to open segment: %w", err) } @@ -753,7 +697,7 @@ func (w *Watcher) readCheckpoint(checkpointDir string, readFn segmentReadFn) err } if r.Offset() != size { - return fmt.Errorf("readCheckpoint wasn't able to read all data from the checkpoint %s/%08d, size: %d, totalRead: %d", checkpointDir, seg, size, r.Offset()) + return fmt.Errorf("readCheckpoint wasn't able to read all data from the checkpoint %s/%08d, size: %d, totalRead: %d", checkpointDir, segRef.index, size, r.Offset()) } } From 6d7ed08850849c2adcedb63cddeb61fc4f6a425e Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Tue, 6 Aug 2024 06:58:58 +0100 Subject: [PATCH 42/99] Prepare release 2.54.0-rc.1 (#14593) Signed-off-by: Bryan Boreham --- CHANGELOG.md | 5 +++++ VERSION | 2 +- web/ui/module/codemirror-promql/package.json | 4 ++-- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 14 +++++++------- web/ui/package.json | 2 +- web/ui/react-app/package.json | 4 ++-- 7 files changed, 19 insertions(+), 14 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 115055d12..e338d9c5c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ # Changelog +## 2.54.0-rc.1 / 2024-08-05 + +* [BUGFIX] TSDB: Exclude OOO chunks mapped after compaction starts (introduced by #14396). #14584 + ## 2.54.0-rc.0 / 2024-07-19 Release 2.54 brings a release candidate of a major new version of [Remote Write: 2.0](https://prometheus.io/docs/specs/remote_write_spec_2_0/). @@ -18,6 +22,7 @@ Remote-write v2 is enabled by default, but can be disabled via feature-flag `web * [ENHANCEMENT] TSDB: Optimise seek within index. #14393 * [ENHANCEMENT] TSDB: Optimise deletion of stale series. #14307 * [ENHANCEMENT] TSDB: Reduce locking to optimise adding and removing series. #13286,#14286 +* [ENHANCEMENT] TSDB: Small optimisation: streamline special handling for out-of-order data. #14396 * [ENHANCEMENT] Regexps: Optimize patterns with multiple prefixes. #13843,#14368 * [ENHANCEMENT] Regexps: Optimize patterns containing multiple literal strings. #14173 * [ENHANCEMENT] AWS SD: expose Primary IPv6 addresses as __meta_ec2_primary_ipv6_addresses. 
#14156 diff --git a/VERSION b/VERSION index 69539c388..149ab4732 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.54.0-rc.0 +2.54.0-rc.1 diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index 02c1d2286..26823cdfd 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - "version": "0.54.0-rc.0", + "version": "0.54.0-rc.1", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,7 +29,7 @@ }, "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "0.54.0-rc.0", + "@prometheus-io/lezer-promql": "0.54.0-rc.1", "lru-cache": "^7.18.3" }, "devDependencies": { diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index af2fcae67..774c95967 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/lezer-promql", - "version": "0.54.0-rc.0", + "version": "0.54.0-rc.1", "description": "lezer-based PromQL grammar", "main": "dist/index.cjs", "type": "module", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 17bb0f272..3243fe6aa 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -1,12 +1,12 @@ { "name": "prometheus-io", - "version": "0.54.0-rc.0", + "version": "0.54.0-rc.1", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "prometheus-io", - "version": "0.54.0-rc.0", + "version": "0.54.0-rc.1", "workspaces": [ "react-app", "module/*" @@ -30,10 +30,10 @@ }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", - "version": "0.54.0-rc.0", + "version": "0.54.0-rc.1", "license": "Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.54.0-rc.0", + "@prometheus-io/lezer-promql": "0.54.0-rc.1", "lru-cache": "^7.18.3" }, "devDependencies": { @@ -69,7 +69,7 @@ }, "module/lezer-promql": { "name": "@prometheus-io/lezer-promql", - "version": "0.54.0-rc.0", + "version": "0.54.0-rc.1", "license": "Apache-2.0", "devDependencies": { "@lezer/generator": "^1.7.0", @@ -19332,7 +19332,7 @@ }, "react-app": { "name": "@prometheus-io/app", - "version": "0.54.0-rc.0", + "version": "0.54.0-rc.1", "dependencies": { "@codemirror/autocomplete": "^6.17.0", "@codemirror/commands": "^6.6.0", @@ -19350,7 +19350,7 @@ "@lezer/lr": "^1.4.1", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.54.0-rc.0", + "@prometheus-io/codemirror-promql": "0.54.0-rc.1", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^9.0.6", diff --git a/web/ui/package.json b/web/ui/package.json index 80e8d815f..fe1e77ef5 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -28,5 +28,5 @@ "ts-jest": "^29.2.2", "typescript": "^4.9.5" }, - "version": "0.54.0-rc.0" + "version": "0.54.0-rc.1" } diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index df90049ce..b65354730 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/app", - "version": "0.54.0-rc.0", + "version": "0.54.0-rc.1", "private": true, "dependencies": { "@codemirror/autocomplete": "^6.17.0", @@ -19,7 +19,7 @@ "@lezer/lr": "^1.4.1", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", 
- "@prometheus-io/codemirror-promql": "0.54.0-rc.0", + "@prometheus-io/codemirror-promql": "0.54.0-rc.1", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^9.0.6", From aadec25faf7cef145dfab029767424383f51bd6d Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Tue, 6 Aug 2024 18:10:40 +1000 Subject: [PATCH 43/99] promql: Fix issue where some native histogram-related annotations are not emitted by `rate` (#14575) Signed-off-by: Charles Korn --- promql/functions.go | 11 +++++++++-- promql/promqltest/testdata/native_histograms.test | 14 ++++++++++++++ 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/promql/functions.go b/promql/functions.go index dcc2cd759..b9e93b85a 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -97,9 +97,10 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod lastT = samples.Histograms[numSamplesMinusOne].T var newAnnos annotations.Annotations resultHistogram, newAnnos = histogramRate(samples.Histograms, isCounter, metricName, args[0].PositionRange()) + annos.Merge(newAnnos) if resultHistogram == nil { // The histograms are not compatible with each other. - return enh.Out, annos.Merge(newAnnos) + return enh.Out, annos } case len(samples.Floats) > 1: numSamplesMinusOne = len(samples.Floats) - 1 @@ -189,6 +190,12 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra var annos annotations.Annotations + // We check for gauge type histograms in the loop below, but the loop below does not run on the first and last point, + // so check the first and last point now. + if isCounter && (prev.CounterResetHint == histogram.GaugeType || last.CounterResetHint == histogram.GaugeType) { + annos.Add(annotations.NewNativeHistogramNotCounterWarning(metricName, pos)) + } + // First iteration to find out two things: // - What's the smallest relevant schema? // - Are all data points histograms? 
@@ -241,7 +248,7 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra } h.CounterResetHint = histogram.GaugeType - return h.Compact(0), nil + return h.Compact(0), annos } // === delta(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index 6a8189a54..034d73eb5 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -748,3 +748,17 @@ eval instant at 5m histogram_quantile(0.5, custom_buckets_histogram) eval instant at 5m sum(custom_buckets_histogram) {} {{schema:-53 sum:5 count:4 custom_values:[5 10] buckets:[1 2 1]}} + +clear + +# Test 'this native histogram metric is not a gauge' warning for rate +load 30s + some_metric {{schema:0 sum:1 count:1 buckets:[1] counter_reset_hint:gauge}} {{schema:0 sum:2 count:2 buckets:[2] counter_reset_hint:gauge}} {{schema:0 sum:3 count:3 buckets:[3] counter_reset_hint:gauge}} + +# Test the case where we only have two points for rate +eval_warn instant at 30s rate(some_metric[30s]) + {} {{count:0.03333333333333333 sum:0.03333333333333333 buckets:[0.03333333333333333]}} + +# Test the case where we have more than two points for rate +eval_warn instant at 1m rate(some_metric[1m]) + {} {{count:0.03333333333333333 sum:0.03333333333333333 buckets:[0.03333333333333333]}} From aff089a0142891ca61018b68e8e41ddbbe312624 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Tue, 6 Aug 2024 10:51:44 +0200 Subject: [PATCH 44/99] Reproduce recoding bug with new and missing buckets MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- tsdb/chunkenc/float_histogram_test.go | 28 ++++++++++++++++++++++++++ tsdb/chunkenc/histogram_test.go | 29 +++++++++++++++++++++++++++ 2 files changed, 57 insertions(+) diff --git a/tsdb/chunkenc/float_histogram_test.go b/tsdb/chunkenc/float_histogram_test.go index 87bf61c2f..da78322cc 100644 --- a/tsdb/chunkenc/float_histogram_test.go +++ b/tsdb/chunkenc/float_histogram_test.go @@ -428,6 +428,34 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { require.Equal(t, emptyBucketH.PositiveSpans, h2.PositiveSpans) } + { // New histogram that has new buckets AND buckets missing but the buckets missing were empty. + emptyBucketH := eh.Copy() + emptyBucketH.PositiveBuckets = []float64{6, 0, 3, 2, 4, 0, 1} + c, hApp, ts, h1 := setup(emptyBucketH) + h2 := h1.Copy() + h2.PositiveSpans = []histogram.Span{ + {Offset: 0, Length: 1}, + {Offset: 3, Length: 1}, + {Offset: 3, Length: 2}, + {Offset: 5, Length: 2}, + } + h2.PositiveBuckets = []float64{7, 4, 3, 5, 2, 3} + + posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2) + require.NotEmpty(t, posInterjections) + require.Empty(t, negInterjections) + require.NotEmpty(t, backwardPositiveInserts) + require.Empty(t, backwardNegativeInserts) + require.True(t, ok) + require.False(t, cr) + + assertRecodedFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset) + + // Check that h2 was recoded. + require.Equal(t, []float64{7, 0, 4, 3, 5, 0, 2, 3}, h2.PositiveBuckets) + require.Equal(t, emptyBucketH.PositiveSpans, h2.PositiveSpans) + } + { // New histogram that has a counter reset while buckets are same. 
c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() diff --git a/tsdb/chunkenc/histogram_test.go b/tsdb/chunkenc/histogram_test.go index 939edd440..e1a82bd01 100644 --- a/tsdb/chunkenc/histogram_test.go +++ b/tsdb/chunkenc/histogram_test.go @@ -445,6 +445,35 @@ func TestHistogramChunkAppendable(t *testing.T) { require.Equal(t, emptyBucketH.PositiveSpans, h2.PositiveSpans) } + { // New histogram that has new buckets AND buckets missing but the buckets missing were empty. + emptyBucketH := eh.Copy() + emptyBucketH.PositiveBuckets = []int64{6, -6, 1, 1, -2, 1, 1} // counts: 6, 0, 1, 2, 0, 1, 2 (total 12) + c, hApp, ts, h1 := setup(emptyBucketH) + h2 := h1.Copy() + h2.PositiveSpans = []histogram.Span{ // Missing buckets at offset 1 and 9. + {Offset: 0, Length: 1}, + {Offset: 3, Length: 1}, + {Offset: 3, Length: 1}, + {Offset: 4, Length: 1}, + {Offset: 1, Length: 2}, + } + h2.PositiveBuckets = []int64{7, -5, 1, 0, 1, 1} // counts: 7, 2, 3, 3, 4, 5 (total 23) + + posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2) + require.NotEmpty(t, posInterjections) + require.Empty(t, negInterjections) + require.NotEmpty(t, backwardPositiveInserts) + require.Empty(t, backwardNegativeInserts) + require.True(t, ok) + require.False(t, cr) + + assertRecodedHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset) + + // Check that h2 was recoded. + require.Equal(t, []int64{7, -7, 2, 1, -3, 3, 1, 1}, h2.PositiveBuckets) // counts: 7, 0, 2, 3 , 0, 3, 5 (total 23) + require.Equal(t, emptyBucketH.PositiveSpans, h2.PositiveSpans) + } + { // New histogram that has a counter reset while buckets are same. c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() From 1b6d1366d80c12547dcda7645e4e07753656d9d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Tue, 6 Aug 2024 13:08:10 +0200 Subject: [PATCH 45/99] Fix re-code histogram and chunk re-code conflict MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- tsdb/chunkenc/float_histogram.go | 27 ++++++++--- tsdb/chunkenc/float_histogram_test.go | 8 +++- tsdb/chunkenc/histogram.go | 27 ++++++++--- tsdb/chunkenc/histogram_meta.go | 66 +++++++++++++++++++++++++++ tsdb/chunkenc/histogram_test.go | 8 +++- 5 files changed, 120 insertions(+), 16 deletions(-) diff --git a/tsdb/chunkenc/float_histogram.go b/tsdb/chunkenc/float_histogram.go index cc35df5ba..2af8dc507 100644 --- a/tsdb/chunkenc/float_histogram.go +++ b/tsdb/chunkenc/float_histogram.go @@ -419,6 +419,7 @@ loop: // fill in the bucket in b and advance a. if aCount == 0 { bInter.num++ // Mark that we need to insert a bucket in b. + bInter.bucketIdx = aIdx // Advance a if aInter.num > 0 { aInserts = append(aInserts, aInter) @@ -436,6 +437,7 @@ loop: return nil, nil, false case aIdx > bIdx: // a misses a value that is in b. Forward b and recompare. aInter.num++ + bInter.bucketIdx = bIdx // Advance b if bInter.num > 0 { bInserts = append(bInserts, bInter) @@ -453,6 +455,7 @@ loop: // fill in the bucket in b and advance a. if aCount == 0 { bInter.num++ + bInter.bucketIdx = aIdx // Advance a if aInter.num > 0 { aInserts = append(aInserts, aInter) @@ -471,6 +474,7 @@ loop: return nil, nil, false case !aOK && bOK: // a misses a value that is in b. Forward b and recompare. 
aInter.num++ + bInter.bucketIdx = bIdx // Advance b if bInter.num > 0 { bInserts = append(bInserts, bInter) @@ -773,6 +777,22 @@ func (a *FloatHistogramAppender) AppendFloatHistogram(prev *FloatHistogramAppend happ.appendFloatHistogram(t, h) return newChunk, false, app, nil } + if len(pBackwardInserts) > 0 || len(nBackwardInserts) > 0 { + // The histogram needs to be expanded to have the extra empty buckets + // of the chunk. + if len(pForwardInserts) == 0 && len(nForwardInserts) == 0 { + // No new chunks from the histogram, so the spans of the appender can accommodate the new buckets. + h.PositiveSpans = resize(h.PositiveSpans, len(a.pSpans)) + copy(h.PositiveSpans, a.pSpans) + h.NegativeSpans = resize(h.NegativeSpans, len(a.nSpans)) + copy(h.NegativeSpans, a.nSpans) + } else { + // Spans need pre-adjusting to accommodate the new buckets. + h.PositiveSpans = adjustForInserts(h.PositiveSpans, pBackwardInserts) + h.NegativeSpans = adjustForInserts(h.NegativeSpans, nBackwardInserts) + } + a.recodeHistogram(h, pBackwardInserts, nBackwardInserts) + } if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 { if appendOnly { return nil, false, a, fmt.Errorf("float histogram layout change with %d positive and %d negative forwards inserts", len(pForwardInserts), len(nForwardInserts)) @@ -784,13 +804,6 @@ func (a *FloatHistogramAppender) AppendFloatHistogram(prev *FloatHistogramAppend app.(*FloatHistogramAppender).appendFloatHistogram(t, h) return chk, true, app, nil } - if len(pBackwardInserts) > 0 || len(nBackwardInserts) > 0 { - // The histogram needs to be expanded to have the extra empty buckets - // of the chunk. - h.PositiveSpans = a.pSpans - h.NegativeSpans = a.nSpans - a.recodeHistogram(h, pBackwardInserts, nBackwardInserts) - } a.appendFloatHistogram(t, h) return nil, false, a, nil } diff --git a/tsdb/chunkenc/float_histogram_test.go b/tsdb/chunkenc/float_histogram_test.go index da78322cc..41e76ef59 100644 --- a/tsdb/chunkenc/float_histogram_test.go +++ b/tsdb/chunkenc/float_histogram_test.go @@ -453,7 +453,13 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { // Check that h2 was recoded. require.Equal(t, []float64{7, 0, 4, 3, 5, 0, 2, 3}, h2.PositiveBuckets) - require.Equal(t, emptyBucketH.PositiveSpans, h2.PositiveSpans) + require.Equal(t, []histogram.Span{ + {Offset: 0, Length: 2}, // Added empty bucket. + {Offset: 2, Length: 1}, // Existing - offset adjusted. + {Offset: 3, Length: 2}, // Existing. + {Offset: 3, Length: 1}, // Added empty bucket. + {Offset: 1, Length: 2}, // Existing + the extra bucket. + }, h2.PositiveSpans) } { // New histogram that has a counter reset while buckets are same. diff --git a/tsdb/chunkenc/histogram.go b/tsdb/chunkenc/histogram.go index a957d7b22..bdf4344af 100644 --- a/tsdb/chunkenc/histogram.go +++ b/tsdb/chunkenc/histogram.go @@ -437,6 +437,7 @@ loop: // fill in the bucket in b and advance a. if aCount == 0 { bInter.num++ // Mark that we need to insert a bucket in b. + bInter.bucketIdx = aIdx // Advance a if aInter.num > 0 { aInserts = append(aInserts, aInter) @@ -454,6 +455,7 @@ loop: return nil, nil, false case aIdx > bIdx: // a misses a value that is in b. Forward b and recompare. aInter.num++ + aInter.bucketIdx = bIdx // Advance b if bInter.num > 0 { bInserts = append(bInserts, bInter) @@ -471,6 +473,7 @@ loop: // fill in the bucket in b and advance a. 
if aCount == 0 { bInter.num++ + bInter.bucketIdx = aIdx // Advance a if aInter.num > 0 { aInserts = append(aInserts, aInter) @@ -489,6 +492,7 @@ loop: return nil, nil, false case !aOK && bOK: // a misses a value that is in b. Forward b and recompare. aInter.num++ + aInter.bucketIdx = bIdx // Advance b if bInter.num > 0 { bInserts = append(bInserts, bInter) @@ -807,6 +811,22 @@ func (a *HistogramAppender) AppendHistogram(prev *HistogramAppender, t int64, h happ.appendHistogram(t, h) return newChunk, false, app, nil } + if len(pBackwardInserts) > 0 || len(nBackwardInserts) > 0 { + // The histogram needs to be expanded to have the extra empty buckets + // of the chunk. + if len(pForwardInserts) == 0 && len(nForwardInserts) == 0 { + // No new chunks from the histogram, so the spans of the appender can accommodate the new buckets. + h.PositiveSpans = resize(h.PositiveSpans, len(a.pSpans)) + copy(h.PositiveSpans, a.pSpans) + h.NegativeSpans = resize(h.NegativeSpans, len(a.nSpans)) + copy(h.NegativeSpans, a.nSpans) + } else { + // Spans need pre-adjusting to accommodate the new buckets. + h.PositiveSpans = adjustForInserts(h.PositiveSpans, pBackwardInserts) + h.NegativeSpans = adjustForInserts(h.NegativeSpans, nBackwardInserts) + } + a.recodeHistogram(h, pBackwardInserts, nBackwardInserts) + } if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 { if appendOnly { return nil, false, a, fmt.Errorf("histogram layout change with %d positive and %d negative forwards inserts", len(pForwardInserts), len(nForwardInserts)) @@ -818,13 +838,6 @@ func (a *HistogramAppender) AppendHistogram(prev *HistogramAppender, t int64, h app.(*HistogramAppender).appendHistogram(t, h) return chk, true, app, nil } - if len(pBackwardInserts) > 0 || len(nBackwardInserts) > 0 { - // The histogram needs to be expanded to have the extra empty buckets - // of the chunk. - h.PositiveSpans = a.pSpans - h.NegativeSpans = a.nSpans - a.recodeHistogram(h, pBackwardInserts, nBackwardInserts) - } a.appendHistogram(t, h) return nil, false, a, nil } diff --git a/tsdb/chunkenc/histogram_meta.go b/tsdb/chunkenc/histogram_meta.go index 59e2e10fc..98778e021 100644 --- a/tsdb/chunkenc/histogram_meta.go +++ b/tsdb/chunkenc/histogram_meta.go @@ -278,6 +278,10 @@ func (b *bucketIterator) Next() (int, bool) { type Insert struct { pos int num int + + // Optional: bucketIdx is the index of the bucket that is inserted. + // Can be used to adjust spans. + bucketIdx int } // Deprecated: expandSpansForward, use expandIntSpansAndBuckets or @@ -577,3 +581,65 @@ func counterResetHint(crh CounterResetHeader, numRead uint16) histogram.CounterR return histogram.UnknownCounterReset } } + +// adjustForInserts adjusts the spans for the given inserts. +func adjustForInserts(spans []histogram.Span, inserts []Insert) (mergedSpans []histogram.Span) { + if len(inserts) == 0 { + return spans + } + + it := newBucketIterator(spans) + + var ( + lastBucket int + i int + insertIdx int = inserts[i].bucketIdx + insertNum int = inserts[i].num + ) + + addBucket := func(b int) { + offset := b - lastBucket - 1 + if offset == 0 && len(mergedSpans) > 0 { + mergedSpans[len(mergedSpans)-1].Length++ + } else { + if len(mergedSpans) == 0 { + offset++ + } + mergedSpans = append(mergedSpans, histogram.Span{ + Offset: int32(offset), + Length: 1, + }) + } + + lastBucket = b + } + consumeInsert := func() { + // Consume the insert. 
+ insertNum-- + if insertNum == 0 { + i++ + if i < len(inserts) { + insertIdx = inserts[i].bucketIdx + insertNum = inserts[i].num + } + } else { + insertIdx++ + } + } + + bucket, ok := it.Next() + for ok { + if i < len(inserts) && insertIdx < bucket { + addBucket(insertIdx) + consumeInsert() + } else { + addBucket(bucket) + bucket, ok = it.Next() + } + } + for i < len(inserts) { + addBucket(inserts[i].bucketIdx) + consumeInsert() + } + return +} diff --git a/tsdb/chunkenc/histogram_test.go b/tsdb/chunkenc/histogram_test.go index e1a82bd01..d44be69df 100644 --- a/tsdb/chunkenc/histogram_test.go +++ b/tsdb/chunkenc/histogram_test.go @@ -471,7 +471,13 @@ func TestHistogramChunkAppendable(t *testing.T) { // Check that h2 was recoded. require.Equal(t, []int64{7, -7, 2, 1, -3, 3, 1, 1}, h2.PositiveBuckets) // counts: 7, 0, 2, 3 , 0, 3, 5 (total 23) - require.Equal(t, emptyBucketH.PositiveSpans, h2.PositiveSpans) + require.Equal(t, []histogram.Span{ + {Offset: 0, Length: 2}, // Added empty bucket. + {Offset: 2, Length: 1}, // Existing - offset adjusted. + {Offset: 3, Length: 2}, // Added empty bucket. + {Offset: 3, Length: 1}, // Existing - offset adjusted. + {Offset: 1, Length: 2}, // Existing. + }, h2.PositiveSpans) } { // New histogram that has a counter reset while buckets are same. From d2f6fa72892ef9d5c94b789d08856061eb27dc92 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Tue, 6 Aug 2024 13:24:46 +0200 Subject: [PATCH 46/99] Fix lint error MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- tsdb/chunkenc/histogram_meta.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tsdb/chunkenc/histogram_meta.go b/tsdb/chunkenc/histogram_meta.go index 98778e021..8d614b817 100644 --- a/tsdb/chunkenc/histogram_meta.go +++ b/tsdb/chunkenc/histogram_meta.go @@ -593,8 +593,8 @@ func adjustForInserts(spans []histogram.Span, inserts []Insert) (mergedSpans []h var ( lastBucket int i int - insertIdx int = inserts[i].bucketIdx - insertNum int = inserts[i].num + insertIdx = inserts[i].bucketIdx + insertNum = inserts[i].num ) addBucket := func(b int) { From 6900bf48d06837e3a79ef31829565721eca84cbe Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 6 Aug 2024 13:55:03 +0000 Subject: [PATCH 47/99] build(deps): bump github.com/aws/aws-sdk-go from 1.54.19 to 1.55.5 Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.54.19 to 1.55.5. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.54.19...v1.55.5) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 138e1bae9..02844cd8a 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/KimMachineGun/automemlimit v0.6.1 github.com/alecthomas/kingpin/v2 v2.4.0 github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 - github.com/aws/aws-sdk-go v1.54.19 + github.com/aws/aws-sdk-go v1.55.5 github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 github.com/cespare/xxhash/v2 v2.3.0 github.com/dennwc/varint v1.0.0 diff --git a/go.sum b/go.sum index fb5c1772a..9841b5428 100644 --- a/go.sum +++ b/go.sum @@ -92,8 +92,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:W github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.54.19 h1:tyWV+07jagrNiCcGRzRhdtVjQs7Vy41NwsuOcl0IbVI= -github.com/aws/aws-sdk-go v1.54.19/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= +github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= From 98ecdf35891cfd65d7cf01dc228c3d5d0dc82f6b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Tue, 6 Aug 2024 16:51:20 +0200 Subject: [PATCH 48/99] Fix corrupting spans via iterator sharing MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Iterator may share spans without copy, so we always have to make a copy before modification - copy-on-write. Signed-off-by: György Krajcsovits --- tsdb/chunkenc/float_histogram.go | 5 +++-- tsdb/chunkenc/float_histogram_test.go | 4 ++++ tsdb/chunkenc/histogram.go | 5 +++-- tsdb/chunkenc/histogram_test.go | 4 ++++ 4 files changed, 14 insertions(+), 4 deletions(-) diff --git a/tsdb/chunkenc/float_histogram.go b/tsdb/chunkenc/float_histogram.go index 2af8dc507..a5f123bc9 100644 --- a/tsdb/chunkenc/float_histogram.go +++ b/tsdb/chunkenc/float_histogram.go @@ -782,9 +782,10 @@ func (a *FloatHistogramAppender) AppendFloatHistogram(prev *FloatHistogramAppend // of the chunk. if len(pForwardInserts) == 0 && len(nForwardInserts) == 0 { // No new chunks from the histogram, so the spans of the appender can accommodate the new buckets. - h.PositiveSpans = resize(h.PositiveSpans, len(a.pSpans)) + // However we need to make a copy in case the input is sharing spans from an iterator. + h.PositiveSpans = make([]histogram.Span, len(a.pSpans)) copy(h.PositiveSpans, a.pSpans) - h.NegativeSpans = resize(h.NegativeSpans, len(a.nSpans)) + h.NegativeSpans = make([]histogram.Span, len(a.nSpans)) copy(h.NegativeSpans, a.nSpans) } else { // Spans need pre-adjusting to accommodate the new buckets. 
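[Editorial note, not part of the patch above: a minimal, hedged Go sketch of the aliasing hazard that the switch from resize() to make()+copy() in AppendFloatHistogram guards against. The growTo helper and span type below are invented stand-ins, not the real chunkenc resize function or histogram.Span API; the point is only that reusing a slice whose backing array is still referenced elsewhere (for example by an iterator's internal histogram) lets the subsequent copy overwrite data the other holder can still observe, whereas a freshly allocated slice cannot.]

package main

import "fmt"

type span struct{ Offset, Length int32 }

// growTo is a stand-in for an in-place resize: when the capacity is
// already sufficient it returns a slice over the same backing array.
func growTo(s []span, n int) []span {
	if cap(s) >= n {
		return s[:n]
	}
	return make([]span, n)
}

func main() {
	iterSpans := make([]span, 2, 4) // pretend the iterator owns this buffer
	iterSpans[0], iterSpans[1] = span{0, 2}, span{3, 1}

	h := iterSpans // a histogram handed out by the iterator aliases the buffer

	appenderSpans := []span{{0, 1}, {2, 2}}

	// In-place reuse: clobbers the iterator's buffer as a side effect.
	h = growTo(h, len(appenderSpans))
	copy(h, appenderSpans)
	fmt.Println(iterSpans[0]) // {0 1} - the iterator's data was overwritten

	// Fresh allocation: the iterator's buffer is left untouched.
	h2 := make([]span, len(appenderSpans))
	copy(h2, appenderSpans)
	_ = h2
}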
diff --git a/tsdb/chunkenc/float_histogram_test.go b/tsdb/chunkenc/float_histogram_test.go index 41e76ef59..689696f5a 100644 --- a/tsdb/chunkenc/float_histogram_test.go +++ b/tsdb/chunkenc/float_histogram_test.go @@ -411,6 +411,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { {Offset: 3, Length: 2}, {Offset: 5, Length: 1}, } + savedH2Spans := h2.PositiveSpans h2.PositiveBuckets = []float64{7, 4, 3, 5, 2} posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2) @@ -426,6 +427,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { // Check that h2 was recoded. require.Equal(t, []float64{7, 0, 4, 3, 5, 0, 2}, h2.PositiveBuckets) require.Equal(t, emptyBucketH.PositiveSpans, h2.PositiveSpans) + require.NotEqual(t, savedH2Spans, h2.PositiveSpans, "recoding must make a copy") } { // New histogram that has new buckets AND buckets missing but the buckets missing were empty. @@ -439,6 +441,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { {Offset: 3, Length: 2}, {Offset: 5, Length: 2}, } + savedH2Spans := h2.PositiveSpans h2.PositiveBuckets = []float64{7, 4, 3, 5, 2, 3} posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2) @@ -460,6 +463,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { {Offset: 3, Length: 1}, // Added empty bucket. {Offset: 1, Length: 2}, // Existing + the extra bucket. }, h2.PositiveSpans) + require.NotEqual(t, savedH2Spans, h2.PositiveSpans, "recoding must make a copy") } { // New histogram that has a counter reset while buckets are same. diff --git a/tsdb/chunkenc/histogram.go b/tsdb/chunkenc/histogram.go index bdf4344af..fafae48d3 100644 --- a/tsdb/chunkenc/histogram.go +++ b/tsdb/chunkenc/histogram.go @@ -816,9 +816,10 @@ func (a *HistogramAppender) AppendHistogram(prev *HistogramAppender, t int64, h // of the chunk. if len(pForwardInserts) == 0 && len(nForwardInserts) == 0 { // No new chunks from the histogram, so the spans of the appender can accommodate the new buckets. - h.PositiveSpans = resize(h.PositiveSpans, len(a.pSpans)) + // However we need to make a copy in case the input is sharing spans from an iterator. + h.PositiveSpans = make([]histogram.Span, len(a.pSpans)) copy(h.PositiveSpans, a.pSpans) - h.NegativeSpans = resize(h.NegativeSpans, len(a.nSpans)) + h.NegativeSpans = make([]histogram.Span, len(a.nSpans)) copy(h.NegativeSpans, a.nSpans) } else { // Spans need pre-adjusting to accommodate the new buckets. diff --git a/tsdb/chunkenc/histogram_test.go b/tsdb/chunkenc/histogram_test.go index d44be69df..59187ed17 100644 --- a/tsdb/chunkenc/histogram_test.go +++ b/tsdb/chunkenc/histogram_test.go @@ -428,6 +428,7 @@ func TestHistogramChunkAppendable(t *testing.T) { {Offset: 4, Length: 1}, {Offset: 1, Length: 1}, } + savedH2Spans := h2.PositiveSpans h2.PositiveBuckets = []int64{7, -5, 1, 0, 1} // counts: 7, 2, 3, 3, 4 (total 18) posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2) @@ -443,6 +444,7 @@ func TestHistogramChunkAppendable(t *testing.T) { // Check that h2 was recoded. require.Equal(t, []int64{7, -7, 2, 1, -3, 3, 1}, h2.PositiveBuckets) // counts: 7, 0, 2, 3 , 0, 3, 4 (total 18) require.Equal(t, emptyBucketH.PositiveSpans, h2.PositiveSpans) + require.NotEqual(t, savedH2Spans, h2.PositiveSpans, "recoding must make a copy") } { // New histogram that has new buckets AND buckets missing but the buckets missing were empty. 
@@ -457,6 +459,7 @@ func TestHistogramChunkAppendable(t *testing.T) { {Offset: 4, Length: 1}, {Offset: 1, Length: 2}, } + savedH2Spans := h2.PositiveSpans h2.PositiveBuckets = []int64{7, -5, 1, 0, 1, 1} // counts: 7, 2, 3, 3, 4, 5 (total 23) posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2) @@ -478,6 +481,7 @@ func TestHistogramChunkAppendable(t *testing.T) { {Offset: 3, Length: 1}, // Existing - offset adjusted. {Offset: 1, Length: 2}, // Existing. }, h2.PositiveSpans) + require.NotEqual(t, savedH2Spans, h2.PositiveSpans, "recoding must make a copy") } { // New histogram that has a counter reset while buckets are same. From 626f2f3571228195c8c1d4c596295cb5a1321a2f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 6 Aug 2024 12:57:59 +0000 Subject: [PATCH 49/99] build(deps): bump github.com/docker/docker Bumps [github.com/docker/docker](https://github.com/docker/docker) from 27.0.3+incompatible to 27.1.1+incompatible. - [Release notes](https://github.com/docker/docker/releases) - [Commits](https://github.com/docker/docker/compare/v27.0.3...v27.1.1) --- updated-dependencies: - dependency-name: github.com/docker/docker dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 138e1bae9..cff8f961d 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/cespare/xxhash/v2 v2.3.0 github.com/dennwc/varint v1.0.0 github.com/digitalocean/godo v1.119.0 - github.com/docker/docker v27.0.3+incompatible + github.com/docker/docker v27.1.1+incompatible github.com/edsrzf/mmap-go v1.1.0 github.com/envoyproxy/go-control-plane v0.12.0 github.com/envoyproxy/protoc-gen-validate v1.0.4 diff --git a/go.sum b/go.sum index fb5c1772a..e751396ec 100644 --- a/go.sum +++ b/go.sum @@ -149,8 +149,8 @@ github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v27.0.3+incompatible h1:aBGI9TeQ4MPlhquTQKq9XbK79rKFVwXNUAYz9aXyEBE= -github.com/docker/docker v27.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY= +github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= From 37c8c9257b5f8ca59c72c0744450887befc04ad9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Tue, 6 Aug 2024 07:46:26 +0200 Subject: [PATCH 50/99] Fix histogram pool poisoning bu chunkenc.Iterator MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit chunkenc.Iterator.AtFloatHistogram may do a shallow copy if it receives nil as input pointer. This can in turn share the span slice with multiple histograms in the matrixSelectorHPool, leading to unexpected errors. 
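[Editorial illustration, not part of the original commit message: a self-contained Go sketch of the nil-destination hazard described above. The cursor, hist, and at names are invented stand-ins rather than the real chunkenc.Iterator or histogram.FloatHistogram APIs; the behaviour shown — handing out an internal pointer when the caller passes nil, copying when a destination is supplied — is what makes pre-allocating a non-nil FloatHistogram before the call a safe pattern.]

package main

import "fmt"

type span struct{ Offset, Length int32 }

type hist struct {
	Sum   float64
	Spans []span
}

// cursor mimics an iterator whose at method hands out its internal
// histogram when given nil, but copies into a caller-owned value when
// a destination is provided.
type cursor struct{ cur hist }

func (c *cursor) at(dst *hist) *hist {
	if dst == nil {
		return &c.cur // shallow: caller now shares c.cur.Spans
	}
	dst.Sum = c.cur.Sum
	dst.Spans = append(dst.Spans[:0], c.cur.Spans...) // fresh copy of the spans
	return dst
}

func main() {
	c := &cursor{cur: hist{Sum: 1, Spans: []span{{0, 2}}}}

	shared := c.at(nil)    // what happens without the fix
	owned := c.at(&hist{}) // what the fix guarantees

	c.cur.Spans[0].Length = 99 // the iterator advances / reuses its buffer

	fmt.Println(shared.Spans[0].Length) // 99 - poisoned via the shared slice
	fmt.Println(owned.Spans[0].Length)  // 2  - unaffected, it owns a copy
}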
Signed-off-by: György Krajcsovits --- promql/engine.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/promql/engine.go b/promql/engine.go index 25e67db63..621c2116e 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -2356,6 +2356,11 @@ loop: } else { histograms = append(histograms, HPoint{H: &histogram.FloatHistogram{}}) } + if histograms[n].H == nil { + // Initialize to non zero to AtFloatHistogram does a copy for sure. + // Not an issue in the loop above since that uses an intermediate buffer. + histograms[n].H = &histogram.FloatHistogram{} + } histograms[n].T, histograms[n].H = it.AtFloatHistogram(histograms[n].H) if value.IsStaleNaN(histograms[n].H.Sum) { histograms = histograms[:n] From b91acc61b0115c7e3216fa5960ec207f57ef4aa6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Tue, 6 Aug 2024 20:48:16 +0200 Subject: [PATCH 51/99] Add unit test MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- promql/engine_test.go | 59 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) diff --git a/promql/engine_test.go b/promql/engine_test.go index 8e618d435..8d5f87244 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -3797,3 +3797,62 @@ func makeInt64Pointer(val int64) *int64 { *valp = val return valp } + +func TestHistogramCopyFromIteratorRegression(t *testing.T) { + // Loading the following histograms creates two chunks because there's a + // counter reset. Not only the counter is lower in the last histogram + // but also there's missing buckets. + // This in turns means that chunk iterators will have different spans. + load := `load 1m +histogram {{sum:4 count:4 buckets:[2 2]}} {{sum:6 count:6 buckets:[3 3]}} {{sum:1 count:1 buckets:[1]}} +` + storage := promqltest.LoadedStorage(t, load) + t.Cleanup(func() { storage.Close() }) + engine := promqltest.NewTestEngine(false, 0, promqltest.DefaultMaxSamplesPerQuery) + + verify := func(t *testing.T, qry promql.Query, expected []histogram.FloatHistogram) { + res := qry.Exec(context.Background()) + require.NoError(t, res.Err) + + m, ok := res.Value.(promql.Matrix) + require.True(t, ok) + + require.Len(t, m, 1) + series := m[0] + + require.Empty(t, series.Floats) + require.Len(t, series.Histograms, len(expected)) + for i, e := range expected { + series.Histograms[i].H.CounterResetHint = histogram.UnknownCounterReset // Don't care. + require.Equal(t, &e, series.Histograms[i].H) + } + } + + qry, err := engine.NewRangeQuery(context.Background(), storage, nil, "increase(histogram[60s])", time.Unix(0, 0), time.Unix(0, 0).Add(1*time.Minute), time.Minute) + require.NoError(t, err) + verify(t, qry, []histogram.FloatHistogram{ + { + Count: 2, + Sum: 2, // Increase from 4 to 6 is 2. + PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, // Two buckets changed between the first and second histogram. + PositiveBuckets: []float64{1, 1}, // Increase from 2 to 3 is 1 in both buckets. 
+ }, + }) + + qry, err = engine.NewInstantQuery(context.Background(), storage, nil, "histogram[60s]", time.Unix(0, 0).Add(2*time.Minute)) + require.NoError(t, err) + verify(t, qry, []histogram.FloatHistogram{ + { + Count: 6, + Sum: 6, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, + PositiveBuckets: []float64{3, 3}, + }, + { + Count: 1, + Sum: 1, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []float64{1}, + }, + }) +} From f6e4b775e273c719acc4ff2d01c66934d66acea6 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Wed, 7 Aug 2024 14:25:46 +1000 Subject: [PATCH 52/99] Check for errors first Signed-off-by: Charles Korn --- promql/promqltest/test.go | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/promql/promqltest/test.go b/promql/promqltest/test.go index 83137e661..8b1ec381a 100644 --- a/promql/promqltest/test.go +++ b/promql/promqltest/test.go @@ -1003,13 +1003,6 @@ func (t *test) execRangeEval(cmd *evalCmd, engine promql.QueryEngine) error { return fmt.Errorf("error creating range query for %q (line %d): %w", cmd.expr, cmd.line, err) } res := q.Exec(t.context) - countWarnings, _ := res.Warnings.CountWarningsAndInfo() - if !cmd.warn && countWarnings > 0 { - return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", cmd.expr, cmd.line, res.Warnings) - } - if cmd.warn && countWarnings == 0 { - return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", cmd.expr, cmd.line) - } if res.Err != nil { if cmd.fail { return cmd.checkExpectedFailure(res.Err) @@ -1020,6 +1013,13 @@ func (t *test) execRangeEval(cmd *evalCmd, engine promql.QueryEngine) error { if res.Err == nil && cmd.fail { return fmt.Errorf("expected error evaluating query %q (line %d) but got none", cmd.expr, cmd.line) } + countWarnings, _ := res.Warnings.CountWarningsAndInfo() + if !cmd.warn && countWarnings > 0 { + return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", cmd.expr, cmd.line, res.Warnings) + } + if cmd.warn && countWarnings == 0 { + return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", cmd.expr, cmd.line) + } defer q.Close() if err := cmd.compareResult(res.Value); err != nil { @@ -1050,13 +1050,6 @@ func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine promq } defer q.Close() res := q.Exec(t.context) - countWarnings, _ := res.Warnings.CountWarningsAndInfo() - if !cmd.warn && countWarnings > 0 { - return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", iq.expr, cmd.line, res.Warnings) - } - if cmd.warn && countWarnings == 0 { - return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", iq.expr, cmd.line) - } if res.Err != nil { if cmd.fail { if err := cmd.checkExpectedFailure(res.Err); err != nil { @@ -1070,6 +1063,13 @@ func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine promq if res.Err == nil && cmd.fail { return fmt.Errorf("expected error evaluating query %q (line %d) but got none", iq.expr, cmd.line) } + countWarnings, _ := res.Warnings.CountWarningsAndInfo() + if !cmd.warn && countWarnings > 0 { + return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", iq.expr, cmd.line, res.Warnings) + } + if cmd.warn && countWarnings == 0 { + return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", iq.expr, cmd.line) + } err = cmd.compareResult(res.Value) if err != nil { return fmt.Errorf("error in %s %s (line %d): %w", cmd, 
iq.expr, cmd.line, err) From 424cefcf5e34f8b3266dbc77df933fa7b94961aa Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Wed, 7 Aug 2024 14:39:50 +1000 Subject: [PATCH 53/99] Fix "cannot reduce resolution to custom buckets schema" panic in `rate` over native histograms with mix of custom and exponential buckets Signed-off-by: Charles Korn --- promql/functions.go | 9 ++++++++ .../testdata/native_histograms.test | 22 +++++++++++++++++++ 2 files changed, 31 insertions(+) diff --git a/promql/functions.go b/promql/functions.go index 35dbd2970..018023bf0 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -179,15 +179,21 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod // Otherwise, it returns the calculated histogram and an empty annotation. func histogramRate(points []HPoint, isCounter bool, metricName string, pos posrange.PositionRange) (*histogram.FloatHistogram, annotations.Annotations) { prev := points[0].H + usingCustomBuckets := prev.UsesCustomBuckets() last := points[len(points)-1].H if last == nil { return nil, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, pos)) } + minSchema := prev.Schema if last.Schema < minSchema { minSchema = last.Schema } + if last.UsesCustomBuckets() != usingCustomBuckets { + return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos)) + } + var annos annotations.Annotations // We check for gauge type histograms in the loop below, but the loop below does not run on the first and last point, @@ -215,6 +221,9 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra if curr.Schema < minSchema { minSchema = curr.Schema } + if curr.UsesCustomBuckets() != usingCustomBuckets { + return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos)) + } } h := last.CopyToSchema(minSchema) diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index 034d73eb5..f91626c34 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -762,3 +762,25 @@ eval_warn instant at 30s rate(some_metric[30s]) # Test the case where we have more than two points for rate eval_warn instant at 1m rate(some_metric[1m]) {} {{count:0.03333333333333333 sum:0.03333333333333333 buckets:[0.03333333333333333]}} + +clear + +# Test rate() over mixed exponential and custom buckets. +load 30s + some_metric {{schema:0 sum:1 count:1 buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:0 sum:5 count:4 buckets:[1 2 1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} + +# Start and end with exponential, with custom in the middle. +eval_warn instant at 1m rate(some_metric[1m]) + # Should produce no results. + +# Start and end with custom, with exponential in the middle. +eval_warn instant at 1m30s rate(some_metric[1m]) + # Should produce no results. + +# Start with custom, end with exponential. +eval_warn instant at 1m rate(some_metric[30s]) + # Should produce no results. + +# Start with exponential, end with custom. +eval_warn instant at 30s rate(some_metric[30s]) + # Should produce no results. 
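Illustrative sketch (not part of the upstream patches): PATCH 53 and the two patches that follow all rely on the same guard — before combining native histograms, check that they agree on bucket layout via FloatHistogram.UsesCustomBuckets, and if they do not, emit a mixed exponential/custom histograms warning and skip the result instead of calling Add/Sub (the call that previously panicked with "cannot reduce resolution to custom buckets schema"). A minimal standalone version of that check, assuming the model/histogram package from this repository and schema -53 for custom buckets as in the test data above:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

// sameBucketLayout reports whether every histogram in hs uses the same bucket
// family (all standard exponential schemas, or all custom buckets). It mirrors
// the UsesCustomBuckets checks added to histogramRate in the diff above.
func sameBucketLayout(hs []*histogram.FloatHistogram) bool {
	if len(hs) == 0 {
		return true
	}
	custom := hs[0].UsesCustomBuckets()
	for _, h := range hs[1:] {
		if h.UsesCustomBuckets() != custom {
			return false
		}
	}
	return true
}

func main() {
	exponential := &histogram.FloatHistogram{Schema: 0, Count: 3, Sum: 4}
	// Schema -53 marks a custom-buckets histogram, matching the
	// {{schema:-53 ... custom_values:[5 10]}} series in the test data.
	custom := &histogram.FloatHistogram{Schema: -53, CustomValues: []float64{5, 10}, Count: 1, Sum: 1}

	fmt.Println(sameBucketLayout([]*histogram.FloatHistogram{exponential, exponential})) // true
	fmt.Println(sameBucketLayout([]*histogram.FloatHistogram{exponential, custom}))      // false -> warn and skip
}

When the layouts differ, histogramRate returns the mixed exponential/custom warning annotation with no sample, and the next two patches apply the equivalent guard to the sum and avg aggregations; the eval_warn cases in the native_histograms.test diffs exercise both paths.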
From 5ee94f49a22c9c9df338f7acc27044fe75f078af Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Wed, 7 Aug 2024 15:30:01 +1000 Subject: [PATCH 54/99] Fix issue where `sum` over mixed exponential and custom buckets, or incompatible custom buckets, produces incorrect results Signed-off-by: Charles Korn --- promql/engine.go | 19 +++++++++---- .../testdata/native_histograms.test | 27 +++++++++++++++++++ 2 files changed, 41 insertions(+), 5 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index 14c370606..efb6c583f 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -2784,6 +2784,7 @@ type groupedAggregation struct { seen bool // Was this output groups seen in the input at this timestamp. hasFloat bool // Has at least 1 float64 sample aggregated. hasHistogram bool // Has at least 1 histogram sample aggregated. + abandonHistogram bool // If true, group has seen mixed exponential and custom buckets, or incompatible custom buckets. groupAggrComplete bool // Used by LIMITK to short-cut series loop when we've reached K elem on every group. incrementalMean bool // True after reverting to incremental calculation of the mean value. } @@ -2809,10 +2810,11 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix // Initialize this group if it's the first time we've seen it. if !group.seen { *group = groupedAggregation{ - seen: true, - floatValue: f, - floatMean: f, - groupCount: 1, + seen: true, + floatValue: f, + floatMean: f, + abandonHistogram: false, + groupCount: 1, } switch op { case parser.AVG, parser.SUM: @@ -2833,6 +2835,10 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix continue } + if group.abandonHistogram { + continue + } + switch op { case parser.SUM: if h != nil { @@ -2841,6 +2847,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix _, err := group.histogramValue.Add(h) if err != nil { handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos) + group.abandonHistogram = true } } // Otherwise the aggregation contained floats @@ -2987,7 +2994,9 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix annos.Add(annotations.NewMixedFloatsHistogramsAggWarning(e.Expr.PositionRange())) continue } - if aggr.hasHistogram { + if aggr.abandonHistogram { + continue + } else if aggr.hasHistogram { aggr.histogramValue.Compact(0) } else { aggr.floatValue += aggr.floatKahanC diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index 034d73eb5..fc0517f75 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -762,3 +762,30 @@ eval_warn instant at 30s rate(some_metric[30s]) # Test the case where we have more than two points for rate eval_warn instant at 1m rate(some_metric[1m]) {} {{count:0.03333333333333333 sum:0.03333333333333333 buckets:[0.03333333333333333]}} + +# Test mixing exponential and custom buckets. 
+load 6m + metric{series="exponential"} {{sum:4 count:3 buckets:[1 2 1]}} _ {{sum:4 count:3 buckets:[1 2 1]}} + metric{series="other-exponential"} {{sum:3 count:2 buckets:[1 1 1]}} _ {{sum:3 count:2 buckets:[1 1 1]}} + metric{series="custom"} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} + metric{series="other-custom"} _ {{schema:-53 sum:15 count:2 custom_values:[5 10] buckets:[0 2]}} {{schema:-53 sum:15 count:2 custom_values:[5 10] buckets:[0 2]}} + +# T=0: only exponential +# T=6: only custom +# T=12: mixed, should be ignored and emit an warning +eval_warn range from 0 to 12m step 6m sum(metric) + {} {{sum:7 count:5 buckets:[2 3 2]}} {{schema:-53 sum:16 count:3 custom_values:[5 10] buckets:[1 2]}} _ + +clear + +# Test incompatible custom bucket schemas. +load 6m + metric{series="1"} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} + metric{series="2"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} _ {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} + metric{series="3"} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} + +# T=0: incompatible, should be ignored and emit a warning +# T=6: compatible +# T=12: incompatible followed by compatible, should be ignored and emit a warning +eval_warn range from 0 to 12m step 6m sum(metric) + {} _ {{schema:-53 sum:2 count:2 custom_values:[5 10] buckets:[2]}} _ From f07b3ae67be5620cef9ff5520ba44d94f1216cb9 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Wed, 7 Aug 2024 15:32:35 +1000 Subject: [PATCH 55/99] Fix issue where `avg` over mixed exponential and custom buckets, or incompatible custom buckets, produces incorrect results or panics Signed-off-by: Charles Korn --- promql/engine.go | 6 ++++++ promql/promqltest/testdata/native_histograms.test | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/promql/engine.go b/promql/engine.go index efb6c583f..d51ed92c5 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -2868,10 +2868,14 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix toAdd, err := left.Sub(right) if err != nil { handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos) + group.abandonHistogram = true + continue } _, err = group.histogramValue.Add(toAdd) if err != nil { handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos) + group.abandonHistogram = true + continue } } // Otherwise the aggregation contained floats @@ -2968,6 +2972,8 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix continue } switch { + case aggr.abandonHistogram: + continue case aggr.hasHistogram: aggr.histogramValue = aggr.histogramValue.Compact(0) case aggr.incrementalMean: diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index fc0517f75..62fac87c1 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -776,6 +776,9 @@ load 6m eval_warn range from 0 to 12m step 6m sum(metric) {} {{sum:7 count:5 buckets:[2 3 2]}} {{schema:-53 sum:16 count:3 custom_values:[5 10] buckets:[1 2]}} _ +eval_warn range from 0 to 12m step 6m avg(metric) + {} {{sum:3.5 count:2.5 buckets:[1 1.5 1]}} {{schema:-53 sum:8 
count:1.5 custom_values:[5 10] buckets:[0.5 1]}} _ + clear # Test incompatible custom bucket schemas. @@ -789,3 +792,6 @@ load 6m # T=12: incompatible followed by compatible, should be ignored and emit a warning eval_warn range from 0 to 12m step 6m sum(metric) {} _ {{schema:-53 sum:2 count:2 custom_values:[5 10] buckets:[2]}} _ + +eval_warn range from 0 to 12m step 6m avg(metric) + {} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} _ From 0f4bc87b4fde3b4d9483a62a6b4f8fe3286c84bd Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Wed, 7 Aug 2024 15:35:06 +1000 Subject: [PATCH 56/99] Make linter happy Signed-off-by: Charles Korn --- promql/engine.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index d51ed92c5..6f0c64d42 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -3000,11 +3000,12 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix annos.Add(annotations.NewMixedFloatsHistogramsAggWarning(e.Expr.PositionRange())) continue } - if aggr.abandonHistogram { + switch { + case aggr.abandonHistogram: continue - } else if aggr.hasHistogram { + case aggr.hasHistogram: aggr.histogramValue.Compact(0) - } else { + default: aggr.floatValue += aggr.floatKahanC } default: From 0833d2a230c422ad5ce99e096702ce1802efae73 Mon Sep 17 00:00:00 2001 From: Oleg Zaytsev Date: Wed, 7 Aug 2024 15:02:59 +0200 Subject: [PATCH 57/99] Fix appendable: check whether last val was a histogram (#14613) * Fix appendable: check whether last val was a histogram When appending a float, we were checking whether lastValue was equal to current value, but we didn't check whether last value was a float value. Signed-off-by: Oleg Zaytsev --- storage/errors.go | 20 +++++++++++++++++--- storage/errors_test.go | 38 ++++++++++++++++++++++++++++++++++++++ tsdb/head_append.go | 3 +++ tsdb/head_test.go | 29 +++++++++++++++++++++++++++++ 4 files changed, 87 insertions(+), 3 deletions(-) create mode 100644 storage/errors_test.go diff --git a/storage/errors.go b/storage/errors.go index eff70f678..dd48066db 100644 --- a/storage/errors.go +++ b/storage/errors.go @@ -16,9 +16,10 @@ package storage import "fmt" type errDuplicateSampleForTimestamp struct { - timestamp int64 - existing float64 - newValue float64 + timestamp int64 + existing float64 + existingIsHistogram bool + newValue float64 } func NewDuplicateFloatErr(t int64, existing, newValue float64) error { @@ -29,13 +30,26 @@ func NewDuplicateFloatErr(t int64, existing, newValue float64) error { } } +// NewDuplicateHistogramToFloatErr describes an error where a new float sample is sent for same timestamp as previous histogram. +func NewDuplicateHistogramToFloatErr(t int64, newValue float64) error { + return errDuplicateSampleForTimestamp{ + timestamp: t, + existingIsHistogram: true, + newValue: newValue, + } +} + func (e errDuplicateSampleForTimestamp) Error() string { if e.timestamp == 0 { return "duplicate sample for timestamp" } + if e.existingIsHistogram { + return fmt.Sprintf("duplicate sample for timestamp %d; overrides not allowed: existing is a histogram, new value %g", e.timestamp, e.newValue) + } return fmt.Sprintf("duplicate sample for timestamp %d; overrides not allowed: existing %g, new value %g", e.timestamp, e.existing, e.newValue) } +// Is implements the anonymous interface checked by errors.Is. // Every errDuplicateSampleForTimestamp compares equal to the global ErrDuplicateSampleForTimestamp. 
func (e errDuplicateSampleForTimestamp) Is(t error) bool { if t == ErrDuplicateSampleForTimestamp { diff --git a/storage/errors_test.go b/storage/errors_test.go new file mode 100644 index 000000000..b3e202b49 --- /dev/null +++ b/storage/errors_test.go @@ -0,0 +1,38 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestErrDuplicateSampleForTimestamp(t *testing.T) { + // All errDuplicateSampleForTimestamp are ErrDuplicateSampleForTimestamp + require.ErrorIs(t, ErrDuplicateSampleForTimestamp, errDuplicateSampleForTimestamp{}) + + // Same type only is if it has same properties. + err := NewDuplicateFloatErr(1_000, 10, 20) + sameErr := NewDuplicateFloatErr(1_000, 10, 20) + differentErr := NewDuplicateFloatErr(1_001, 30, 40) + + require.ErrorIs(t, err, sameErr) + require.NotErrorIs(t, err, differentErr) + + // Also works when err is wrapped. + require.ErrorIs(t, fmt.Errorf("failed: %w", err), sameErr) + require.NotErrorIs(t, fmt.Errorf("failed: %w", err), differentErr) +} diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 8d66d1e81..bdde0d7f8 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -466,6 +466,9 @@ func (s *memSeries) appendable(t int64, v float64, headMaxt, minValidTime, oooTi // like federation and erroring out at that time would be extremely noisy. // This only checks against the latest in-order sample. // The OOO headchunk has its own method to detect these duplicates. 
+ if s.lastHistogramValue != nil || s.lastFloatHistogramValue != nil { + return false, 0, storage.NewDuplicateHistogramToFloatErr(t, v) + } if math.Float64bits(s.lastValue) != math.Float64bits(v) { return false, 0, storage.NewDuplicateFloatErr(t, s.lastValue, v) } diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 09927c23c..fb73a3638 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -5919,6 +5919,35 @@ func TestPostingsCardinalityStats(t *testing.T) { require.Equal(t, statsForSomeLabel1, head.PostingsCardinalityStats("n", 1)) } +func TestHeadAppender_AppendFloatWithSameTimestampAsPreviousHistogram(t *testing.T) { + head, _ := newTestHead(t, DefaultBlockDuration, wlog.CompressionNone, false) + t.Cleanup(func() { head.Close() }) + + ls := labels.FromStrings(labels.MetricName, "test") + + { + // Append a float 10.0 @ 1_000 + app := head.Appender(context.Background()) + _, err := app.Append(0, ls, 1_000, 10.0) + require.NoError(t, err) + require.NoError(t, app.Commit()) + } + + { + // Append a float histogram @ 2_000 + app := head.Appender(context.Background()) + h := tsdbutil.GenerateTestHistogram(1) + _, err := app.AppendHistogram(0, ls, 2_000, h, nil) + require.NoError(t, err) + require.NoError(t, app.Commit()) + } + + app := head.Appender(context.Background()) + _, err := app.Append(0, ls, 2_000, 10.0) + require.Error(t, err) + require.ErrorIs(t, err, storage.NewDuplicateHistogramToFloatErr(2_000, 10.0)) +} + func TestHeadAppender_AppendCTZeroSample(t *testing.T) { type appendableSamples struct { ts int64 From 92873d3009bdcf3c961df76167250f8c545351e2 Mon Sep 17 00:00:00 2001 From: machine424 Date: Mon, 8 Apr 2024 14:59:30 +0200 Subject: [PATCH 58/99] feat: allow to delay head compaction start time helping Prometheus instances to avoid simultaneous compactions and reduce stress on shared resources. This is enabled via `--enable-feature=delayed-compaction`. Signed-off-by: machine424 --- cmd/prometheus/main.go | 7 +- docs/command-line/prometheus.md | 2 +- docs/feature_flags.md | 14 ++ tsdb/compact_test.go | 227 ++++++++++++++++++++++++++++++++ tsdb/db.go | 44 ++++++- tsdb/db_test.go | 22 ++++ 6 files changed, 313 insertions(+), 3 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 1d844ddba..d7333b657 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -234,6 +234,9 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error { config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols level.Info(logger).Log("msg", "Experimental created timestamp zero ingestion enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols)) + case "delayed-compaction": + c.tsdb.EnableDelayedCompaction = true + level.Info(logger).Log("msg", "Experimental delayed compaction is enabled.") case "": continue case "promql-at-modifier", "promql-negative-offset": @@ -475,7 +478,7 @@ func main() { a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates."). Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval) - a.Flag("enable-feature", "Comma separated feature names to enable. 
Valid options: agent, auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). + a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). Default("").StringsVar(&cfg.featureList) promlogflag.AddFlags(a, &cfg.promlogConfig) @@ -1715,6 +1718,7 @@ type tsdbOptions struct { MaxExemplars int64 EnableMemorySnapshotOnShutdown bool EnableNativeHistograms bool + EnableDelayedCompaction bool } func (opts tsdbOptions) ToTSDBOptions() tsdb.Options { @@ -1736,6 +1740,7 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options { EnableNativeHistograms: opts.EnableNativeHistograms, OutOfOrderTimeWindow: opts.OutOfOrderTimeWindow, EnableOverlappingCompaction: true, + EnableDelayedCompaction: opts.EnableDelayedCompaction, } } diff --git a/docs/command-line/prometheus.md b/docs/command-line/prometheus.md index 2faf65105..b8f2e4241 100644 --- a/docs/command-line/prometheus.md +++ b/docs/command-line/prometheus.md @@ -56,7 +56,7 @@ The Prometheus monitoring server | --query.timeout | Maximum time a query may take before being aborted. Use with server mode only. | `2m` | | --query.max-concurrency | Maximum number of queries executed concurrently. Use with server mode only. | `20` | | --query.max-samples | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` | -| --enable-feature | Comma separated feature names to enable. Valid options: agent, auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | +| --enable-feature | Comma separated feature names to enable. Valid options: agent, auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | | --log.level | Only log messages with the given severity or above. 
One of: [debug, info, warn, error] | `info` | | --log.format | Output format of log messages. One of: [logfmt, json] | `logfmt` | diff --git a/docs/feature_flags.md b/docs/feature_flags.md index 24d70647f..3f92ab7fd 100644 --- a/docs/feature_flags.md +++ b/docs/feature_flags.md @@ -234,3 +234,17 @@ metadata changes as WAL records on a per-series basis. This must be used if you are also using remote write 2.0 as it will only gather metadata from the WAL. + +## Delay compaction start time + +`--enable-feature=delayed-compaction` + +A random offset, up to `10%` of the chunk range, is added to the Head compaction start time. This assists Prometheus instances in avoiding simultaneous compactions and reduces the load on shared resources. + +Only auto Head compactions and the operations directly resulting from them are subject to this delay. + +In the event of multiple consecutive Head compactions being possible, only the first compaction experiences this delay. + +Note that during this delay, the Head continues its usual operations, which include serving and appending series. + +Despite the delay in compaction, the blocks produced are time-aligned in the same manner as they would be if the delay was not in place. diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go index 0df6ca050..0ea155d10 100644 --- a/tsdb/compact_test.go +++ b/tsdb/compact_test.go @@ -22,6 +22,7 @@ import ( "os" "path" "path/filepath" + "runtime" "strconv" "sync" "testing" @@ -1925,3 +1926,229 @@ func TestCompactEmptyResultBlockWithTombstone(t *testing.T) { require.Nil(t, ulids) require.NoError(t, block.Close()) } + +func TestDelayedCompaction(t *testing.T) { + // The delay is chosen in such a way as to not slow down the tests, but also to make + // the effective compaction duration negligible compared to it, so that the duration comparisons make sense. + delay := 1000 * time.Millisecond + + waitUntilCompactedAndCheck := func(db *DB) { + t.Helper() + start := time.Now() + for db.head.compactable() { + // This simulates what happens at the end of commits, for less busy DB, a compaction + // is triggered every minute. This is to speed up the test. + select { + case db.compactc <- struct{}{}: + default: + } + time.Sleep(time.Millisecond) + } + duration := time.Since(start) + // Only waited for one offset: offset<=delay<<<2*offset + require.Greater(t, duration, db.opts.CompactionDelay) + require.Less(t, duration, 2*db.opts.CompactionDelay) + } + + compactAndCheck := func(db *DB) { + t.Helper() + start := time.Now() + db.Compact(context.Background()) + for db.head.compactable() { + time.Sleep(time.Millisecond) + } + if runtime.GOOS == "windows" { + // TODO: enable on windows once ms resolution timers are better supported. + return + } + duration := time.Since(start) + require.Less(t, duration, delay) + } + + cases := []struct { + name string + // The delays are chosen in such a way as to not slow down the tests, but also in a way to make the + // effective compaction duration negligible compared to them, so that the duration comparisons make sense. 
+ compactionDelay time.Duration + }{ + { + "delayed compaction not enabled", + 0, + }, + { + "delayed compaction enabled", + delay, + }, + } + + for _, c := range cases { + c := c + t.Run(c.name, func(t *testing.T) { + t.Parallel() + + var options *Options + if c.compactionDelay > 0 { + options = &Options{CompactionDelay: c.compactionDelay} + } + db := openTestDB(t, options, []int64{10}) + defer func() { + require.NoError(t, db.Close()) + }() + + label := labels.FromStrings("foo", "bar") + + // The first compaction is expected to result in 1 block. + db.DisableCompactions() + app := db.Appender(context.Background()) + _, err := app.Append(0, label, 0, 0) + require.NoError(t, err) + _, err = app.Append(0, label, 11, 0) + require.NoError(t, err) + _, err = app.Append(0, label, 21, 0) + require.NoError(t, err) + require.NoError(t, app.Commit()) + + if c.compactionDelay == 0 { + // When delay is not enabled, compaction should run on the first trigger. + compactAndCheck(db) + } else { + db.EnableCompactions() + waitUntilCompactedAndCheck(db) + // The db.compactc signals have been processed multiple times since a compaction is triggered every 1ms by waitUntilCompacted. + // This implies that the compaction delay doesn't block or wait on the initial trigger. + // 3 is an arbitrary value because it's difficult to determine the precise value. + require.GreaterOrEqual(t, prom_testutil.ToFloat64(db.metrics.compactionsTriggered)-prom_testutil.ToFloat64(db.metrics.compactionsSkipped), 3.0) + // The delay doesn't change the head blocks alignement. + require.Eventually(t, func() bool { + return db.head.MinTime() == db.compactor.(*LeveledCompactor).ranges[0]+1 + }, 500*time.Millisecond, 10*time.Millisecond) + // One compaction was run and one block was produced. + require.Equal(t, 1.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran)) + } + + // The second compaction is expected to result in 2 blocks. + // This ensures that the logic for compaction delay doesn't only work for the first compaction, but also takes into account the future compactions. + // This also ensures that no delay happens between consecutive compactions. + db.DisableCompactions() + app = db.Appender(context.Background()) + _, err = app.Append(0, label, 31, 0) + require.NoError(t, err) + _, err = app.Append(0, label, 41, 0) + require.NoError(t, err) + require.NoError(t, app.Commit()) + + if c.compactionDelay == 0 { + // Compaction should still run on the first trigger. + compactAndCheck(db) + } else { + db.EnableCompactions() + waitUntilCompactedAndCheck(db) + } + + // Two other compactions were run. + require.Eventually(t, func() bool { + return prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran) == 3.0 + }, 500*time.Millisecond, 10*time.Millisecond) + + if c.compactionDelay == 0 { + return + } + + // This test covers a special case. If auto compaction is in a delay period and a manual compaction is triggered, + // auto compaction should stop waiting for the delay if the head is no longer compactable. + // Of course, if the head is still compactable after the manual compaction, auto compaction will continue waiting for the same delay. 
+ getTimeWhenCompactionDelayStarted := func() time.Time { + t.Helper() + db.cmtx.Lock() + defer db.cmtx.Unlock() + return db.timeWhenCompactionDelayStarted + } + + db.DisableCompactions() + app = db.Appender(context.Background()) + _, err = app.Append(0, label, 51, 0) + require.NoError(t, err) + require.NoError(t, app.Commit()) + + require.True(t, db.head.compactable()) + db.EnableCompactions() + // Trigger an auto compaction. + db.compactc <- struct{}{} + // That made auto compaction start waiting for the delay. + require.Eventually(t, func() bool { + return !getTimeWhenCompactionDelayStarted().IsZero() + }, 100*time.Millisecond, 10*time.Millisecond) + // Trigger a manual compaction. + require.NoError(t, db.CompactHead(NewRangeHead(db.Head(), 0, 50.0))) + require.Equal(t, 4.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran)) + // Re-trigger an auto compaction. + db.compactc <- struct{}{} + // That made auto compaction stop waiting for the delay. + require.Eventually(t, func() bool { + return getTimeWhenCompactionDelayStarted().IsZero() + }, 100*time.Millisecond, 10*time.Millisecond) + }) + } +} + +// TestDelayedCompactionDoesNotBlockUnrelatedOps makes sure that when delayed compaction is enabled, +// operations that don't directly derive from the Head compaction are not delayed, here we consider disk blocks compaction. +func TestDelayedCompactionDoesNotBlockUnrelatedOps(t *testing.T) { + cases := []struct { + name string + whenCompactable bool + }{ + { + "Head is compactable", + true, + }, + { + "Head is not compactable", + false, + }, + } + + for _, c := range cases { + c := c + t.Run(c.name, func(t *testing.T) { + t.Parallel() + + tmpdir := t.TempDir() + // Some blocks that need compation are present. + createBlock(t, tmpdir, genSeries(1, 1, 0, 100)) + createBlock(t, tmpdir, genSeries(1, 1, 100, 200)) + createBlock(t, tmpdir, genSeries(1, 1, 200, 300)) + + options := DefaultOptions() + // This will make the test timeout if compaction really waits for it. + options.CompactionDelay = time.Hour + db, err := open(tmpdir, log.NewNopLogger(), nil, options, []int64{10, 200}, nil) + require.NoError(t, err) + defer func() { + require.NoError(t, db.Close()) + }() + + db.DisableCompactions() + require.Len(t, db.Blocks(), 3) + + if c.whenCompactable { + label := labels.FromStrings("foo", "bar") + app := db.Appender(context.Background()) + _, err := app.Append(0, label, 301, 0) + require.NoError(t, err) + _, err = app.Append(0, label, 317, 0) + require.NoError(t, err) + require.NoError(t, app.Commit()) + // The Head is compactable and will still be at the end. + require.True(t, db.head.compactable()) + defer func() { + require.True(t, db.head.compactable()) + }() + } + + // The blocks were compacted. + db.Compact(context.Background()) + require.Len(t, db.Blocks(), 2) + }) + } +} diff --git a/tsdb/db.go b/tsdb/db.go index 87870a847..3c73c892a 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -21,6 +21,7 @@ import ( "io" "io/fs" "math" + "math/rand" "os" "path/filepath" "slices" @@ -84,6 +85,8 @@ func DefaultOptions() *Options { OutOfOrderCapMax: DefaultOutOfOrderCapMax, EnableOverlappingCompaction: true, EnableSharding: false, + EnableDelayedCompaction: false, + CompactionDelay: time.Duration(0), } } @@ -190,6 +193,13 @@ type Options struct { // EnableSharding enables query sharding support in TSDB. EnableSharding bool + // EnableDelayedCompaction, when set to true, assigns a random value to CompactionDelay during DB opening. 
+ // When set to false, delayed compaction is disabled, unless CompactionDelay is set directly. + EnableDelayedCompaction bool + // CompactionDelay delays the start time of auto compactions. + // It can be increased by up to one minute if the DB does not commit too often. + CompactionDelay time.Duration + // NewCompactorFunc is a function that returns a TSDB compactor. NewCompactorFunc NewCompactorFunc @@ -246,6 +256,9 @@ type DB struct { // Cancel a running compaction when a shutdown is initiated. compactCancel context.CancelFunc + // timeWhenCompactionDelayStarted helps delay the compactions start time. + timeWhenCompactionDelayStarted time.Time + // oooWasEnabled is true if out of order support was enabled at least one time // during the time TSDB was up. In which case we need to keep supporting // out-of-order compaction and vertical queries. @@ -998,6 +1011,10 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs db.oooWasEnabled.Store(true) } + if opts.EnableDelayedCompaction { + opts.CompactionDelay = db.generateCompactionDelay() + } + go db.run(ctx) return db, nil @@ -1186,6 +1203,12 @@ func (a dbAppender) Commit() error { return err } +// waitingForCompactionDelay returns true if the DB is waiting for the Head compaction delay. +// This doesn't guarantee that the Head is really compactable. +func (db *DB) waitingForCompactionDelay() bool { + return time.Since(db.timeWhenCompactionDelayStarted) < db.opts.CompactionDelay +} + // Compact data if possible. After successful compaction blocks are reloaded // which will also delete the blocks that fall out of the retention window. // Old blocks are only deleted on reloadBlocks based on the new block's parent information. @@ -1219,7 +1242,21 @@ func (db *DB) Compact(ctx context.Context) (returnErr error) { return nil default: } + if !db.head.compactable() { + // Reset the counter once the head compactions are done. + // This would also reset it if a manual compaction was triggered while the auto compaction was in its delay period. + if !db.timeWhenCompactionDelayStarted.IsZero() { + db.timeWhenCompactionDelayStarted = time.Time{} + } + break + } + + if db.timeWhenCompactionDelayStarted.IsZero() { + // Start counting for the delay. + db.timeWhenCompactionDelayStarted = time.Now() + } + if db.waitingForCompactionDelay() { break } mint := db.head.MinTime() @@ -1429,7 +1466,7 @@ func (db *DB) compactBlocks() (err error) { // If we have a lot of blocks to compact the whole process might take // long enough that we end up with a HEAD block that needs to be written. // Check if that's the case and stop compactions early. - if db.head.compactable() { + if db.head.compactable() && !db.waitingForCompactionDelay() { level.Warn(db.logger).Log("msg", "aborting block compactions to persit the head block") return nil } @@ -1932,6 +1969,11 @@ func (db *DB) EnableCompactions() { level.Info(db.logger).Log("msg", "Compactions enabled") } +func (db *DB) generateCompactionDelay() time.Duration { + // Up to 10% of the head's chunkRange. + return time.Duration(rand.Int63n(db.head.chunkRange.Load()/10)) * time.Millisecond +} + // ForceHeadMMap is intended for use only in tests and benchmarks. func (db *DB) ForceHeadMMap() { db.head.mmapHeadChunks() diff --git a/tsdb/db_test.go b/tsdb/db_test.go index 781f36026..cf41e25f2 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -7357,3 +7357,25 @@ func TestBlockQuerierAndBlockChunkQuerier(t *testing.T) { // Make sure only block-1 is queried. 
require.Equal(t, "block-1", lbls.Get("block")) } + +func TestGenerateCompactionDelay(t *testing.T) { + assertDelay := func(delay time.Duration) { + t.Helper() + require.GreaterOrEqual(t, delay, time.Duration(0)) + // Less than 10% of the chunkRange. + require.LessOrEqual(t, delay, 6000*time.Millisecond) + } + + opts := DefaultOptions() + opts.EnableDelayedCompaction = true + db := openTestDB(t, opts, []int64{60000}) + defer func() { + require.NoError(t, db.Close()) + }() + // The offset is generated and changed while opening. + assertDelay(db.opts.CompactionDelay) + + for i := 0; i < 1000; i++ { + assertDelay(db.generateCompactionDelay()) + } +} From 17b0b788da6a9efda12509aa2e829fd7e4b1efb8 Mon Sep 17 00:00:00 2001 From: George Krajcsovits Date: Wed, 7 Aug 2024 20:15:46 +0200 Subject: [PATCH 59/99] Update promql/engine.go Signed-off-by: George Krajcsovits --- promql/engine.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/promql/engine.go b/promql/engine.go index 621c2116e..f6b79f3a4 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -2357,7 +2357,7 @@ loop: histograms = append(histograms, HPoint{H: &histogram.FloatHistogram{}}) } if histograms[n].H == nil { - // Initialize to non zero to AtFloatHistogram does a copy for sure. + // Make sure to pass non zero H to AtFloatHistogram so that it does a deep-copy. // Not an issue in the loop above since that uses an intermediate buffer. histograms[n].H = &histogram.FloatHistogram{} } From f91009aa2ef0721ec41870d7b1ae4c44876801cb Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Thu, 8 Aug 2024 09:11:38 +1000 Subject: [PATCH 60/99] promql: clarify error message when panic occurs during query evaluation Signed-off-by: Charles Korn --- promql/engine.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/promql/engine.go b/promql/engine.go index 1427302e5..102987d73 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -1057,7 +1057,7 @@ func (ev *evaluator) recover(expr parser.Expr, ws *annotations.Annotations, errp buf := make([]byte, 64<<10) buf = buf[:runtime.Stack(buf, false)] - level.Error(ev.logger).Log("msg", "runtime panic in parser", "expr", expr.String(), "err", e, "stacktrace", string(buf)) + level.Error(ev.logger).Log("msg", "runtime panic during query evaluation", "expr", expr.String(), "err", e, "stacktrace", string(buf)) *errp = fmt.Errorf("unexpected error: %w", err) case errWithWarnings: *errp = err.err From b7a58dcf3d2347ade21ce946007efb0bcb5e22a2 Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Thu, 8 Aug 2024 03:09:39 -0700 Subject: [PATCH 61/99] Add hidden flag to disable overlapping compaction (#14581) TSDB: add hidden flag to disable overlapping compaction Signed-off-by: Ben Ye --------- Signed-off-by: Ben Ye --- cmd/prometheus/main.go | 6 +++++- tsdb/db.go | 1 - 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index d7333b657..51320c661 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -384,6 +384,9 @@ func main() { serverOnlyFlag(a, "storage.tsdb.allow-overlapping-blocks", "[DEPRECATED] This flag has no effect. Overlapping blocks are enabled by default now."). Default("true").Hidden().BoolVar(&b) + serverOnlyFlag(a, "storage.tsdb.allow-overlapping-compaction", "Allow compaction of overlapping blocks. If set to false, TSDB stops vertical compaction and leaves overlapping blocks there. The use case is to let another component handle the compaction of overlapping blocks."). 
+ Default("true").Hidden().BoolVar(&cfg.tsdb.EnableOverlappingCompaction) + serverOnlyFlag(a, "storage.tsdb.wal-compression", "Compress the tsdb WAL."). Hidden().Default("true").BoolVar(&cfg.tsdb.WALCompression) @@ -1719,6 +1722,7 @@ type tsdbOptions struct { EnableMemorySnapshotOnShutdown bool EnableNativeHistograms bool EnableDelayedCompaction bool + EnableOverlappingCompaction bool } func (opts tsdbOptions) ToTSDBOptions() tsdb.Options { @@ -1739,8 +1743,8 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options { EnableMemorySnapshotOnShutdown: opts.EnableMemorySnapshotOnShutdown, EnableNativeHistograms: opts.EnableNativeHistograms, OutOfOrderTimeWindow: opts.OutOfOrderTimeWindow, - EnableOverlappingCompaction: true, EnableDelayedCompaction: opts.EnableDelayedCompaction, + EnableOverlappingCompaction: opts.EnableOverlappingCompaction, } } diff --git a/tsdb/db.go b/tsdb/db.go index 3c73c892a..1c430c211 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -187,7 +187,6 @@ type Options struct { // The reason why this flag exists is because there are various users of the TSDB // that do not want vertical compaction happening on ingest time. Instead, // they'd rather keep overlapping blocks and let another component do the overlapping compaction later. - // For Prometheus, this will always be true. EnableOverlappingCompaction bool // EnableSharding enables query sharding support in TSDB. From 02c465bf58fe4b0270db9d5b3f5a15a2ffee7ba6 Mon Sep 17 00:00:00 2001 From: Manik Rana Date: Thu, 8 Aug 2024 17:05:35 +0530 Subject: [PATCH 62/99] textparse: Implement `CreatedTimestamp()` in `openmetricsparse.go` (#14356) * feat: initial implement of createedTimestamp() with tests Signed-off-by: Manik Rana * feat: return ct after finding it Signed-off-by: Manik Rana * chore: remove unneeded test Signed-off-by: Manik Rana * chore: add comments Signed-off-by: Manik Rana * feat: multiple changes - implement changes from pair programming session - use newParse.val() - advance parser p if ct is found Signed-off-by: Manik Rana * fix: check if err from p.Next() Signed-off-by: Manik Rana * feat: advance parser and parse histograms + summary Signed-off-by: Manik Rana * fix: restore previous tests Signed-off-by: Manik Rana * fix: retore failing tests Signed-off-by: Manik Rana * chore: remove unneeded comments Signed-off-by: Manik Rana * fix: return nil when mtype doesn't match Signed-off-by: Manik Rana * chore: update go fmt version Signed-off-by: Manik Rana * chore: cleanup Signed-off-by: Manik Rana * fix: comments Signed-off-by: Manik Rana * feat: document deepcopyparser Co-authored-by: Arthur Silva Sens Signed-off-by: Manik Rana Signed-off-by: Manik Rana * chore: lint Signed-off-by: Manik Rana * fix: cover edgecase of `gauge_created` in CreatedTimestamp() Signed-off-by: Manik Rana * refac: readability updates Signed-off-by: Manik Rana * refac: dedeuplicate labeldiff checks Signed-off-by: Manik Rana * tests: add tests for new label functions Signed-off-by: Manik Rana * feat: document CreatedTimestamp func Signed-off-by: Manik Rana * refac: optimize `CreatedTimestamp()` - Use refactored CreatedTimestamp function with bug fixes - Remove unused code in labels.go - Improve code documentation Signed-off-by: Manik Rana Signed-off-by: Manik Rana * chore: add tests and lint fixes Signed-off-by: Manik Rana * chore: remove mName Signed-off-by: Manik Rana * chore: lint Signed-off-by: Manik Rana * chore: comments Signed-off-by: Manik Rana * tests: add tests for CT parse failures and deepCopy Signed-off-by: Manik Rana * refac: edit 
expectCT struct Signed-off-by: Manik Rana * chore: lint Signed-off-by: Manik Rana * chore: add new label in deepCopy Signed-off-by: Manik Rana * fix: use p.builder in deepCopy Signed-off-by: Manik Rana * fix: add NewMetricsParserWithOpts Signed-off-by: Manik Rana * chore: lint Signed-off-by: Manik Rana * chore: comments Co-authored-by: Arthur Silva Sens Signed-off-by: Manik Rana * chore: comments Co-authored-by: Arthur Silva Sens Signed-off-by: Manik Rana * chore: rename var Signed-off-by: Manik Rana * fix: add condition for OM fuzzing Signed-off-by: Manik Rana * fix: build tags Signed-off-by: Manik Rana * refac: default skipCT to false Signed-off-by: Manik Rana * refac: rename skipCT to skipCTSeries Signed-off-by: Manik Rana * chore: formatting Signed-off-by: Manik Rana * chore: comments and readability updates Signed-off-by: Manik Rana * chore: comments Co-authored-by: Bartlomiej Plotka Signed-off-by: Manik Rana * refac: remove NewOpenMetricsParserWithOpts Signed-off-by: Manik Rana * chore: lint Signed-off-by: Manik Rana * refac: extract skipCTSeries logic from parseMetricSuffix Signed-off-by: Manik Rana * refac: inline create a NewOpenMetricsParser Signed-off-by: Manik Rana * chore: comments Signed-off-by: Manik Rana * chore: comments Co-authored-by: Bartlomiej Plotka Signed-off-by: Manik Rana * refac: improve error handling Signed-off-by: Manik Rana * fix: return error instead of nil Signed-off-by: Manik Rana * fix: remove skipCT check from tBraceOpen Signed-off-by: Manik Rana * Pair programming with Manik, Arthur and Daniel. Signed-off-by: bwplotka * chore: comments and use helper funcs Signed-off-by: Manik Rana * chore: lint Signed-off-by: Manik Rana --------- Signed-off-by: Manik Rana Signed-off-by: Manik Rana Signed-off-by: bwplotka Co-authored-by: Arthur Silva Sens Co-authored-by: Bartlomiej Plotka --- model/textparse/openmetricsparse.go | 179 ++++++++++-- model/textparse/openmetricsparse_test.go | 330 ++++++++++++++++++++++- model/textparse/promparse_test.go | 25 +- promql/fuzz.go | 4 + 4 files changed, 503 insertions(+), 35 deletions(-) diff --git a/model/textparse/openmetricsparse.go b/model/textparse/openmetricsparse.go index b7ad1dd85..5f0415d3e 100644 --- a/model/textparse/openmetricsparse.go +++ b/model/textparse/openmetricsparse.go @@ -94,16 +94,46 @@ type OpenMetricsParser struct { exemplarVal float64 exemplarTs int64 hasExemplarTs bool + + skipCTSeries bool } -// NewOpenMetricsParser returns a new parser of the byte slice. -func NewOpenMetricsParser(b []byte, st *labels.SymbolTable) Parser { - return &OpenMetricsParser{ - l: &openMetricsLexer{b: b}, - builder: labels.NewScratchBuilderWithSymbolTable(st, 16), +type openMetricsParserOptions struct { + SkipCTSeries bool +} + +type OpenMetricsOption func(*openMetricsParserOptions) + +// WithOMParserCTSeriesSkipped turns off exposing _created lines +// as series, which makes those only used for parsing created timestamp +// for `CreatedTimestamp` method purposes. +// +// It's recommended to use this option to avoid using _created lines for other +// purposes than created timestamp, but leave false by default for the +// best-effort compatibility. +func WithOMParserCTSeriesSkipped() OpenMetricsOption { + return func(o *openMetricsParserOptions) { + o.SkipCTSeries = true } } +// NewOpenMetricsParser returns a new parser for the byte slice with option to skip CT series parsing. 
+func NewOpenMetricsParser(b []byte, st *labels.SymbolTable, opts ...OpenMetricsOption) Parser { + options := &openMetricsParserOptions{} + + for _, opt := range opts { + opt(options) + } + + parser := &OpenMetricsParser{ + l: &openMetricsLexer{b: b}, + builder: labels.NewScratchBuilderWithSymbolTable(st, 16), + skipCTSeries: options.SkipCTSeries, + } + + return parser +} + // Series returns the bytes of the series, the timestamp if set, and the value // of the current sample. func (p *OpenMetricsParser) Series() ([]byte, *int64, float64) { @@ -219,10 +249,90 @@ func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool { return true } -// CreatedTimestamp returns nil as it's not implemented yet. -// TODO(bwplotka): https://github.com/prometheus/prometheus/issues/12980 +// CreatedTimestamp returns the created timestamp for a current Metric if exists or nil. +// NOTE(Maniktherana): Might use additional CPU/mem resources due to deep copy of parser required for peeking given 1.0 OM specification on _created series. func (p *OpenMetricsParser) CreatedTimestamp() *int64 { - return nil + if !TypeRequiresCT(p.mtype) { + // Not a CT supported metric type, fast path. + return nil + } + + var ( + currLset labels.Labels + buf []byte + peekWithoutNameLsetHash uint64 + ) + p.Metric(&currLset) + currFamilyLsetHash, buf := currLset.HashWithoutLabels(buf, labels.MetricName, "le", "quantile") + // Search for the _created line for the currFamilyLsetHash using ephemeral parser until + // we see EOF or new metric family. We have to do it as we don't know where (and if) + // that CT line is. + // TODO(bwplotka): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this. + peek := deepCopy(p) + for { + eType, err := peek.Next() + if err != nil { + // This means peek will give error too later on, so def no CT line found. + // This might result in partial scrape with wrong/missing CT, but only + // spec improvement would help. + // TODO(bwplotka): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this. + return nil + } + if eType != EntrySeries { + // Assume we hit different family, no CT line found. + return nil + } + + var peekedLset labels.Labels + peek.Metric(&peekedLset) + peekedName := peekedLset.Get(model.MetricNameLabel) + if !strings.HasSuffix(peekedName, "_created") { + // Not a CT line, search more. + continue + } + + // We got a CT line here, but let's search if CT line is actually for our series, edge case. + peekWithoutNameLsetHash, _ = peekedLset.HashWithoutLabels(buf, labels.MetricName, "le", "quantile") + if peekWithoutNameLsetHash != currFamilyLsetHash { + // CT line for a different series, for our series no CT. + return nil + } + ct := int64(peek.val) + return &ct + } +} + +// TypeRequiresCT returns true if the metric type requires a _created timestamp. +func TypeRequiresCT(t model.MetricType) bool { + switch t { + case model.MetricTypeCounter, model.MetricTypeSummary, model.MetricTypeHistogram: + return true + default: + return false + } +} + +// deepCopy creates a copy of a parser without re-using the slices' original memory addresses. 
+func deepCopy(p *OpenMetricsParser) OpenMetricsParser { + newB := make([]byte, len(p.l.b)) + copy(newB, p.l.b) + + newLexer := &openMetricsLexer{ + b: newB, + i: p.l.i, + start: p.l.start, + err: p.l.err, + state: p.l.state, + } + + newParser := OpenMetricsParser{ + l: newLexer, + builder: p.builder, + mtype: p.mtype, + val: p.val, + skipCTSeries: false, + } + return newParser } // nextToken returns the next token from the openMetricsLexer. @@ -337,7 +447,13 @@ func (p *OpenMetricsParser) Next() (Entry, error) { } p.series = p.l.b[p.start:p.l.i] - return p.parseMetricSuffix(p.nextToken()) + if err := p.parseSeriesEndOfLine(p.nextToken()); err != nil { + return EntryInvalid, err + } + if p.skipCTSeries && p.isCreatedSeries() { + return p.Next() + } + return EntrySeries, nil case tMName: p.offsets = append(p.offsets, p.start, p.l.i) p.series = p.l.b[p.start:p.l.i] @@ -351,8 +467,14 @@ func (p *OpenMetricsParser) Next() (Entry, error) { p.series = p.l.b[p.start:p.l.i] t2 = p.nextToken() } - return p.parseMetricSuffix(t2) + if err := p.parseSeriesEndOfLine(t2); err != nil { + return EntryInvalid, err + } + if p.skipCTSeries && p.isCreatedSeries() { + return p.Next() + } + return EntrySeries, nil default: err = p.parseError("expected a valid start token", t) } @@ -467,51 +589,64 @@ func (p *OpenMetricsParser) parseLVals(offsets []int, isExemplar bool) ([]int, e } } -// parseMetricSuffix parses the end of the line after the metric name and -// labels. It starts parsing with the provided token. -func (p *OpenMetricsParser) parseMetricSuffix(t token) (Entry, error) { +// isCreatedSeries returns true if the current series is a _created series. +func (p *OpenMetricsParser) isCreatedSeries() bool { + var newLbs labels.Labels + p.Metric(&newLbs) + name := newLbs.Get(model.MetricNameLabel) + if TypeRequiresCT(p.mtype) && strings.HasSuffix(name, "_created") { + return true + } + return false +} + +// parseSeriesEndOfLine parses the series end of the line (value, optional +// timestamp, commentary, etc.) after the metric name and labels. +// It starts parsing with the provided token. +func (p *OpenMetricsParser) parseSeriesEndOfLine(t token) error { if p.offsets[0] == -1 { - return EntryInvalid, fmt.Errorf("metric name not set while parsing: %q", p.l.b[p.start:p.l.i]) + return fmt.Errorf("metric name not set while parsing: %q", p.l.b[p.start:p.l.i]) } var err error p.val, err = p.getFloatValue(t, "metric") if err != nil { - return EntryInvalid, err + return err } p.hasTS = false switch t2 := p.nextToken(); t2 { case tEOF: - return EntryInvalid, errors.New("data does not end with # EOF") + return errors.New("data does not end with # EOF") case tLinebreak: break case tComment: if err := p.parseComment(); err != nil { - return EntryInvalid, err + return err } case tTimestamp: p.hasTS = true var ts float64 // A float is enough to hold what we need for millisecond resolution. 
if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil { - return EntryInvalid, fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i]) + return fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i]) } if math.IsNaN(ts) || math.IsInf(ts, 0) { - return EntryInvalid, fmt.Errorf("invalid timestamp %f", ts) + return fmt.Errorf("invalid timestamp %f", ts) } p.ts = int64(ts * 1000) switch t3 := p.nextToken(); t3 { case tLinebreak: case tComment: if err := p.parseComment(); err != nil { - return EntryInvalid, err + return err } default: - return EntryInvalid, p.parseError("expected next entry after timestamp", t3) + return p.parseError("expected next entry after timestamp", t3) } } - return EntrySeries, nil + + return nil } func (p *OpenMetricsParser) getFloatValue(t token, after string) (float64, error) { diff --git a/model/textparse/openmetricsparse_test.go b/model/textparse/openmetricsparse_test.go index bc76a540d..cadaabc99 100644 --- a/model/textparse/openmetricsparse_test.go +++ b/model/textparse/openmetricsparse_test.go @@ -14,6 +14,7 @@ package textparse import ( + "errors" "io" "testing" @@ -24,6 +25,8 @@ import ( "github.com/prometheus/prometheus/model/labels" ) +func int64p(x int64) *int64 { return &x } + func TestOpenMetricsParse(t *testing.T) { input := `# HELP go_gc_duration_seconds A summary of the GC invocation durations. # TYPE go_gc_duration_seconds summary @@ -63,15 +66,34 @@ ss{A="a"} 0 _metric_starting_with_underscore 1 testmetric{_label_starting_with_underscore="foo"} 1 testmetric{label="\"bar\""} 1 +# HELP foo Counter with and without labels to certify CT is parsed for both cases # TYPE foo counter -foo_total 17.0 1520879607.789 # {id="counter-test"} 5` +foo_total 17.0 1520879607.789 # {id="counter-test"} 5 +foo_created 1000 +foo_total{a="b"} 17.0 1520879607.789 # {id="counter-test"} 5 +foo_created{a="b"} 1000 +# HELP bar Summary with CT at the end, making sure we find CT even if it's multiple lines a far +# TYPE bar summary +bar_count 17.0 +bar_sum 324789.3 +bar{quantile="0.95"} 123.7 +bar{quantile="0.99"} 150.0 +bar_created 1520430000 +# HELP baz Histogram with the same objective as above's summary +# TYPE baz histogram +baz_bucket{le="0.0"} 0 +baz_bucket{le="+Inf"} 17 +baz_count 17 +baz_sum 324789.3 +baz_created 1520430000 +# HELP fizz_created Gauge which shouldn't be parsed as CT +# TYPE fizz_created gauge +fizz_created 17.0` input += "\n# HELP metric foo\x00bar" input += "\nnull_byte_metric{a=\"abc\x00\"} 1" input += "\n# EOF\n" - int64p := func(x int64) *int64 { return &x } - exp := []expectedParse{ { m: "go_gc_duration_seconds", @@ -216,6 +238,9 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5` m: "testmetric{label=\"\\\"bar\\\"\"}", v: 1, lset: labels.FromStrings("__name__", "testmetric", "label", `"bar"`), + }, { + m: "foo", + help: "Counter with and without labels to certify CT is parsed for both cases", }, { m: "foo", typ: model.MetricTypeCounter, @@ -225,6 +250,76 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5` lset: labels.FromStrings("__name__", "foo_total"), t: int64p(1520879607789), e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "counter-test"), Value: 5}, + ct: int64p(1000), + }, { + m: `foo_total{a="b"}`, + v: 17.0, + lset: labels.FromStrings("__name__", "foo_total", "a", "b"), + t: int64p(1520879607789), + e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "counter-test"), Value: 5}, + ct: int64p(1000), + }, { + m: "bar", + help: "Summary with CT at the end, making sure we find CT even if it's multiple 
lines a far", + }, { + m: "bar", + typ: model.MetricTypeSummary, + }, { + m: "bar_count", + v: 17.0, + lset: labels.FromStrings("__name__", "bar_count"), + ct: int64p(1520430000), + }, { + m: "bar_sum", + v: 324789.3, + lset: labels.FromStrings("__name__", "bar_sum"), + ct: int64p(1520430000), + }, { + m: `bar{quantile="0.95"}`, + v: 123.7, + lset: labels.FromStrings("__name__", "bar", "quantile", "0.95"), + ct: int64p(1520430000), + }, { + m: `bar{quantile="0.99"}`, + v: 150.0, + lset: labels.FromStrings("__name__", "bar", "quantile", "0.99"), + ct: int64p(1520430000), + }, { + m: "baz", + help: "Histogram with the same objective as above's summary", + }, { + m: "baz", + typ: model.MetricTypeHistogram, + }, { + m: `baz_bucket{le="0.0"}`, + v: 0, + lset: labels.FromStrings("__name__", "baz_bucket", "le", "0.0"), + ct: int64p(1520430000), + }, { + m: `baz_bucket{le="+Inf"}`, + v: 17, + lset: labels.FromStrings("__name__", "baz_bucket", "le", "+Inf"), + ct: int64p(1520430000), + }, { + m: `baz_count`, + v: 17, + lset: labels.FromStrings("__name__", "baz_count"), + ct: int64p(1520430000), + }, { + m: `baz_sum`, + v: 324789.3, + lset: labels.FromStrings("__name__", "baz_sum"), + ct: int64p(1520430000), + }, { + m: "fizz_created", + help: "Gauge which shouldn't be parsed as CT", + }, { + m: "fizz_created", + typ: model.MetricTypeGauge, + }, { + m: `fizz_created`, + v: 17, + lset: labels.FromStrings("__name__", "fizz_created"), }, { m: "metric", help: "foo\x00bar", @@ -235,8 +330,8 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5` }, } - p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable()) - checkParseResults(t, p, exp) + p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped()) + checkParseResultsWithCT(t, p, exp, true) } func TestUTF8OpenMetricsParse(t *testing.T) { @@ -251,6 +346,7 @@ func TestUTF8OpenMetricsParse(t *testing.T) { # UNIT "go.gc_duration_seconds" seconds {"go.gc_duration_seconds",quantile="0"} 4.9351e-05 {"go.gc_duration_seconds",quantile="0.25"} 7.424100000000001e-05 +{"go.gc_duration_seconds_created"} 12313 {"go.gc_duration_seconds",quantile="0.5",a="b"} 8.3835e-05 {"http.status",q="0.9",a="b"} 8.3835e-05 {"http.status",q="0.9",a="b"} 8.3835e-05 @@ -274,10 +370,12 @@ func TestUTF8OpenMetricsParse(t *testing.T) { m: `{"go.gc_duration_seconds",quantile="0"}`, v: 4.9351e-05, lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0"), + ct: int64p(12313), }, { m: `{"go.gc_duration_seconds",quantile="0.25"}`, v: 7.424100000000001e-05, lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0.25"), + ct: int64p(12313), }, { m: `{"go.gc_duration_seconds",quantile="0.5",a="b"}`, v: 8.3835e-05, @@ -306,8 +404,8 @@ choices}`, "strange©™\n'quoted' \"name\"", "6"), }, } - p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable()) - checkParseResults(t, p, exp) + p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped()) + checkParseResultsWithCT(t, p, exp, true) } func TestOpenMetricsParseErrors(t *testing.T) { @@ -598,10 +696,6 @@ func TestOpenMetricsParseErrors(t *testing.T) { input: "# TYPE hhh histogram\nhhh_bucket{le=\"+Inf\"} 1 # {aa=\"bb\"} 4 -Inf", err: `invalid exemplar timestamp -Inf`, }, - { - input: "# TYPE hhh histogram\nhhh_bucket{le=\"+Inf\"} 1 # {aa=\"bb\"} 4 Inf", - err: `invalid exemplar timestamp +Inf`, - }, } for i, c := range cases { @@ -684,3 +778,217 @@ func TestOMNullByteHandling(t *testing.T) { require.Equal(t, c.err, 
err.Error(), "test %d", i) } } + +// While not desirable, there are cases were CT fails to parse and +// these tests show them. +// TODO(maniktherana): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this. +func TestCTParseFailures(t *testing.T) { + input := `# HELP something Histogram with _created between buckets and summary +# TYPE something histogram +something_count 17 +something_sum 324789.3 +something_created 1520430001 +something_bucket{le="0.0"} 0 +something_bucket{le="+Inf"} 17 +# HELP thing Histogram with _created as first line +# TYPE thing histogram +thing_created 1520430002 +thing_count 17 +thing_sum 324789.3 +thing_bucket{le="0.0"} 0 +thing_bucket{le="+Inf"} 17 +# HELP yum Summary with _created between sum and quantiles +# TYPE yum summary +yum_count 17.0 +yum_sum 324789.3 +yum_created 1520430003 +yum{quantile="0.95"} 123.7 +yum{quantile="0.99"} 150.0 +# HELP foobar Summary with _created as the first line +# TYPE foobar summary +foobar_created 1520430004 +foobar_count 17.0 +foobar_sum 324789.3 +foobar{quantile="0.95"} 123.7 +foobar{quantile="0.99"} 150.0` + + input += "\n# EOF\n" + + int64p := func(x int64) *int64 { return &x } + + type expectCT struct { + m string + ct *int64 + typ model.MetricType + help string + isErr bool + } + + exp := []expectCT{ + { + m: "something", + help: "Histogram with _created between buckets and summary", + isErr: false, + }, { + m: "something", + typ: model.MetricTypeHistogram, + isErr: false, + }, { + m: `something_count`, + ct: int64p(1520430001), + isErr: false, + }, { + m: `something_sum`, + ct: int64p(1520430001), + isErr: false, + }, { + m: `something_bucket{le="0.0"}`, + ct: int64p(1520430001), + isErr: true, + }, { + m: `something_bucket{le="+Inf"}`, + ct: int64p(1520430001), + isErr: true, + }, { + m: "thing", + help: "Histogram with _created as first line", + isErr: false, + }, { + m: "thing", + typ: model.MetricTypeHistogram, + isErr: false, + }, { + m: `thing_count`, + ct: int64p(1520430002), + isErr: true, + }, { + m: `thing_sum`, + ct: int64p(1520430002), + isErr: true, + }, { + m: `thing_bucket{le="0.0"}`, + ct: int64p(1520430002), + isErr: true, + }, { + m: `thing_bucket{le="+Inf"}`, + ct: int64p(1520430002), + isErr: true, + }, { + m: "yum", + help: "Summary with _created between summary and quantiles", + isErr: false, + }, { + m: "yum", + typ: model.MetricTypeSummary, + isErr: false, + }, { + m: "yum_count", + ct: int64p(1520430003), + isErr: false, + }, { + m: "yum_sum", + ct: int64p(1520430003), + isErr: false, + }, { + m: `yum{quantile="0.95"}`, + ct: int64p(1520430003), + isErr: true, + }, { + m: `yum{quantile="0.99"}`, + ct: int64p(1520430003), + isErr: true, + }, { + m: "foobar", + help: "Summary with _created as the first line", + isErr: false, + }, { + m: "foobar", + typ: model.MetricTypeSummary, + isErr: false, + }, { + m: "foobar_count", + ct: int64p(1520430004), + isErr: true, + }, { + m: "foobar_sum", + ct: int64p(1520430004), + isErr: true, + }, { + m: `foobar{quantile="0.95"}`, + ct: int64p(1520430004), + isErr: true, + }, { + m: `foobar{quantile="0.99"}`, + ct: int64p(1520430004), + isErr: true, + }, + } + + p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped()) + i := 0 + + var res labels.Labels + for { + et, err := p.Next() + if errors.Is(err, io.EOF) { + break + } + require.NoError(t, err) + + switch et { + case EntrySeries: + p.Metric(&res) + + if ct := p.CreatedTimestamp(); exp[i].isErr { + require.Nil(t, ct) + } else { + require.Equal(t, 
*exp[i].ct, *ct) + } + default: + i++ + continue + } + i++ + } +} + +func TestDeepCopy(t *testing.T) { + input := []byte(`# HELP go_goroutines A gauge goroutines. +# TYPE go_goroutines gauge +go_goroutines 33 123.123 +# TYPE go_gc_duration_seconds summary +go_gc_duration_seconds +go_gc_duration_seconds_created`) + + st := labels.NewSymbolTable() + parser := NewOpenMetricsParser(input, st, WithOMParserCTSeriesSkipped()).(*OpenMetricsParser) + + // Modify the original parser state + _, err := parser.Next() + require.NoError(t, err) + require.Equal(t, "go_goroutines", string(parser.l.b[parser.offsets[0]:parser.offsets[1]])) + require.True(t, parser.skipCTSeries) + + // Create a deep copy of the parser + copyParser := deepCopy(parser) + etype, err := copyParser.Next() + require.NoError(t, err) + require.Equal(t, EntryType, etype) + require.True(t, parser.skipCTSeries) + require.False(t, copyParser.skipCTSeries) + + // Modify the original parser further + parser.Next() + parser.Next() + parser.Next() + require.Equal(t, "go_gc_duration_seconds", string(parser.l.b[parser.offsets[0]:parser.offsets[1]])) + require.Equal(t, "summary", string(parser.mtype)) + require.False(t, copyParser.skipCTSeries) + require.True(t, parser.skipCTSeries) + + // Ensure the copy remains unchanged + copyParser.Next() + copyParser.Next() + require.Equal(t, "go_gc_duration_seconds", string(copyParser.l.b[copyParser.offsets[0]:copyParser.offsets[1]])) + require.False(t, copyParser.skipCTSeries) +} diff --git a/model/textparse/promparse_test.go b/model/textparse/promparse_test.go index 66986291d..7971d23b7 100644 --- a/model/textparse/promparse_test.go +++ b/model/textparse/promparse_test.go @@ -18,6 +18,7 @@ import ( "errors" "io" "os" + "strings" "testing" "github.com/klauspost/compress/gzip" @@ -41,6 +42,7 @@ type expectedParse struct { unit string comment string e *exemplar.Exemplar + ct *int64 } func TestPromParse(t *testing.T) { @@ -188,6 +190,10 @@ testmetric{label="\"bar\""} 1` } func checkParseResults(t *testing.T, p Parser, exp []expectedParse) { + checkParseResultsWithCT(t, p, exp, false) +} + +func checkParseResultsWithCT(t *testing.T, p Parser, exp []expectedParse, ctLinesRemoved bool) { i := 0 var res labels.Labels @@ -205,6 +211,14 @@ func checkParseResults(t *testing.T, p Parser, exp []expectedParse) { p.Metric(&res) + if ctLinesRemoved { + // Are CT series skipped? 
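+				// With the skip option enabled, no series whose name ends in "_created"
+				// (for a CT-capable metric type) should ever be returned by Next.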
+ _, typ := p.Type() + if TypeRequiresCT(typ) && strings.HasSuffix(res.Get(labels.MetricName), "_created") { + t.Fatalf("we exped created lines skipped") + } + } + require.Equal(t, exp[i].m, string(m)) require.Equal(t, exp[i].t, ts) require.Equal(t, exp[i].v, v) @@ -218,6 +232,11 @@ func checkParseResults(t *testing.T, p Parser, exp []expectedParse) { require.True(t, found) testutil.RequireEqual(t, *exp[i].e, e) } + if ct := p.CreatedTimestamp(); ct != nil { + require.Equal(t, *exp[i].ct, *ct) + } else { + require.Nil(t, exp[i].ct) + } case EntryType: m, typ := p.Type() @@ -475,8 +494,10 @@ const ( func BenchmarkParse(b *testing.B) { for parserName, parser := range map[string]func([]byte, *labels.SymbolTable) Parser{ - "prometheus": NewPromParser, - "openmetrics": NewOpenMetricsParser, + "prometheus": NewPromParser, + "openmetrics": func(b []byte, st *labels.SymbolTable) Parser { + return NewOpenMetricsParser(b, st) + }, } { for _, fn := range []string{"promtestdata.txt", "promtestdata.nometa.txt"} { f, err := os.Open(fn) diff --git a/promql/fuzz.go b/promql/fuzz.go index 3fd50b949..5f08e6a72 100644 --- a/promql/fuzz.go +++ b/promql/fuzz.go @@ -68,6 +68,10 @@ func fuzzParseMetricWithContentType(in []byte, contentType string) int { panic(warning) } + if contentType == "application/openmetrics-text" { + p = textparse.NewOpenMetricsParser(in, symbolTable) + } + var err error for { _, err = p.Next() From 2360ce8d2be00a7a7f237e812d88300751b5322f Mon Sep 17 00:00:00 2001 From: beorn7 Date: Thu, 8 Aug 2024 14:37:29 +0200 Subject: [PATCH 63/99] docs: Be more explicit about `promql-experimental-functions` We have not mentioned that experimental PromQL functions might get removed entirely, although that's one of the most important properties of functions declared experimental. Signed-off-by: beorn7 --- docs/feature_flags.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/feature_flags.md b/docs/feature_flags.md index 3f92ab7fd..c9a3558fa 100644 --- a/docs/feature_flags.md +++ b/docs/feature_flags.md @@ -200,8 +200,9 @@ won't work when you push OTLP metrics. `--enable-feature=promql-experimental-functions` -Enables PromQL functions that are considered experimental and whose name or -semantics could change. +Enables PromQL functions that are considered experimental. These functions +might change their name, syntax, or semantics. They might also get removed +entirely. ## Created Timestamps Zero Injection From 1ea37816996ada0873c8d136c0033580e9a0301b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Thu, 8 Aug 2024 15:05:29 +0200 Subject: [PATCH 64/99] Fix ToEncodedChunks minT for recoded chunks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Discovered while working on #14546 OOO native histograms. Not triggered on main before #14546 as the code path is unused. There was a bug where the min time of a chunk was adjusted even if it was only recoded and not completely new. 
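To illustrate the intended accounting, here is a minimal sketch (variable names
as in OOOChunk.ToEncodedChunks; an illustration of the fix, not the full loop):

    if newChunk != nil {
        // The appender returned a different chunk object.
        if !recoded {
            // Genuinely new chunk: flush the finished one and start a new
            // time range at the current sample.
            chks = append(chks, memChunk{chunk, cmint, cmaxt, nil})
            cmint = s.t
        }
        // If the chunk was only recoded, it still covers the earlier samples,
        // so cmint must stay where it was.
        chunk = newChunk
    }
    cmaxt = s.t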
Signed-off-by: György Krajcsovits --- tsdb/head_append.go | 2 +- tsdb/ooo_head.go | 13 ++-- tsdb/ooo_head_test.go | 143 +++++++++++++++++++++++++++++++++++++++++- 3 files changed, 149 insertions(+), 9 deletions(-) diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 8d66d1e81..0d4628eaf 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -1091,7 +1091,7 @@ func (s *memSeries) insert(t int64, v float64, chunkDiskMapper *chunks.ChunkDisk chunkCreated = true } - ok := c.chunk.Insert(t, v) + ok := c.chunk.Insert(t, v, nil, nil) if ok { if chunkCreated || t < c.minTime { c.minTime = t diff --git a/tsdb/ooo_head.go b/tsdb/ooo_head.go index b2556d62e..59477e5a5 100644 --- a/tsdb/ooo_head.go +++ b/tsdb/ooo_head.go @@ -17,6 +17,7 @@ import ( "fmt" "sort" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/oklog/ulid" @@ -39,13 +40,13 @@ func NewOOOChunk() *OOOChunk { // Insert inserts the sample such that order is maintained. // Returns false if insert was not possible due to the same timestamp already existing. -func (o *OOOChunk) Insert(t int64, v float64) bool { +func (o *OOOChunk) Insert(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram) bool { // Although out-of-order samples can be out-of-order amongst themselves, we // are opinionated and expect them to be usually in-order meaning we could // try to append at the end first if the new timestamp is higher than the // last known timestamp. if len(o.samples) == 0 || t > o.samples[len(o.samples)-1].t { - o.samples = append(o.samples, sample{t, v, nil, nil}) + o.samples = append(o.samples, sample{t, v, h, fh}) return true } @@ -54,7 +55,7 @@ func (o *OOOChunk) Insert(t int64, v float64) bool { if i >= len(o.samples) { // none found. append it at the end - o.samples = append(o.samples, sample{t, v, nil, nil}) + o.samples = append(o.samples, sample{t, v, h, fh}) return true } @@ -66,7 +67,7 @@ func (o *OOOChunk) Insert(t int64, v float64) bool { // Expand length by 1 to make room. use a zero sample, we will overwrite it anyway. o.samples = append(o.samples, sample{}) copy(o.samples[i+1:], o.samples[i:]) - o.samples[i] = sample{t, v, nil, nil} + o.samples[i] = sample{t, v, h, fh} return true } @@ -142,9 +143,9 @@ func (o *OOOChunk) ToEncodedChunks(mint, maxt int64) (chks []memChunk, err error if newChunk != nil { // A new chunk was allocated. if !recoded { chks = append(chks, memChunk{chunk, cmint, cmaxt, nil}) + cmint = s.t } chunk = newChunk - cmint = s.t } case chunkenc.EncFloatHistogram: // Ignoring ok is ok, since we don't want to compare to the wrong previous appender anyway. @@ -157,9 +158,9 @@ func (o *OOOChunk) ToEncodedChunks(mint, maxt int64) (chks []memChunk, err error if newChunk != nil { // A new chunk was allocated. 
if !recoded { chks = append(chks, memChunk{chunk, cmint, cmaxt, nil}) + cmint = s.t } chunk = newChunk - cmint = s.t } } cmaxt = s.t diff --git a/tsdb/ooo_head_test.go b/tsdb/ooo_head_test.go index 27ff4048b..d3cd5f601 100644 --- a/tsdb/ooo_head_test.go +++ b/tsdb/ooo_head_test.go @@ -14,8 +14,14 @@ package tsdb import ( + "math" "testing" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/tsdb/tsdbutil" + "github.com/stretchr/testify/require" ) @@ -52,7 +58,7 @@ func TestOOOInsert(t *testing.T) { chunk := NewOOOChunk() chunk.samples = makeEvenSampleSlice(numPreExisting) newSample := samplify(valOdd(insertPos)) - chunk.Insert(newSample.t, newSample.f) + chunk.Insert(newSample.t, newSample.f, nil, nil) var expSamples []sample // Our expected new samples slice, will be first the original samples. @@ -83,7 +89,7 @@ func TestOOOInsertDuplicate(t *testing.T) { dupSample := chunk.samples[dupPos] dupSample.f = 0.123 - ok := chunk.Insert(dupSample.t, dupSample.f) + ok := chunk.Insert(dupSample.t, dupSample.f, nil, nil) expSamples := makeEvenSampleSlice(num) // We expect no change. require.False(t, ok) @@ -91,3 +97,136 @@ func TestOOOInsertDuplicate(t *testing.T) { } } } + +type chunkVerify struct { + encoding chunkenc.Encoding + minTime int64 + maxTime int64 +} + +func TestOOOChunks_ToEncodedChunks(t *testing.T) { + h1 := tsdbutil.GenerateTestHistogram(1) + // Make h2 appendable but with more buckets, to trigger recoding. + h2 := h1.Copy() + h2.PositiveSpans = append(h2.PositiveSpans, histogram.Span{Offset: 1, Length: 1}) + h2.PositiveBuckets = append(h2.PositiveBuckets, 12) + + testCases := map[string]struct { + samples []sample + expectedCounterResets []histogram.CounterResetHint + expectedChunks []chunkVerify + }{ + "empty": { + samples: []sample{}, + }, + "has floats": { + samples: []sample{ + {t: 1000, f: 43.0}, + {t: 1100, f: 42.0}, + }, + expectedCounterResets: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.UnknownCounterReset}, + expectedChunks: []chunkVerify{ + {encoding: chunkenc.EncXOR, minTime: 1000, maxTime: 1100}, + }, + }, + "mix of floats and histograms": { + samples: []sample{ + {t: 1000, f: 43.0}, + {t: 1100, h: h1}, + {t: 1200, f: 42.0}, + }, + expectedCounterResets: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.UnknownCounterReset, histogram.UnknownCounterReset}, + expectedChunks: []chunkVerify{ + {encoding: chunkenc.EncXOR, minTime: 1000, maxTime: 1000}, + {encoding: chunkenc.EncHistogram, minTime: 1100, maxTime: 1100}, + {encoding: chunkenc.EncXOR, minTime: 1200, maxTime: 1200}, + }, + }, + "has a counter reset": { + samples: []sample{ + {t: 1000, h: h2}, + {t: 1100, h: h1}, + }, + expectedCounterResets: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.CounterReset}, + expectedChunks: []chunkVerify{ + {encoding: chunkenc.EncHistogram, minTime: 1000, maxTime: 1000}, + {encoding: chunkenc.EncHistogram, minTime: 1100, maxTime: 1100}, + }, + }, + "has a recoded histogram": { // Regression test for wrong minT, maxT in histogram recoding. 
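+			// h2 carries one more bucket than h1, so appending it re-encodes
+			// (recodes) the open chunk rather than cutting a new one; the single
+			// resulting chunk must keep minTime 0 from the first sample.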
+ samples: []sample{ + {t: 0, h: h1}, + {t: 1, h: h2}, + }, + expectedCounterResets: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.NotCounterReset}, + expectedChunks: []chunkVerify{ + {encoding: chunkenc.EncHistogram, minTime: 0, maxTime: 1}, + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + // Sanity check. + require.Equal(t, len(tc.samples), len(tc.expectedCounterResets), "number of samples and counter resets") + + oooChunk := OOOChunk{} + for _, s := range tc.samples { + switch s.Type() { + case chunkenc.ValFloat: + oooChunk.Insert(s.t, s.f, nil, nil) + case chunkenc.ValHistogram: + oooChunk.Insert(s.t, 0, s.h.Copy(), nil) + case chunkenc.ValFloatHistogram: + oooChunk.Insert(s.t, 0, nil, s.fh.Copy()) + default: + t.Fatalf("unexpected sample type %d", s.Type()) + } + } + + chunks, err := oooChunk.ToEncodedChunks(math.MinInt64, math.MaxInt64) + require.NoError(t, err) + require.Equal(t, len(tc.expectedChunks), len(chunks), "number of chunks") + sampleIndex := 0 + for i, c := range chunks { + require.Equal(t, tc.expectedChunks[i].encoding, c.chunk.Encoding(), "chunk %d encoding", i) + require.Equal(t, tc.expectedChunks[i].minTime, c.minTime, "chunk %d minTime", i) + require.Equal(t, tc.expectedChunks[i].maxTime, c.maxTime, "chunk %d maxTime", i) + samples, err := storage.ExpandSamples(c.chunk.Iterator(nil), newSample) + require.GreaterOrEqual(t, len(tc.samples)-sampleIndex, len(samples), "too many samples in chunk %d expected less than %d", i, len(tc.samples)-sampleIndex) + require.NoError(t, err) + if len(samples) == 0 { + // Ignore empty chunks. + continue + } + switch c.chunk.Encoding() { + case chunkenc.EncXOR: + for j, s := range samples { + require.Equal(t, chunkenc.ValFloat, s.Type()) + // XOR chunks don't have counter reset hints, so we shouldn't expect anything else than UnknownCounterReset. 
+ require.Equal(t, histogram.UnknownCounterReset, tc.expectedCounterResets[sampleIndex+j], "sample reset hint %d", sampleIndex+j) + require.Equal(t, tc.samples[sampleIndex+j].f, s.F(), "sample %d", sampleIndex+j) + } + case chunkenc.EncHistogram: + for j, s := range samples { + require.Equal(t, chunkenc.ValHistogram, s.Type()) + require.Equal(t, tc.expectedCounterResets[sampleIndex+j], s.H().CounterResetHint, "sample reset hint %d", sampleIndex+j) + compareTo := tc.samples[sampleIndex+j].h.Copy() + compareTo.CounterResetHint = tc.expectedCounterResets[sampleIndex+j] + require.Equal(t, compareTo, s.H().Compact(0), "sample %d", sampleIndex+j) + } + case chunkenc.EncFloatHistogram: + for j, s := range samples { + require.Equal(t, chunkenc.ValFloatHistogram, s.Type()) + require.Equal(t, tc.expectedCounterResets[sampleIndex+j], s.FH().CounterResetHint, "sample reset hint %d", sampleIndex+j) + compareTo := tc.samples[sampleIndex+j].fh.Copy() + compareTo.CounterResetHint = tc.expectedCounterResets[sampleIndex+j] + require.Equal(t, compareTo, s.FH().Compact(0), "sample %d", sampleIndex+j) + } + } + sampleIndex += len(samples) + } + require.Equal(t, len(tc.samples), sampleIndex, "number of samples") + }) + } +} From 82bb35fabb609b9da87c6c15931917486ca8911a Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Fri, 9 Aug 2024 13:51:31 +1000 Subject: [PATCH 65/99] Address PR feedback: fix typo and rename variable Signed-off-by: Charles Korn --- promql/engine.go | 34 +++++++++---------- .../testdata/native_histograms.test | 2 +- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index 6f0c64d42..b20690a6d 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -2781,12 +2781,12 @@ type groupedAggregation struct { heap vectorByValueHeap // All bools together for better packing within the struct. - seen bool // Was this output groups seen in the input at this timestamp. - hasFloat bool // Has at least 1 float64 sample aggregated. - hasHistogram bool // Has at least 1 histogram sample aggregated. - abandonHistogram bool // If true, group has seen mixed exponential and custom buckets, or incompatible custom buckets. - groupAggrComplete bool // Used by LIMITK to short-cut series loop when we've reached K elem on every group. - incrementalMean bool // True after reverting to incremental calculation of the mean value. + seen bool // Was this output groups seen in the input at this timestamp. + hasFloat bool // Has at least 1 float64 sample aggregated. + hasHistogram bool // Has at least 1 histogram sample aggregated. + incompatibleHistograms bool // If true, group has seen mixed exponential and custom buckets, or incompatible custom buckets. + groupAggrComplete bool // Used by LIMITK to short-cut series loop when we've reached K elem on every group. + incrementalMean bool // True after reverting to incremental calculation of the mean value. } // aggregation evaluates sum, avg, count, stdvar, stddev or quantile at one timestep on inputMatrix. @@ -2810,11 +2810,11 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix // Initialize this group if it's the first time we've seen it. 
if !group.seen { *group = groupedAggregation{ - seen: true, - floatValue: f, - floatMean: f, - abandonHistogram: false, - groupCount: 1, + seen: true, + floatValue: f, + floatMean: f, + incompatibleHistograms: false, + groupCount: 1, } switch op { case parser.AVG, parser.SUM: @@ -2835,7 +2835,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix continue } - if group.abandonHistogram { + if group.incompatibleHistograms { continue } @@ -2847,7 +2847,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix _, err := group.histogramValue.Add(h) if err != nil { handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos) - group.abandonHistogram = true + group.incompatibleHistograms = true } } // Otherwise the aggregation contained floats @@ -2868,13 +2868,13 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix toAdd, err := left.Sub(right) if err != nil { handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos) - group.abandonHistogram = true + group.incompatibleHistograms = true continue } _, err = group.histogramValue.Add(toAdd) if err != nil { handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos) - group.abandonHistogram = true + group.incompatibleHistograms = true continue } } @@ -2972,7 +2972,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix continue } switch { - case aggr.abandonHistogram: + case aggr.incompatibleHistograms: continue case aggr.hasHistogram: aggr.histogramValue = aggr.histogramValue.Compact(0) @@ -3001,7 +3001,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix continue } switch { - case aggr.abandonHistogram: + case aggr.incompatibleHistograms: continue case aggr.hasHistogram: aggr.histogramValue.Compact(0) diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index 62fac87c1..09b02f641 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -772,7 +772,7 @@ load 6m # T=0: only exponential # T=6: only custom -# T=12: mixed, should be ignored and emit an warning +# T=12: mixed, should be ignored and emit a warning eval_warn range from 0 to 12m step 6m sum(metric) {} {{sum:7 count:5 buckets:[2 3 2]}} {{schema:-53 sum:16 count:3 custom_values:[5 10] buckets:[1 2]}} _ From 5cfdde327c2176da01b7f418d3521e5682231340 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Fri, 9 Aug 2024 13:57:37 +1000 Subject: [PATCH 66/99] Address PR feedback: add extra test case Signed-off-by: Charles Korn --- .../testdata/native_histograms.test | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index 09b02f641..bb99afd47 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -795,3 +795,24 @@ eval_warn range from 0 to 12m step 6m sum(metric) eval_warn range from 0 to 12m step 6m avg(metric) {} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} _ + +clear + +load 1m + metric{group="just-floats", series="1"} 2 + metric{group="just-floats", series="2"} 3 + metric{group="just-exponential-histograms", series="1"} {{sum:3 count:4 buckets:[1 2 1]}} + metric{group="just-exponential-histograms", series="2"} {{sum:2 count:3 
buckets:[1 1 1]}} + metric{group="just-custom-histograms", series="1"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} + metric{group="just-custom-histograms", series="2"} {{schema:-53 sum:3 count:4 custom_values:[2] buckets:[7]}} + metric{group="floats-and-histograms", series="1"} 2 + metric{group="floats-and-histograms", series="2"} {{sum:2 count:3 buckets:[1 1 1]}} + metric{group="exponential-and-custom-histograms", series="1"} {{sum:2 count:3 buckets:[1 1 1]}} + metric{group="exponential-and-custom-histograms", series="2"} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} + metric{group="incompatible-custom-histograms", series="1"} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} + metric{group="incompatible-custom-histograms", series="2"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} + +eval_warn instant at 0 sum by (group) (metric) + {group="just-floats"} 5 + {group="just-exponential-histograms"} {{sum:5 count:7 buckets:[2 3 2]}} + {group="just-custom-histograms"} {{schema:-53 sum:4 count:5 custom_values:[2] buckets:[8]}} From 82f38d3e9a6b4eaa7a1ea804762c7da8ae44e930 Mon Sep 17 00:00:00 2001 From: machine424 Date: Fri, 9 Aug 2024 14:53:40 +0200 Subject: [PATCH 67/99] fix(tsdb/db_test.go): close the corrupted chunk after creating it to satisfy Windows FS Signed-off-by: machine424 --- tsdb/db_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tsdb/db_test.go b/tsdb/db_test.go index cf41e25f2..5943489ff 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -2690,8 +2690,9 @@ func TestDBReadOnly_Querier_NoAlteration(t *testing.T) { require.NoError(t, db.Close()) // Simulate a corrupted chunk: without a header. - _, err := os.Create(path.Join(mmappedChunksDir(db.dir), "000001")) + chunk, err := os.Create(path.Join(mmappedChunksDir(db.dir), "000001")) require.NoError(t, err) + require.NoError(t, chunk.Close()) spinUpQuerierAndCheck(db.dir, t.TempDir(), 1) From 06a8886b94e18c65fe682da1a66c0172f8236494 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Mon, 12 Aug 2024 10:39:08 +0200 Subject: [PATCH 68/99] Native histograms: define behavior when rate is null. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Histogram quantile returns NaN in this case, which might be surprising, so add a unit test that clarifies that this is intentional. Signed-off-by: György Krajcsovits --- promql/promqltest/testdata/histograms.test | 29 ++++++++++++++ .../testdata/native_histograms.test | 39 +++++++++++++++++++ 2 files changed, 68 insertions(+) diff --git a/promql/promqltest/testdata/histograms.test b/promql/promqltest/testdata/histograms.test index 349a1e79c..ef3ca0078 100644 --- a/promql/promqltest/testdata/histograms.test +++ b/promql/promqltest/testdata/histograms.test @@ -482,3 +482,32 @@ load_with_nhcb 5m eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*_bucket"}) eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*"}) + +# Histogram with constant buckets. +load_with_nhcb 1m + const_histogram_bucket{le="0.0"} 1 1 1 1 1 + const_histogram_bucket{le="1.0"} 1 1 1 1 1 + const_histogram_bucket{le="2.0"} 1 1 1 1 1 + const_histogram_bucket{le="+Inf"} 1 1 1 1 1 + +# There is no change to the bucket count over time, thus rate is 0 in each bucket. 
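+# Each classic-histogram bucket is exposed as its own series here, so every
+# le label is expected to rate to 0 individually.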
+eval instant at 5m rate(const_histogram_bucket[5m]) + {le="0.0"} 0 + {le="1.0"} 0 + {le="2.0"} 0 + {le="+Inf"} 0 + +# There is no change to the bucket count over time, thus rate is 0 in each bucket. +# However native histograms do not represent empty buckets, so here the zeros are implicit. +eval instant at 5m rate(const_histogram[5m]) + {} {{schema:-53 sum:0 count:0 custom_values:[0.0 1.0 2.0]}} + +# Zero buckets mean no observations, so there is no value that observations fall bellow, +# which means that any quantile is a NaN. +eval instant at 5m histogram_quantile(1.0, sum by (le) (rate(const_histogram_bucket[5m]))) + {} NaN + +# Zero buckets mean no observations, so there is no value that observations fall bellow, +# which means that any quantile is a NaN. +eval instant at 5m histogram_quantile(1.0, sum(rate(const_histogram[5m]))) + {} NaN diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index f91626c34..a9ac0303c 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -784,3 +784,42 @@ eval_warn instant at 1m rate(some_metric[30s]) # Start with exponential, end with custom. eval_warn instant at 30s rate(some_metric[30s]) # Should produce no results. + +# Histogram with constant buckets. +load 1m + const_histogram {{schema:0 sum:1 count:1 buckets:[1 1 1]}} {{schema:0 sum:1 count:1 buckets:[1 1 1]}} {{schema:0 sum:1 count:1 buckets:[1 1 1]}} {{schema:0 sum:1 count:1 buckets:[1 1 1]}} {{schema:0 sum:1 count:1 buckets:[1 1 1]}} + +# There is no change to the bucket count over time, thus rate is 0 in each bucket. +# However native histograms do not represent empty buckets, so here the zeros are implicit. +eval instant at 5m rate(const_histogram[5m]) + {} {{schema:0 sum:0 count:0}} + +# Zero buckets mean no observations, so average has no meaningful value. +eval instant at 5m histogram_avg(rate(const_histogram[5m])) + {} NaN + +# Zero buckets mean no observations, so count is 0. +eval instant at 5m histogram_count(rate(const_histogram[5m])) + {} 0.0 + +# Zero buckets mean no observations, so the sum should be NaN, However +# we return 0 for compatibility with classic histograms. +eval instant at 5m histogram_sum(rate(const_histogram[5m])) + {} 0.0 + +# BUG??? Zero buckets mean no observations, thus any fraction should be 0. +eval instant at 5m histogram_fraction(0.0, 1.0, rate(const_histogram[5m])) + {} NaN + +# Zero buckets mean no observations, so there is no value that observations fall bellow, +# which means that any quantile is a NaN. +eval instant at 5m histogram_quantile(1.0, rate(const_histogram[5m])) + {} NaN + +# Zero buckets mean no observations, so there is no standard deviation. +eval instant at 5m histogram_stddev(rate(const_histogram[5m])) + {} NaN + +# Zero buckets mean no observations, so there is no standard variance. 
+eval instant at 5m histogram_stdvar(rate(const_histogram[5m])) + {} NaN From 6aee5b4b38fb2653dc1a93eda960f24de56f1305 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Mon, 12 Aug 2024 12:04:45 +0200 Subject: [PATCH 69/99] fix typo MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- promql/promqltest/testdata/histograms.test | 4 ++-- promql/promqltest/testdata/native_histograms.test | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/promql/promqltest/testdata/histograms.test b/promql/promqltest/testdata/histograms.test index ef3ca0078..70df9434e 100644 --- a/promql/promqltest/testdata/histograms.test +++ b/promql/promqltest/testdata/histograms.test @@ -502,12 +502,12 @@ eval instant at 5m rate(const_histogram_bucket[5m]) eval instant at 5m rate(const_histogram[5m]) {} {{schema:-53 sum:0 count:0 custom_values:[0.0 1.0 2.0]}} -# Zero buckets mean no observations, so there is no value that observations fall bellow, +# Zero buckets mean no observations, so there is no value that observations fall below, # which means that any quantile is a NaN. eval instant at 5m histogram_quantile(1.0, sum by (le) (rate(const_histogram_bucket[5m]))) {} NaN -# Zero buckets mean no observations, so there is no value that observations fall bellow, +# Zero buckets mean no observations, so there is no value that observations fall below, # which means that any quantile is a NaN. eval instant at 5m histogram_quantile(1.0, sum(rate(const_histogram[5m]))) {} NaN diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index a9ac0303c..948e15806 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -811,7 +811,7 @@ eval instant at 5m histogram_sum(rate(const_histogram[5m])) eval instant at 5m histogram_fraction(0.0, 1.0, rate(const_histogram[5m])) {} NaN -# Zero buckets mean no observations, so there is no value that observations fall bellow, +# Zero buckets mean no observations, so there is no value that observations fall below, # which means that any quantile is a NaN. eval instant at 5m histogram_quantile(1.0, rate(const_histogram[5m])) {} NaN From 7694c89497c72fd3b163fc101612b464a763b542 Mon Sep 17 00:00:00 2001 From: Lukasz Mierzwa Date: Mon, 12 Aug 2024 14:01:20 +0100 Subject: [PATCH 70/99] Increase TestHangingNotifier timeout This test keeps timing out on our arm64 CI server, it does use a very slow timeout and that 5ms doesn't seem to be enough. But it 10x. 
Signed-off-by: Lukasz Mierzwa --- notifier/notifier_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/notifier/notifier_test.go b/notifier/notifier_test.go index 2cdaa9e06..cf922a537 100644 --- a/notifier/notifier_test.go +++ b/notifier/notifier_test.go @@ -711,7 +711,7 @@ func TestHangingNotifier(t *testing.T) { ) var ( - sendTimeout = 10 * time.Millisecond + sendTimeout = 100 * time.Millisecond sdUpdatert = sendTimeout / 2 done = make(chan struct{}) From 0503d4f3722621904bf095fb0d4805b68e3066d5 Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Tue, 13 Aug 2024 08:55:24 +0200 Subject: [PATCH 71/99] PromQL: Fix comment regarding non-nil histogram pointer Signed-off-by: Arve Knudsen --- promql/engine.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/promql/engine.go b/promql/engine.go index 1427302e5..30af001d3 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -2357,7 +2357,7 @@ loop: histograms = append(histograms, HPoint{H: &histogram.FloatHistogram{}}) } if histograms[n].H == nil { - // Make sure to pass non zero H to AtFloatHistogram so that it does a deep-copy. + // Make sure to pass non-nil H to AtFloatHistogram so that it does a deep-copy. // Not an issue in the loop above since that uses an intermediate buffer. histograms[n].H = &histogram.FloatHistogram{} } From 386fc8b9f69c0ac5ca81273a145ced408ea9d6d1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Tue, 13 Aug 2024 15:26:07 +0200 Subject: [PATCH 72/99] Update from review comments. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- promql/promqltest/testdata/histograms.test | 5 +---- promql/promqltest/testdata/native_histograms.test | 13 +++++++++---- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/promql/promqltest/testdata/histograms.test b/promql/promqltest/testdata/histograms.test index 70df9434e..47cba7993 100644 --- a/promql/promqltest/testdata/histograms.test +++ b/promql/promqltest/testdata/histograms.test @@ -497,8 +497,7 @@ eval instant at 5m rate(const_histogram_bucket[5m]) {le="2.0"} 0 {le="+Inf"} 0 -# There is no change to the bucket count over time, thus rate is 0 in each bucket. -# However native histograms do not represent empty buckets, so here the zeros are implicit. +# Native histograms do not represent empty buckets, so here the zeros are implicit. eval instant at 5m rate(const_histogram[5m]) {} {{schema:-53 sum:0 count:0 custom_values:[0.0 1.0 2.0]}} @@ -507,7 +506,5 @@ eval instant at 5m rate(const_histogram[5m]) eval instant at 5m histogram_quantile(1.0, sum by (le) (rate(const_histogram_bucket[5m]))) {} NaN -# Zero buckets mean no observations, so there is no value that observations fall below, -# which means that any quantile is a NaN. eval instant at 5m histogram_quantile(1.0, sum(rate(const_histogram[5m]))) {} NaN diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index 948e15806..c2a5012e9 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -794,7 +794,8 @@ load 1m eval instant at 5m rate(const_histogram[5m]) {} {{schema:0 sum:0 count:0}} -# Zero buckets mean no observations, so average has no meaningful value. +# Zero buckets mean no observations, thus the denominator in the average is 0 +# leading to 0/0, which is NaN. 
eval instant at 5m histogram_avg(rate(const_histogram[5m])) {} NaN @@ -802,15 +803,19 @@ eval instant at 5m histogram_avg(rate(const_histogram[5m])) eval instant at 5m histogram_count(rate(const_histogram[5m])) {} 0.0 -# Zero buckets mean no observations, so the sum should be NaN, However -# we return 0 for compatibility with classic histograms. +# Zero buckets mean no observations and empty histogram has a sum of 0 by definition. eval instant at 5m histogram_sum(rate(const_histogram[5m])) {} 0.0 -# BUG??? Zero buckets mean no observations, thus any fraction should be 0. +# Zero buckets mean no observations, thus the denominator in the fraction is 0, +# leading to 0/0, which is NaN. eval instant at 5m histogram_fraction(0.0, 1.0, rate(const_histogram[5m])) {} NaN +# Workaround to calculate the observation count corresponding to NaN fraction. +eval instant at 5m histogram_count(rate(const_histogram[5m])) == 0.0 or histogram_fraction(0.0, 1.0, rate(const_histogram[5m])) * histogram_count(rate(const_histogram[5m])) + {} 0.0 + # Zero buckets mean no observations, so there is no value that observations fall below, # which means that any quantile is a NaN. eval instant at 5m histogram_quantile(1.0, rate(const_histogram[5m])) From 41656162fc377a0528b4ceadad1bf4831fd38037 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Wed, 14 Aug 2024 11:13:47 +0200 Subject: [PATCH 73/99] tsdb: prepare inserting native histograms into OOO head MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rename a variable. Add parameters to memSeries.insert function. No effect on how float samples are handled. Related to #14546 Signed-off-by: György Krajcsovits --- tsdb/head_append.go | 12 ++++++------ tsdb/head_wal.go | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/tsdb/head_append.go b/tsdb/head_append.go index b508643c3..59681b8da 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -840,7 +840,7 @@ func (a *headAppender) Commit() (err error) { floatsAppended = len(a.samples) histogramsAppended = len(a.histograms) + len(a.floatHistograms) // number of samples out of order but accepted: with ooo enabled and within time window - floatOOOAccepted int + oooFloatsAccepted int // number of samples rejected due to: out of order but OOO support disabled. floatOOORejected int histoOOORejected int @@ -936,7 +936,7 @@ func (a *headAppender) Commit() (err error) { // Sample is OOO and OOO handling is enabled // and the delta is within the OOO tolerance. var mmapRefs []chunks.ChunkDiskMapperRef - ok, chunkCreated, mmapRefs = series.insert(s.T, s.V, a.head.chunkDiskMapper, oooCapMax) + ok, chunkCreated, mmapRefs = series.insert(s.T, s.V, nil, nil, a.head.chunkDiskMapper, oooCapMax) if chunkCreated { r, ok := oooMmapMarkers[series.ref] if !ok || r != nil { @@ -969,7 +969,7 @@ func (a *headAppender) Commit() (err error) { if s.T > oooMaxT { oooMaxT = s.T } - floatOOOAccepted++ + oooFloatsAccepted++ } else { // Sample is an exact duplicate of the last sample. 
// NOTE: We can only detect updates if they clash with a sample in the OOOHeadChunk, @@ -1065,7 +1065,7 @@ func (a *headAppender) Commit() (err error) { a.head.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(floatTooOldRejected)) a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(floatsAppended)) a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(histogramsAppended)) - a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(floatOOOAccepted)) + a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(oooFloatsAccepted)) a.head.updateMinMaxTime(inOrderMint, inOrderMaxt) a.head.updateMinOOOMaxOOOTime(oooMinT, oooMaxT) @@ -1083,7 +1083,7 @@ func (a *headAppender) Commit() (err error) { } // insert is like append, except it inserts. Used for OOO samples. -func (s *memSeries) insert(t int64, v float64, chunkDiskMapper *chunks.ChunkDiskMapper, oooCapMax int64) (inserted, chunkCreated bool, mmapRefs []chunks.ChunkDiskMapperRef) { +func (s *memSeries) insert(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, chunkDiskMapper *chunks.ChunkDiskMapper, oooCapMax int64) (inserted, chunkCreated bool, mmapRefs []chunks.ChunkDiskMapperRef) { if s.ooo == nil { s.ooo = &memSeriesOOOFields{} } @@ -1094,7 +1094,7 @@ func (s *memSeries) insert(t int64, v float64, chunkDiskMapper *chunks.ChunkDisk chunkCreated = true } - ok := c.chunk.Insert(t, v, nil, nil) + ok := c.chunk.Insert(t, v, h, fh) if ok { if chunkCreated || t < c.minTime { c.minTime = t diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index 2852709a0..85b0c656d 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -890,7 +890,7 @@ func (wp *wblSubsetProcessor) processWBLSamples(h *Head) (unknownRefs uint64) { unknownRefs++ continue } - ok, chunkCreated, _ := ms.insert(s.T, s.V, h.chunkDiskMapper, oooCapMax) + ok, chunkCreated, _ := ms.insert(s.T, s.V, nil, nil, h.chunkDiskMapper, oooCapMax) if chunkCreated { h.metrics.chunksCreated.Inc() h.metrics.chunks.Inc() From 0c852680bf921036624f6672b7814ad380a99222 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Sat, 29 Jun 2024 17:49:49 +0100 Subject: [PATCH 74/99] [Benchmark] TSDB: Add BenchmarkQuerierSelectWithOutOfOrder Refactor existing BenchmarkQuerierSelect to provide the set-up. Note that Head queries now run faster because they use a RangeHead. Signed-off-by: Bryan Boreham --- tsdb/block.go | 5 ++ tsdb/querier_bench_test.go | 106 +++++++++++++++++++++++++------------ 2 files changed, 76 insertions(+), 35 deletions(-) diff --git a/tsdb/block.go b/tsdb/block.go index 2f32733f8..c55e22ce5 100644 --- a/tsdb/block.go +++ b/tsdb/block.go @@ -467,6 +467,11 @@ func (pb *Block) setCompactionFailed() error { return nil } +// Querier implements Queryable. 
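+// It returns a querier over the block's index, chunks and tombstones,
+// restricted to the given mint/maxt range.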
+func (pb *Block) Querier(mint, maxt int64) (storage.Querier, error) { + return NewBlockQuerier(pb, mint, maxt) +} + type blockIndexReader struct { ir IndexReader b *Block diff --git a/tsdb/querier_bench_test.go b/tsdb/querier_bench_test.go index 9a8230242..e3e457d07 100644 --- a/tsdb/querier_bench_test.go +++ b/tsdb/querier_bench_test.go @@ -20,6 +20,7 @@ import ( "testing" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/index" "github.com/stretchr/testify/require" @@ -254,56 +255,91 @@ func BenchmarkMergedStringIter(b *testing.B) { b.ReportAllocs() } -func BenchmarkQuerierSelect(b *testing.B) { - opts := DefaultHeadOptions() - opts.ChunkRange = 1000 - opts.ChunkDirRoot = b.TempDir() - h, err := NewHead(nil, nil, nil, nil, opts, nil) +func createHeadForBenchmarkSelect(b *testing.B, numSeries int, addSeries func(app storage.Appender, i int)) (*Head, *DB) { + dir := b.TempDir() + opts := DefaultOptions() + opts.OutOfOrderCapMax = 255 + opts.OutOfOrderTimeWindow = 1000 + db, err := Open(dir, nil, nil, opts, nil) require.NoError(b, err) - defer h.Close() + b.Cleanup(func() { + require.NoError(b, db.Close()) + }) + h := db.Head() + app := h.Appender(context.Background()) - numSeries := 1000000 for i := 0; i < numSeries; i++ { - app.Append(0, labels.FromStrings("foo", "bar", "i", fmt.Sprintf("%d%s", i, postingsBenchSuffix)), int64(i), 0) + addSeries(app, i) } require.NoError(b, app.Commit()) + return h, db +} - bench := func(b *testing.B, br BlockReader, sorted bool) { - matcher := labels.MustNewMatcher(labels.MatchEqual, "foo", "bar") - for s := 1; s <= numSeries; s *= 10 { - b.Run(fmt.Sprintf("%dof%d", s, numSeries), func(b *testing.B) { - q, err := NewBlockQuerier(br, 0, int64(s-1)) - require.NoError(b, err) +func benchmarkSelect(b *testing.B, queryable storage.Queryable, numSeries int, sorted bool) { + matcher := labels.MustNewMatcher(labels.MatchEqual, "foo", "bar") + b.ResetTimer() + for s := 1; s <= numSeries; s *= 10 { + b.Run(fmt.Sprintf("%dof%d", s, numSeries), func(b *testing.B) { + q, err := queryable.Querier(0, int64(s-1)) + require.NoError(b, err) - b.ResetTimer() - for i := 0; i < b.N; i++ { - ss := q.Select(context.Background(), sorted, nil, matcher) - for ss.Next() { - } - require.NoError(b, ss.Err()) + b.ResetTimer() + for i := 0; i < b.N; i++ { + ss := q.Select(context.Background(), sorted, nil, matcher) + for ss.Next() { } - q.Close() - }) - } + require.NoError(b, ss.Err()) + } + q.Close() + }) } +} + +func BenchmarkQuerierSelect(b *testing.B) { + numSeries := 1000000 + h, db := createHeadForBenchmarkSelect(b, numSeries, func(app storage.Appender, i int) { + _, err := app.Append(0, labels.FromStrings("foo", "bar", "i", fmt.Sprintf("%d%s", i, postingsBenchSuffix)), int64(i), 0) + if err != nil { + b.Fatal(err) + } + }) b.Run("Head", func(b *testing.B) { - bench(b, h, false) + benchmarkSelect(b, db, numSeries, false) }) b.Run("SortedHead", func(b *testing.B) { - bench(b, h, true) + benchmarkSelect(b, db, numSeries, true) }) - tmpdir := b.TempDir() - - blockdir := createBlockFromHead(b, tmpdir, h) - block, err := OpenBlock(nil, blockdir, nil) - require.NoError(b, err) - defer func() { - require.NoError(b, block.Close()) - }() - b.Run("Block", func(b *testing.B) { - bench(b, block, false) + tmpdir := b.TempDir() + + blockdir := createBlockFromHead(b, tmpdir, h) + block, err := OpenBlock(nil, blockdir, nil) + require.NoError(b, err) + defer func() { + require.NoError(b, block.Close()) + }() + + 
benchmarkSelect(b, block, numSeries, false) + }) +} + +func BenchmarkQuerierSelectWithOutOfOrder(b *testing.B) { + numSeries := 1000000 + _, db := createHeadForBenchmarkSelect(b, numSeries, func(app storage.Appender, i int) { + l := labels.FromStrings("foo", "bar", "i", fmt.Sprintf("%d%s", i, postingsBenchSuffix)) + ref, err := app.Append(0, l, int64(i+1), 0) + if err != nil { + b.Fatal(err) + } + _, err = app.Append(ref, l, int64(i), 1) // Out of order sample + if err != nil { + b.Fatal(err) + } + }) + + b.Run("Head", func(b *testing.B) { + benchmarkSelect(b, db, numSeries, false) }) } From c75c8f8329758f82279d62b483a50c2fae00c283 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 24 Jun 2024 21:06:50 +0100 Subject: [PATCH 75/99] Refactoring: extract getSeriesChunks Signed-off-by: Bryan Boreham --- tsdb/head_read.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/tsdb/head_read.go b/tsdb/head_read.go index c8b394be8..ff9345fa0 100644 --- a/tsdb/head_read.go +++ b/tsdb/head_read.go @@ -200,9 +200,15 @@ func (h *headIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchB *chks = (*chks)[:0] + getSeriesChunks(s, h.mint, h.maxt, chks) + + return nil +} + +func getSeriesChunks(s *memSeries, mint, maxt int64, chks *[]chunks.Meta) { for i, c := range s.mmappedChunks { // Do not expose chunks that are outside of the specified range. - if !c.OverlapsClosedInterval(h.mint, h.maxt) { + if !c.OverlapsClosedInterval(mint, maxt) { continue } *chks = append(*chks, chunks.Meta{ @@ -223,7 +229,7 @@ func (h *headIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchB } else { maxTime = chk.maxTime } - if chk.OverlapsClosedInterval(h.mint, h.maxt) { + if chk.OverlapsClosedInterval(mint, maxt) { *chks = append(*chks, chunks.Meta{ MinTime: chk.minTime, MaxTime: maxTime, @@ -233,8 +239,6 @@ func (h *headIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchB j++ } } - - return nil } // headChunkID returns the HeadChunkID referred to by the given position. From a32aca0cd74d5d1acbef0abd58ec48f2a8e560c5 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Thu, 27 Jun 2024 09:25:26 +0100 Subject: [PATCH 76/99] Refactoring: extract getOOOSeriesChunks Signed-off-by: Bryan Boreham --- tsdb/ooo_head_read.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go index 9d5b9d644..892d2c4b6 100644 --- a/tsdb/ooo_head_read.go +++ b/tsdb/ooo_head_read.go @@ -92,6 +92,10 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra return nil } + return getOOOSeriesChunks(s, oh.mint, oh.maxt, lastGarbageCollectedMmapRef, maxMmapRef, chks) +} + +func getOOOSeriesChunks(s *memSeries, mint, maxt int64, lastGarbageCollectedMmapRef, maxMmapRef chunks.ChunkDiskMapperRef, chks *[]chunks.Meta) error { tmpChks := make([]chunks.Meta, 0, len(s.ooo.oooMmappedChunks)) addChunk := func(minT, maxT int64, ref chunks.ChunkRef, chunk chunkenc.Chunk) { @@ -106,7 +110,7 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra // Collect all chunks that overlap the query range. if s.ooo.oooHeadChunk != nil { c := s.ooo.oooHeadChunk - if c.OverlapsClosedInterval(oh.mint, oh.maxt) && maxMmapRef == 0 { + if c.OverlapsClosedInterval(mint, maxt) && maxMmapRef == 0 { ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(len(s.ooo.oooMmappedChunks)))) if len(c.chunk.samples) > 0 { // Empty samples happens in tests, at least. 
chks, err := s.ooo.oooHeadChunk.chunk.ToEncodedChunks(c.minTime, c.maxTime) @@ -125,7 +129,7 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra } for i := len(s.ooo.oooMmappedChunks) - 1; i >= 0; i-- { c := s.ooo.oooMmappedChunks[i] - if c.OverlapsClosedInterval(oh.mint, oh.maxt) && (maxMmapRef == 0 || maxMmapRef.GreaterThanOrEqualTo(c.ref)) && (lastGarbageCollectedMmapRef == 0 || c.ref.GreaterThan(lastGarbageCollectedMmapRef)) { + if c.OverlapsClosedInterval(mint, maxt) && (maxMmapRef == 0 || maxMmapRef.GreaterThanOrEqualTo(c.ref)) && (lastGarbageCollectedMmapRef == 0 || c.ref.GreaterThan(lastGarbageCollectedMmapRef)) { ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(i))) addChunk(c.minTime, c.maxTime, ref, nil) } From 7e24844d081f82ef1d3933ace4477d60cec7d05b Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Wed, 26 Jun 2024 20:48:39 +0100 Subject: [PATCH 77/99] Refactor: extract headChunkReader.chunkFromSeries() For when you have a series locked already. Signed-off-by: Bryan Boreham --- tsdb/head_read.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/tsdb/head_read.go b/tsdb/head_read.go index ff9345fa0..d75d28a58 100644 --- a/tsdb/head_read.go +++ b/tsdb/head_read.go @@ -362,9 +362,14 @@ func (h *headChunkReader) chunk(meta chunks.Meta, copyLastChunk bool) (chunkenc. } s.Lock() + defer s.Unlock() + return h.chunkFromSeries(s, cid, copyLastChunk) +} + +// Call with s locked. +func (h *headChunkReader) chunkFromSeries(s *memSeries, cid chunks.HeadChunkID, copyLastChunk bool) (chunkenc.Chunk, int64, error) { c, headChunk, isOpen, err := s.chunk(cid, h.head.chunkDiskMapper, &h.head.memChunkPool) if err != nil { - s.Unlock() return nil, 0, err } defer func() { @@ -378,7 +383,6 @@ func (h *headChunkReader) chunk(meta chunks.Meta, copyLastChunk bool) (chunkenc. // This means that the chunk is outside the specified range. if !c.OverlapsClosedInterval(h.mint, h.maxt) { - s.Unlock() return nil, 0, storage.ErrNotFound } @@ -395,7 +399,6 @@ func (h *headChunkReader) chunk(meta chunks.Meta, copyLastChunk bool) (chunkenc. return nil, 0, err } } - s.Unlock() return &safeHeadChunk{ Chunk: chk, From da31da3ea6f46da2b3c605d5a85c4d3fc80dd560 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Thu, 27 Jun 2024 10:36:25 +0100 Subject: [PATCH 78/99] Refactor: extract selectSeriesSet and selectChunkSeriesSet Signed-off-by: Bryan Boreham --- tsdb/querier.go | 34 +++++++++++++++++++++------------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/tsdb/querier.go b/tsdb/querier.go index 910c2d7fc..37456d7e2 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ -115,20 +115,24 @@ func NewBlockQuerier(b BlockReader, mint, maxt int64) (storage.Querier, error) { } func (q *blockQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) storage.SeriesSet { - mint := q.mint - maxt := q.maxt + return selectSeriesSet(ctx, sortSeries, hints, ms, q.index, q.chunks, q.tombstones, q.mint, q.maxt) +} + +func selectSeriesSet(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms []*labels.Matcher, + index IndexReader, chunks ChunkReader, tombstones tombstones.Reader, mint, maxt int64, +) storage.SeriesSet { disableTrimming := false sharded := hints != nil && hints.ShardCount > 0 - p, err := PostingsForMatchers(ctx, q.index, ms...) + p, err := PostingsForMatchers(ctx, index, ms...) 
if err != nil { return storage.ErrSeriesSet(err) } if sharded { - p = q.index.ShardedPostings(p, hints.ShardIndex, hints.ShardCount) + p = index.ShardedPostings(p, hints.ShardIndex, hints.ShardCount) } if sortSeries { - p = q.index.SortedPostings(p) + p = index.SortedPostings(p) } if hints != nil { @@ -137,11 +141,11 @@ func (q *blockQuerier) Select(ctx context.Context, sortSeries bool, hints *stora disableTrimming = hints.DisableTrimming if hints.Func == "series" { // When you're only looking up metadata (for example series API), you don't need to load any chunks. - return newBlockSeriesSet(q.index, newNopChunkReader(), q.tombstones, p, mint, maxt, disableTrimming) + return newBlockSeriesSet(index, newNopChunkReader(), tombstones, p, mint, maxt, disableTrimming) } } - return newBlockSeriesSet(q.index, q.chunks, q.tombstones, p, mint, maxt, disableTrimming) + return newBlockSeriesSet(index, chunks, tombstones, p, mint, maxt, disableTrimming) } // blockChunkQuerier provides chunk querying access to a single block database. @@ -159,8 +163,12 @@ func NewBlockChunkQuerier(b BlockReader, mint, maxt int64) (storage.ChunkQuerier } func (q *blockChunkQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) storage.ChunkSeriesSet { - mint := q.mint - maxt := q.maxt + return selectChunkSeriesSet(ctx, sortSeries, hints, ms, q.blockID, q.index, q.chunks, q.tombstones, q.mint, q.maxt) +} + +func selectChunkSeriesSet(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms []*labels.Matcher, + blockID ulid.ULID, index IndexReader, chunks ChunkReader, tombstones tombstones.Reader, mint, maxt int64, +) storage.ChunkSeriesSet { disableTrimming := false sharded := hints != nil && hints.ShardCount > 0 @@ -169,17 +177,17 @@ func (q *blockChunkQuerier) Select(ctx context.Context, sortSeries bool, hints * maxt = hints.End disableTrimming = hints.DisableTrimming } - p, err := PostingsForMatchers(ctx, q.index, ms...) + p, err := PostingsForMatchers(ctx, index, ms...) if err != nil { return storage.ErrChunkSeriesSet(err) } if sharded { - p = q.index.ShardedPostings(p, hints.ShardIndex, hints.ShardCount) + p = index.ShardedPostings(p, hints.ShardIndex, hints.ShardCount) } if sortSeries { - p = q.index.SortedPostings(p) + p = index.SortedPostings(p) } - return NewBlockChunkSeriesSet(q.blockID, q.index, q.chunks, q.tombstones, p, mint, maxt, disableTrimming) + return NewBlockChunkSeriesSet(blockID, index, chunks, tombstones, p, mint, maxt, disableTrimming) } // PostingsForMatchers assembles a single postings iterator against the index reader From 2936ab80d7dbc0c944d99346ea7ab26449fe82d3 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Thu, 27 Jun 2024 12:47:31 +0100 Subject: [PATCH 79/99] [Tests] Promtool: Sort output where Prometheus does not guarantee the order. Previously this was working because iout-of-order chunks forced a sort and merge. 
Signed-off-by: Bryan Boreham --- cmd/promtool/tsdb_test.go | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/cmd/promtool/tsdb_test.go b/cmd/promtool/tsdb_test.go index 75089b168..d7cc56088 100644 --- a/cmd/promtool/tsdb_test.go +++ b/cmd/promtool/tsdb_test.go @@ -20,6 +20,7 @@ import ( "math" "os" "runtime" + "slices" "strings" "testing" "time" @@ -152,12 +153,18 @@ func TestTSDBDump(t *testing.T) { expectedMetrics, err := os.ReadFile(tt.expectedDump) require.NoError(t, err) expectedMetrics = normalizeNewLine(expectedMetrics) - // even though in case of one matcher samples are not sorted, the order in the cases above should stay the same. - require.Equal(t, string(expectedMetrics), dumpedMetrics) + // Sort both, because Prometheus does not guarantee the output order. + require.Equal(t, sortLines(string(expectedMetrics)), sortLines(dumpedMetrics)) }) } } +func sortLines(buf string) string { + lines := strings.Split(buf, "\n") + slices.Sort(lines) + return strings.Join(lines, "\n") +} + func TestTSDBDumpOpenMetrics(t *testing.T) { storage := promqltest.LoadedStorage(t, ` load 1m @@ -169,7 +176,7 @@ func TestTSDBDumpOpenMetrics(t *testing.T) { require.NoError(t, err) expectedMetrics = normalizeNewLine(expectedMetrics) dumpedMetrics := getDumpedSamples(t, storage.Dir(), math.MinInt64, math.MaxInt64, []string{"{__name__=~'(?s:.*)'}"}, formatSeriesSetOpenMetrics) - require.Equal(t, string(expectedMetrics), dumpedMetrics) + require.Equal(t, sortLines(string(expectedMetrics)), sortLines(dumpedMetrics)) } func TestTSDBDumpOpenMetricsRoundTrip(t *testing.T) { From e04d137649697ea59b0e5dbfad965ae24d6c0faa Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 24 Jun 2024 13:41:44 +0100 Subject: [PATCH 80/99] [PERF] TSDB: Query head and ooo-head together Add `HeadAndOOOQuerier` which iterates just once over series, then where necessary merges chunks from in-order and out-of-order lists. Add a ChunkQuerier for in-order and ooo together Add copy-last-chunk behaviour to HeadAndOOOChunkReader Out-of-order chunk IDs are distinguished from in-order by setting bit 23. Signed-off-by: Bryan Boreham --- tsdb/db.go | 58 +++++++----- tsdb/head_read.go | 51 +++++++++-- tsdb/ooo_head_read.go | 182 ++++++++++++++++++++++++++++++++++++- tsdb/ooo_head_read_test.go | 2 +- tsdb/querier.go | 12 ++- 5 files changed, 263 insertions(+), 42 deletions(-) diff --git a/tsdb/db.go b/tsdb/db.go index 1c430c211..bf1893ec0 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -2029,7 +2029,7 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) { } } - blockQueriers := make([]storage.Querier, 0, len(blocks)+2) // +2 to allow for possible in-order and OOO head queriers + blockQueriers := make([]storage.Querier, 0, len(blocks)+1) // +1 to allow for possible head querier. defer func() { if err != nil { @@ -2041,10 +2041,11 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) { } }() + var headQuerier storage.Querier if maxt >= db.head.MinTime() { rh := NewRangeHead(db.head, mint, maxt) var err error - inOrderHeadQuerier, err := db.blockQuerierFunc(rh, mint, maxt) + headQuerier, err = db.blockQuerierFunc(rh, mint, maxt) if err != nil { return nil, fmt.Errorf("open block querier for head %s: %w", rh, err) } @@ -2054,36 +2055,40 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) { // won't run into a race later since any truncation that comes after will wait on this querier if it overlaps. 
shouldClose, getNew, newMint := db.head.IsQuerierCollidingWithTruncation(mint, maxt) if shouldClose { - if err := inOrderHeadQuerier.Close(); err != nil { + if err := headQuerier.Close(); err != nil { return nil, fmt.Errorf("closing head block querier %s: %w", rh, err) } - inOrderHeadQuerier = nil + headQuerier = nil } if getNew { rh := NewRangeHead(db.head, newMint, maxt) - inOrderHeadQuerier, err = db.blockQuerierFunc(rh, newMint, maxt) + headQuerier, err = db.blockQuerierFunc(rh, newMint, maxt) if err != nil { return nil, fmt.Errorf("open block querier for head while getting new querier %s: %w", rh, err) } } - - if inOrderHeadQuerier != nil { - blockQueriers = append(blockQueriers, inOrderHeadQuerier) - } } - if overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) { + if headQuerier != nil { + if overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) { + // We need to fetch from in-order and out-of-order chunks: wrap the headQuerier. + isoState := db.head.oooIso.TrackReadAfter(db.lastGarbageCollectedMmapRef) + headQuerier = NewHeadAndOOOQuerier(mint, maxt, db.head, isoState, headQuerier) + } + } else if overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) { rh := NewOOORangeHead(db.head, mint, maxt, db.lastGarbageCollectedMmapRef) var err error - outOfOrderHeadQuerier, err := db.blockQuerierFunc(rh, mint, maxt) + headQuerier, err = db.blockQuerierFunc(rh, mint, maxt) if err != nil { // If BlockQuerierFunc() failed, make sure to clean up the pending read created by NewOOORangeHead. rh.isoState.Close() return nil, fmt.Errorf("open block querier for ooo head %s: %w", rh, err) } + } - blockQueriers = append(blockQueriers, outOfOrderHeadQuerier) + if headQuerier != nil { + blockQueriers = append(blockQueriers, headQuerier) } for _, b := range blocks { @@ -2111,7 +2116,7 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer } } - blockQueriers := make([]storage.ChunkQuerier, 0, len(blocks)+2) // +2 to allow for possible in-order and OOO head queriers + blockQueriers := make([]storage.ChunkQuerier, 0, len(blocks)+1) // +1 to allow for possible head querier. defer func() { if err != nil { @@ -2123,9 +2128,10 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer } }() + var headQuerier storage.ChunkQuerier if maxt >= db.head.MinTime() { rh := NewRangeHead(db.head, mint, maxt) - inOrderHeadQuerier, err := db.blockChunkQuerierFunc(rh, mint, maxt) + headQuerier, err = db.blockChunkQuerierFunc(rh, mint, maxt) if err != nil { return nil, fmt.Errorf("open querier for head %s: %w", rh, err) } @@ -2135,35 +2141,39 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer // won't run into a race later since any truncation that comes after will wait on this querier if it overlaps. 
shouldClose, getNew, newMint := db.head.IsQuerierCollidingWithTruncation(mint, maxt) if shouldClose { - if err := inOrderHeadQuerier.Close(); err != nil { + if err := headQuerier.Close(); err != nil { return nil, fmt.Errorf("closing head querier %s: %w", rh, err) } - inOrderHeadQuerier = nil + headQuerier = nil } if getNew { rh := NewRangeHead(db.head, newMint, maxt) - inOrderHeadQuerier, err = db.blockChunkQuerierFunc(rh, newMint, maxt) + headQuerier, err = db.blockChunkQuerierFunc(rh, newMint, maxt) if err != nil { return nil, fmt.Errorf("open querier for head while getting new querier %s: %w", rh, err) } } - - if inOrderHeadQuerier != nil { - blockQueriers = append(blockQueriers, inOrderHeadQuerier) - } } - if overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) { + if headQuerier != nil { + if overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) { + // We need to fetch from in-order and out-of-order chunks: wrap the headQuerier. + isoState := db.head.oooIso.TrackReadAfter(db.lastGarbageCollectedMmapRef) + headQuerier = NewHeadAndOOOChunkQuerier(mint, maxt, db.head, isoState, headQuerier) + } + } else if overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) { rh := NewOOORangeHead(db.head, mint, maxt, db.lastGarbageCollectedMmapRef) - outOfOrderHeadQuerier, err := db.blockChunkQuerierFunc(rh, mint, maxt) + headQuerier, err = db.blockChunkQuerierFunc(rh, mint, maxt) if err != nil { // If NewBlockQuerier() failed, make sure to clean up the pending read created by NewOOORangeHead. rh.isoState.Close() return nil, fmt.Errorf("open block chunk querier for ooo head %s: %w", rh, err) } + } - blockQueriers = append(blockQueriers, outOfOrderHeadQuerier) + if headQuerier != nil { + blockQueriers = append(blockQueriers, headQuerier) } for _, b := range blocks { diff --git a/tsdb/head_read.go b/tsdb/head_read.go index d75d28a58..977d6b978 100644 --- a/tsdb/head_read.go +++ b/tsdb/head_read.go @@ -248,12 +248,20 @@ func (s *memSeries) headChunkID(pos int) chunks.HeadChunkID { return chunks.HeadChunkID(pos) + s.firstChunkID } +const oooChunkIDMask = 1 << 23 + // oooHeadChunkID returns the HeadChunkID referred to by the given position. +// Only the bottom 24 bits are used. Bit 23 is always 1 for an OOO chunk; for the rest: // * 0 <= pos < len(s.oooMmappedChunks) refer to s.oooMmappedChunks[pos] // * pos == len(s.oooMmappedChunks) refers to s.oooHeadChunk // The caller must ensure that s.ooo is not nil. func (s *memSeries) oooHeadChunkID(pos int) chunks.HeadChunkID { - return chunks.HeadChunkID(pos) + s.ooo.firstOOOChunkID + return (chunks.HeadChunkID(pos) + s.ooo.firstOOOChunkID) | oooChunkIDMask +} + +func unpackHeadChunkRef(ref chunks.ChunkRef) (chunks.HeadSeriesRef, chunks.HeadChunkID, bool) { + sid, cid := chunks.HeadChunkRef(ref).Unpack() + return sid, (cid & (oooChunkIDMask - 1)), (cid & oooChunkIDMask) != 0 } // LabelValueFor returns label value for the given label name in the series referred to by ID. @@ -343,10 +351,15 @@ func (h *headChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chu return chk, nil, err } -// ChunkWithCopy returns the chunk for the reference number. -// If the chunk is the in-memory chunk, then it makes a copy and returns the copied chunk. 
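The bit-23 scheme from the head_read.go hunk above can be pictured in isolation. This is a minimal, self-contained sketch with hypothetical helper names (packOOO, unpackID), not code from the patch:

package main

import "fmt"

// oooMask marks bit 23 of a 24-bit head chunk ID, mirroring the idea of the
// oooChunkIDMask constant above. packOOO and unpackID are illustrative
// helpers only; they are not part of the Prometheus API.
const oooMask = 1 << 23

func packOOO(pos uint64) uint64 { return pos | oooMask }

func unpackID(id uint64) (pos uint64, isOOO bool) {
	return id &^ oooMask, id&oooMask != 0
}

func main() {
	id := packOOO(5)
	pos, isOOO := unpackID(id)
	fmt.Println(id, pos, isOOO) // 8388613 5 true
}
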
-func (h *headChunkReader) ChunkWithCopy(meta chunks.Meta) (chunkenc.Chunk, int64, error) { - return h.chunk(meta, true) +type ChunkReaderWithCopy interface { + ChunkOrIterableWithCopy(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, int64, error) +} + +// ChunkOrIterableWithCopy returns the chunk for the reference number. +// If the chunk is the in-memory chunk, then it makes a copy and returns the copied chunk, plus the max time of the chunk. +func (h *headChunkReader) ChunkOrIterableWithCopy(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, int64, error) { + chk, maxTime, err := h.chunk(meta, true) + return chk, nil, maxTime, err } // chunk returns the chunk for the reference number. @@ -472,10 +485,11 @@ func (s *memSeries) chunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDi // chunks.Meta reference from memory or by m-mapping it from the disk. The // returned iterable will be a merge of all the overlapping chunks, if any, // amongst all the chunks in the OOOHead. +// If hr is non-nil then in-order chunks are included. // This function is not thread safe unless the caller holds a lock. // The caller must ensure that s.ooo is not nil. -func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMapper, mint, maxt int64, maxMmapRef chunks.ChunkDiskMapperRef) (*mergedOOOChunks, error) { - _, cid := chunks.HeadChunkRef(meta.Ref).Unpack() +func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMapper, hr *headChunkReader, mint, maxt int64, maxMmapRef chunks.ChunkDiskMapperRef) (*mergedOOOChunks, error) { + _, cid, _ := unpackHeadChunkRef(meta.Ref) // ix represents the index of chunk in the s.mmappedChunks slice. The chunk meta's are // incremented by 1 when new chunk is created, hence (meta - firstChunkID) gives the slice index. @@ -516,6 +530,17 @@ func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMappe tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{meta: meta}) } + if hr != nil { // Include in-order chunks. + var metas []chunks.Meta + getSeriesChunks(s, max(meta.MinTime, mint), min(meta.MaxTime, maxt), &metas) + for _, m := range metas { + tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{ + meta: m, + ref: 0, // This tells the loop below it's an in-order head chunk. + }) + } + } + // Next we want to sort all the collected chunks by min time so we can find // those that overlap and stop when we know the rest don't. slices.SortFunc(tmpChks, refLessByMinTimeAndMinRef) @@ -527,9 +552,17 @@ func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMappe continue } var iterable chunkenc.Iterable - if c.meta.Chunk != nil { + switch { + case c.meta.Chunk != nil: iterable = c.meta.Chunk - } else { + case c.ref == 0: // This is an in-order head chunk. 
+ _, cid := chunks.HeadChunkRef(c.meta.Ref).Unpack() + var err error + iterable, _, err = hr.chunkFromSeries(s, cid, false) + if err != nil { + return nil, fmt.Errorf("invalid head chunk: %w", err) + } + default: chk, err := cdm.Chunk(c.ref) if err != nil { var cerr *chunks.CorruptionErr diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go index 892d2c4b6..b7944c56e 100644 --- a/tsdb/ooo_head_read.go +++ b/tsdb/ooo_head_read.go @@ -27,6 +27,7 @@ import ( "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/index" "github.com/prometheus/prometheus/tsdb/tombstones" + "github.com/prometheus/prometheus/util/annotations" ) var _ IndexReader = &OOOHeadIndexReader{} @@ -92,10 +93,10 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra return nil } - return getOOOSeriesChunks(s, oh.mint, oh.maxt, lastGarbageCollectedMmapRef, maxMmapRef, chks) + return getOOOSeriesChunks(s, oh.mint, oh.maxt, lastGarbageCollectedMmapRef, maxMmapRef, false, chks) } -func getOOOSeriesChunks(s *memSeries, mint, maxt int64, lastGarbageCollectedMmapRef, maxMmapRef chunks.ChunkDiskMapperRef, chks *[]chunks.Meta) error { +func getOOOSeriesChunks(s *memSeries, mint, maxt int64, lastGarbageCollectedMmapRef, maxMmapRef chunks.ChunkDiskMapperRef, includeInOrder bool, chks *[]chunks.Meta) error { tmpChks := make([]chunks.Meta, 0, len(s.ooo.oooMmappedChunks)) addChunk := func(minT, maxT int64, ref chunks.ChunkRef, chunk chunkenc.Chunk) { @@ -135,6 +136,10 @@ func getOOOSeriesChunks(s *memSeries, mint, maxt int64, lastGarbageCollectedMmap } } + if includeInOrder { + getSeriesChunks(s, mint, maxt, &tmpChks) + } + // There is nothing to do if we did not collect any chunk. if len(tmpChks) == 0 { return nil @@ -275,7 +280,7 @@ func (cr OOOHeadChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, s.Unlock() return nil, nil, storage.ErrNotFound } - mc, err := s.oooMergedChunks(meta, cr.head.chunkDiskMapper, cr.mint, cr.maxt, cr.maxMmapRef) + mc, err := s.oooMergedChunks(meta, cr.head.chunkDiskMapper, nil, cr.mint, cr.maxt, cr.maxMmapRef) s.Unlock() if err != nil { return nil, nil, err @@ -498,3 +503,174 @@ func (ir *OOOCompactionHeadIndexReader) LabelNamesFor(ctx context.Context, posti func (ir *OOOCompactionHeadIndexReader) Close() error { return ir.ch.oooIR.Close() } + +// HeadAndOOOQuerier queries both the head and the out-of-order head. +type HeadAndOOOQuerier struct { + mint, maxt int64 + head *Head + index IndexReader + chunkr ChunkReader + querier storage.Querier +} + +func NewHeadAndOOOQuerier(mint, maxt int64, head *Head, oooIsoState *oooIsolationState, querier storage.Querier) storage.Querier { + isoState := head.iso.State(mint, maxt) + return &HeadAndOOOQuerier{ + mint: mint, + maxt: maxt, + head: head, + index: NewHeadAndOOOIndexReader(head, mint, maxt, oooIsoState.minRef), + chunkr: NewHeadAndOOOChunkReader(head, mint, maxt, isoState, oooIsoState, 0), + querier: querier, + } +} + +func (q *HeadAndOOOQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + return q.querier.LabelValues(ctx, name, hints, matchers...) +} + +func (q *HeadAndOOOQuerier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + return q.querier.LabelNames(ctx, hints, matchers...) 
+} + +func (q *HeadAndOOOQuerier) Close() error { + q.chunkr.Close() + return q.querier.Close() +} + +func (q *HeadAndOOOQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { + return selectSeriesSet(ctx, sortSeries, hints, matchers, q.index, q.chunkr, q.head.tombstones, q.mint, q.maxt) +} + +// HeadAndOOOChunkQuerier queries both the head and the out-of-order head. +type HeadAndOOOChunkQuerier struct { + mint, maxt int64 + head *Head + index IndexReader + chunkr ChunkReader + querier storage.ChunkQuerier +} + +func NewHeadAndOOOChunkQuerier(mint, maxt int64, head *Head, oooIsoState *oooIsolationState, querier storage.ChunkQuerier) storage.ChunkQuerier { + isoState := head.iso.State(mint, maxt) + return &HeadAndOOOChunkQuerier{ + mint: mint, + maxt: maxt, + head: head, + index: NewHeadAndOOOIndexReader(head, mint, maxt, oooIsoState.minRef), + chunkr: NewHeadAndOOOChunkReader(head, mint, maxt, isoState, oooIsoState, 0), + querier: querier, + } +} + +func (q *HeadAndOOOChunkQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + return q.querier.LabelValues(ctx, name, hints, matchers...) +} + +func (q *HeadAndOOOChunkQuerier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + return q.querier.LabelNames(ctx, hints, matchers...) +} + +func (q *HeadAndOOOChunkQuerier) Close() error { + q.chunkr.Close() + return q.querier.Close() +} + +func (q *HeadAndOOOChunkQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.ChunkSeriesSet { + return selectChunkSeriesSet(ctx, sortSeries, hints, matchers, rangeHeadULID, q.index, q.chunkr, q.head.tombstones, q.mint, q.maxt) +} + +type HeadAndOOOIndexReader struct { + *headIndexReader // A reference to the headIndexReader so we can reuse as many interface implementation as possible. 
+ lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef +} + +func NewHeadAndOOOIndexReader(head *Head, mint, maxt int64, lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef) *HeadAndOOOIndexReader { + hr := &headIndexReader{ + head: head, + mint: mint, + maxt: maxt, + } + return &HeadAndOOOIndexReader{hr, lastGarbageCollectedMmapRef} +} + +func (oh *HeadAndOOOIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error { + s := oh.head.series.getByID(chunks.HeadSeriesRef(ref)) + if s == nil { + oh.head.metrics.seriesNotFound.Inc() + return storage.ErrNotFound + } + builder.Assign(s.lset) + + if chks == nil { + return nil + } + + s.Lock() + defer s.Unlock() + *chks = (*chks)[:0] + + if s.ooo != nil { + return getOOOSeriesChunks(s, oh.mint, oh.maxt, oh.lastGarbageCollectedMmapRef, 0, true, chks) + } + getSeriesChunks(s, oh.mint, oh.maxt, chks) + return nil +} + +type HeadAndOOOChunkReader struct { + cr headChunkReader + maxMmapRef chunks.ChunkDiskMapperRef + oooIsoState *oooIsolationState +} + +func NewHeadAndOOOChunkReader(head *Head, mint, maxt int64, isoState *isolationState, oooIsoState *oooIsolationState, maxMmapRef chunks.ChunkDiskMapperRef) *HeadAndOOOChunkReader { + return &HeadAndOOOChunkReader{ + cr: headChunkReader{ + head: head, + mint: mint, + maxt: maxt, + isoState: isoState, + }, + maxMmapRef: maxMmapRef, + oooIsoState: oooIsoState, + } +} + +func (cr *HeadAndOOOChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) { + sid, _, isOOO := unpackHeadChunkRef(meta.Ref) + if !isOOO { + return cr.cr.ChunkOrIterable(meta) + } + + s := cr.cr.head.series.getByID(sid) + // This means that the series has been garbage collected. + if s == nil { + return nil, nil, storage.ErrNotFound + } + + s.Lock() + mc, err := s.oooMergedChunks(meta, cr.cr.head.chunkDiskMapper, &cr.cr, cr.cr.mint, cr.cr.maxt, cr.maxMmapRef) + s.Unlock() + + return nil, mc, err +} + +// Pass through special behaviour for current head chunk. +func (cr *HeadAndOOOChunkReader) ChunkOrIterableWithCopy(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, int64, error) { + _, _, isOOO := unpackHeadChunkRef(meta.Ref) + if !isOOO { + return cr.cr.ChunkOrIterableWithCopy(meta) + } + chk, iter, err := cr.ChunkOrIterable(meta) + return chk, iter, 0, err +} + +func (cr *HeadAndOOOChunkReader) Close() error { + if cr.cr.isoState != nil { + cr.cr.isoState.Close() + } + if cr.oooIsoState != nil { + cr.oooIsoState.Close() + } + return nil +} diff --git a/tsdb/ooo_head_read_test.go b/tsdb/ooo_head_read_test.go index 8cc3f1dde..08c5c4a3e 100644 --- a/tsdb/ooo_head_read_test.go +++ b/tsdb/ooo_head_read_test.go @@ -316,7 +316,7 @@ func TestOOOHeadIndexReader_Series(t *testing.T) { // Ref to whatever Ref the chunk has, that we refer to by ID for ref, c := range intervals { if c.ID == e.ID { - meta.Ref = chunks.ChunkRef(chunks.NewHeadChunkRef(chunks.HeadSeriesRef(s1ID), chunks.HeadChunkID(ref))) + meta.Ref = chunks.ChunkRef(chunks.NewHeadChunkRef(chunks.HeadSeriesRef(s1ID), s1.oooHeadChunkID(ref))) break } } diff --git a/tsdb/querier.go b/tsdb/querier.go index 37456d7e2..2e15f0b08 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ -641,14 +641,16 @@ func (p *populateWithDelGenericSeriesIterator) next(copyHeadChunk bool) bool { } } - hcr, ok := p.cr.(*headChunkReader) + hcr, ok := p.cr.(ChunkReaderWithCopy) var iterable chunkenc.Iterable if ok && copyHeadChunk && len(p.bufIter.Intervals) == 0 { - // ChunkWithCopy will copy the head chunk. 
+ // ChunkOrIterableWithCopy will copy the head chunk, if it can. var maxt int64 - p.currMeta.Chunk, maxt, p.err = hcr.ChunkWithCopy(p.currMeta) - // For the in-memory head chunk the index reader sets maxt as MaxInt64. We fix it here. - p.currMeta.MaxTime = maxt + p.currMeta.Chunk, iterable, maxt, p.err = hcr.ChunkOrIterableWithCopy(p.currMeta) + if p.currMeta.Chunk != nil { + // For the in-memory head chunk the index reader sets maxt as MaxInt64. We fix it here. + p.currMeta.MaxTime = maxt + } } else { p.currMeta.Chunk, iterable, p.err = p.cr.ChunkOrIterable(p.currMeta) } From 6529d6336cc277aefef78a595128a65a719e86a0 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 15 Jul 2024 20:07:12 +0100 Subject: [PATCH 81/99] TSDB: NewHeadAndOOOChunkReader takes headChunkReader So we can pass nil and have it read just OOO chunks. Signed-off-by: Bryan Boreham --- tsdb/ooo_head_read.go | 40 +++++++++++++++++++++++++--------------- 1 file changed, 25 insertions(+), 15 deletions(-) diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go index b7944c56e..4be4e9e18 100644 --- a/tsdb/ooo_head_read.go +++ b/tsdb/ooo_head_read.go @@ -514,13 +514,18 @@ type HeadAndOOOQuerier struct { } func NewHeadAndOOOQuerier(mint, maxt int64, head *Head, oooIsoState *oooIsolationState, querier storage.Querier) storage.Querier { - isoState := head.iso.State(mint, maxt) + cr := &headChunkReader{ + head: head, + mint: mint, + maxt: maxt, + isoState: head.iso.State(mint, maxt), + } return &HeadAndOOOQuerier{ mint: mint, maxt: maxt, head: head, index: NewHeadAndOOOIndexReader(head, mint, maxt, oooIsoState.minRef), - chunkr: NewHeadAndOOOChunkReader(head, mint, maxt, isoState, oooIsoState, 0), + chunkr: NewHeadAndOOOChunkReader(head, mint, maxt, cr, oooIsoState, 0), querier: querier, } } @@ -552,13 +557,18 @@ type HeadAndOOOChunkQuerier struct { } func NewHeadAndOOOChunkQuerier(mint, maxt int64, head *Head, oooIsoState *oooIsolationState, querier storage.ChunkQuerier) storage.ChunkQuerier { - isoState := head.iso.State(mint, maxt) + cr := &headChunkReader{ + head: head, + mint: mint, + maxt: maxt, + isoState: head.iso.State(mint, maxt), + } return &HeadAndOOOChunkQuerier{ mint: mint, maxt: maxt, head: head, index: NewHeadAndOOOIndexReader(head, mint, maxt, oooIsoState.minRef), - chunkr: NewHeadAndOOOChunkReader(head, mint, maxt, isoState, oooIsoState, 0), + chunkr: NewHeadAndOOOChunkReader(head, mint, maxt, cr, oooIsoState, 0), querier: querier, } } @@ -618,19 +628,19 @@ func (oh *HeadAndOOOIndexReader) Series(ref storage.SeriesRef, builder *labels.S } type HeadAndOOOChunkReader struct { - cr headChunkReader + head *Head + mint, maxt int64 + cr *headChunkReader // If nil, only read OOO chunks. 
maxMmapRef chunks.ChunkDiskMapperRef oooIsoState *oooIsolationState } -func NewHeadAndOOOChunkReader(head *Head, mint, maxt int64, isoState *isolationState, oooIsoState *oooIsolationState, maxMmapRef chunks.ChunkDiskMapperRef) *HeadAndOOOChunkReader { +func NewHeadAndOOOChunkReader(head *Head, mint, maxt int64, cr *headChunkReader, oooIsoState *oooIsolationState, maxMmapRef chunks.ChunkDiskMapperRef) *HeadAndOOOChunkReader { return &HeadAndOOOChunkReader{ - cr: headChunkReader{ - head: head, - mint: mint, - maxt: maxt, - isoState: isoState, - }, + head: head, + mint: mint, + maxt: maxt, + cr: cr, maxMmapRef: maxMmapRef, oooIsoState: oooIsoState, } @@ -642,14 +652,14 @@ func (cr *HeadAndOOOChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chu return cr.cr.ChunkOrIterable(meta) } - s := cr.cr.head.series.getByID(sid) + s := cr.head.series.getByID(sid) // This means that the series has been garbage collected. if s == nil { return nil, nil, storage.ErrNotFound } s.Lock() - mc, err := s.oooMergedChunks(meta, cr.cr.head.chunkDiskMapper, &cr.cr, cr.cr.mint, cr.cr.maxt, cr.maxMmapRef) + mc, err := s.oooMergedChunks(meta, cr.head.chunkDiskMapper, cr.cr, cr.mint, cr.maxt, cr.maxMmapRef) s.Unlock() return nil, mc, err @@ -666,7 +676,7 @@ func (cr *HeadAndOOOChunkReader) ChunkOrIterableWithCopy(meta chunks.Meta) (chun } func (cr *HeadAndOOOChunkReader) Close() error { - if cr.cr.isoState != nil { + if cr.cr != nil && cr.cr.isoState != nil { cr.cr.isoState.Close() } if cr.oooIsoState != nil { From f26159794434d20c0ec3081d0bd080b37756cc60 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 15 Jul 2024 20:56:55 +0100 Subject: [PATCH 82/99] TSDB: Fix up LabelValues to work for OOO-only head Signed-off-by: Bryan Boreham --- tsdb/ooo_head_read.go | 14 ++++++++++++++ tsdb/ooo_head_read_test.go | 10 +++++----- 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go index 4be4e9e18..f844cfaca 100644 --- a/tsdb/ooo_head_read.go +++ b/tsdb/ooo_head_read.go @@ -627,6 +627,20 @@ func (oh *HeadAndOOOIndexReader) Series(ref storage.SeriesRef, builder *labels.S return nil } +// LabelValues needs to be overridden from the headIndexReader implementation +// so we can return labels within either in-order range or ooo range. +func (oh *HeadAndOOOIndexReader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { + if oh.maxt < oh.head.MinTime() && oh.maxt < oh.head.MinOOOTime() || oh.mint > oh.head.MaxTime() && oh.mint > oh.head.MaxOOOTime() { + return []string{}, nil + } + + if len(matchers) == 0 { + return oh.head.postings.LabelValues(ctx, name), nil + } + + return labelValuesWithMatchers(ctx, oh, name, matchers...) 
+} + type HeadAndOOOChunkReader struct { head *Head mint, maxt int64 diff --git a/tsdb/ooo_head_read_test.go b/tsdb/ooo_head_read_test.go index 08c5c4a3e..b837b9e2f 100644 --- a/tsdb/ooo_head_read_test.go +++ b/tsdb/ooo_head_read_test.go @@ -421,17 +421,17 @@ func testOOOHeadChunkReader_LabelValues(t *testing.T, scenario sampleTypeScenari name: "LabelValues calls with ooo head query range not overlapping out-of-order data", queryMinT: 100, queryMaxT: 100, - expValues1: []string{}, - expValues2: []string{}, - expValues3: []string{}, - expValues4: []string{}, + expValues1: []string{"bar1"}, + expValues2: nil, + expValues3: []string{"bar1", "bar2"}, + expValues4: []string{"bar1", "bar2"}, }, } for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { // We first want to test using a head index reader that covers the biggest query interval - oh := NewOOOHeadIndexReader(head, tc.queryMinT, tc.queryMaxT, 0) + oh := NewHeadAndOOOIndexReader(head, tc.queryMinT, tc.queryMaxT, 0) matchers := []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "foo", "bar1")} values, err := oh.LabelValues(ctx, "foo", matchers...) sort.Strings(values) From ce4eac859a3231eadcfb392e3c2d27d243c0e07f Mon Sep 17 00:00:00 2001 From: suntala Date: Thu, 8 Aug 2024 20:59:00 +0200 Subject: [PATCH 83/99] Link to specific feature flag entry Signed-off-by: suntala --- docs/querying/functions.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/querying/functions.md b/docs/querying/functions.md index ee81328b5..bf2701b88 100644 --- a/docs/querying/functions.md +++ b/docs/querying/functions.md @@ -617,7 +617,7 @@ Like `sort`, `sort_desc` only affects the results of instant queries, as range q ## `sort_by_label()` -**This function has to be enabled via the [feature flag](../feature_flags.md) `--enable-feature=promql-experimental-functions`.** +**This function has to be enabled via the [feature flag](../feature_flags.md#experimental-promql-functions) `--enable-feature=promql-experimental-functions`.** `sort_by_label(v instant-vector, label string, ...)` returns vector elements sorted by their label values and sample value in case of label values being equal, in ascending order. @@ -627,7 +627,7 @@ This function uses [natural sort order](https://en.wikipedia.org/wiki/Natural_so ## `sort_by_label_desc()` -**This function has to be enabled via the [feature flag](../feature_flags.md) `--enable-feature=promql-experimental-functions`.** +**This function has to be enabled via the [feature flag](../feature_flags.md#experimental-promql-functions) `--enable-feature=promql-experimental-functions`.** Same as `sort_by_label`, but sorts in descending order. @@ -676,7 +676,7 @@ over time and return an instant vector with per-series aggregation results: * `last_over_time(range-vector)`: the most recent point value in the specified interval. * `present_over_time(range-vector)`: the value 1 for any series in the specified interval. -If the [feature flag](../feature_flags.md) +If the [feature flag](../feature_flags.md#experimental-promql-functions) `--enable-feature=promql-experimental-functions` is set, the following additional functions are available: From 0a2ff76881a82bd2751cd3f316494b9ab5621b07 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 15 Jul 2024 18:17:48 +0100 Subject: [PATCH 84/99] TSDB tests: Fix up BenchmarkQueries Was not working even on main. Some cases still error. 
Signed-off-by: Bryan Boreham --- tsdb/querier_test.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index ffdf8dc02..50525f65f 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -3169,12 +3169,11 @@ func BenchmarkQueries(b *testing.B) { qHead, err := NewBlockQuerier(NewRangeHead(head, 1, nSamples), 1, nSamples) require.NoError(b, err) - qOOOHead, err := NewBlockQuerier(NewOOORangeHead(head, 1, nSamples, 0), 1, nSamples) - require.NoError(b, err) + isoState := head.oooIso.TrackReadAfter(0) + qOOOHead := NewHeadAndOOOQuerier(1, nSamples, head, isoState, qHead) queryTypes = append(queryTypes, qt{ - fmt.Sprintf("_Head_oooPercent:%d", oooPercentage), - storage.NewMergeQuerier([]storage.Querier{qHead, qOOOHead}, nil, storage.ChainedSeriesMerge), + fmt.Sprintf("_Head_oooPercent:%d", oooPercentage), qOOOHead, }) } From e7e50a3afd285136366ebbb0270cce442df3c1b1 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 15 Jul 2024 18:27:31 +0100 Subject: [PATCH 85/99] TSDB: Remove code for querying OOO-head only Just query via `HeadAndOOOQuerier`, which will skip series where no in-order chunks are in range. Now we don't need `OOORangeHead`. Signed-off-by: Bryan Boreham --- tsdb/db.go | 45 ++++++++-------------------- tsdb/ooo_head.go | 78 ------------------------------------------------ 2 files changed, 12 insertions(+), 111 deletions(-) diff --git a/tsdb/db.go b/tsdb/db.go index bf1893ec0..94c44161d 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -2041,8 +2041,9 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) { } }() + overlapsOOO := overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) var headQuerier storage.Querier - if maxt >= db.head.MinTime() { + if maxt >= db.head.MinTime() || overlapsOOO { rh := NewRangeHead(db.head, mint, maxt) var err error headQuerier, err = db.blockQuerierFunc(rh, mint, maxt) @@ -2069,22 +2070,10 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) { } } - if headQuerier != nil { - if overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) { - // We need to fetch from in-order and out-of-order chunks: wrap the headQuerier. - isoState := db.head.oooIso.TrackReadAfter(db.lastGarbageCollectedMmapRef) - headQuerier = NewHeadAndOOOQuerier(mint, maxt, db.head, isoState, headQuerier) - } - } else if overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) { - rh := NewOOORangeHead(db.head, mint, maxt, db.lastGarbageCollectedMmapRef) - var err error - headQuerier, err = db.blockQuerierFunc(rh, mint, maxt) - if err != nil { - // If BlockQuerierFunc() failed, make sure to clean up the pending read created by NewOOORangeHead. - rh.isoState.Close() - - return nil, fmt.Errorf("open block querier for ooo head %s: %w", rh, err) - } + if overlapsOOO { + // We need to fetch from in-order and out-of-order chunks: wrap the headQuerier. 
+ isoState := db.head.oooIso.TrackReadAfter(db.lastGarbageCollectedMmapRef) + headQuerier = NewHeadAndOOOQuerier(mint, maxt, db.head, isoState, headQuerier) } if headQuerier != nil { @@ -2128,8 +2117,9 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer } }() + overlapsOOO := overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) var headQuerier storage.ChunkQuerier - if maxt >= db.head.MinTime() { + if maxt >= db.head.MinTime() || overlapsOOO { rh := NewRangeHead(db.head, mint, maxt) headQuerier, err = db.blockChunkQuerierFunc(rh, mint, maxt) if err != nil { @@ -2155,21 +2145,10 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer } } - if headQuerier != nil { - if overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) { - // We need to fetch from in-order and out-of-order chunks: wrap the headQuerier. - isoState := db.head.oooIso.TrackReadAfter(db.lastGarbageCollectedMmapRef) - headQuerier = NewHeadAndOOOChunkQuerier(mint, maxt, db.head, isoState, headQuerier) - } - } else if overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) { - rh := NewOOORangeHead(db.head, mint, maxt, db.lastGarbageCollectedMmapRef) - headQuerier, err = db.blockChunkQuerierFunc(rh, mint, maxt) - if err != nil { - // If NewBlockQuerier() failed, make sure to clean up the pending read created by NewOOORangeHead. - rh.isoState.Close() - - return nil, fmt.Errorf("open block chunk querier for ooo head %s: %w", rh, err) - } + if overlapsOOO { + // We need to fetch from in-order and out-of-order chunks: wrap the headQuerier. + isoState := db.head.oooIso.TrackReadAfter(db.lastGarbageCollectedMmapRef) + headQuerier = NewHeadAndOOOChunkQuerier(mint, maxt, db.head, isoState, headQuerier) } if headQuerier != nil { diff --git a/tsdb/ooo_head.go b/tsdb/ooo_head.go index 209b14673..0ed9f3648 100644 --- a/tsdb/ooo_head.go +++ b/tsdb/ooo_head.go @@ -14,16 +14,10 @@ package tsdb import ( - "fmt" "sort" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/tsdb/chunkenc" - - "github.com/oklog/ulid" - - "github.com/prometheus/prometheus/tsdb/chunks" - "github.com/prometheus/prometheus/tsdb/tombstones" ) // OOOChunk maintains samples in time-ascending order. @@ -171,75 +165,3 @@ func (o *OOOChunk) ToEncodedChunks(mint, maxt int64) (chks []memChunk, err error } return chks, nil } - -var _ BlockReader = &OOORangeHead{} - -// OOORangeHead allows querying Head out of order samples via BlockReader -// interface implementation. -type OOORangeHead struct { - head *Head - // mint and maxt are tracked because when a query is handled we only want - // the timerange of the query and having preexisting pointers to the first - // and last timestamp help with that. 
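The shape of the change in db.go above (keep a single head querier and, only when the requested range overlaps out-of-order data, wrap it) can be sketched as follows. The overlap check mirrors overlapsClosedInterval; everything else below is a hypothetical stand-in rather than the real tsdb wiring:

package main

import "fmt"

// overlapsClosedInterval is the closed-interval check the decision relies on.
func overlapsClosedInterval(mint1, maxt1, mint2, maxt2 int64) bool {
	return mint1 <= maxt2 && mint2 <= maxt1
}

func main() {
	queryMin, queryMax := int64(100), int64(200)
	oooMin, oooMax := int64(150), int64(300)

	querier := "head"
	if overlapsClosedInterval(queryMin, queryMax, oooMin, oooMax) {
		// Wrap the existing head querier rather than keeping a separate OOO querier.
		querier = "HeadAndOOOQuerier(" + querier + ")"
	}
	fmt.Println(querier) // HeadAndOOOQuerier(head)
}
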
- mint, maxt int64 - - isoState *oooIsolationState -} - -func NewOOORangeHead(head *Head, mint, maxt int64, minRef chunks.ChunkDiskMapperRef) *OOORangeHead { - isoState := head.oooIso.TrackReadAfter(minRef) - - return &OOORangeHead{ - head: head, - mint: mint, - maxt: maxt, - isoState: isoState, - } -} - -func (oh *OOORangeHead) Index() (IndexReader, error) { - return NewOOOHeadIndexReader(oh.head, oh.mint, oh.maxt, oh.isoState.minRef), nil -} - -func (oh *OOORangeHead) Chunks() (ChunkReader, error) { - return NewOOOHeadChunkReader(oh.head, oh.mint, oh.maxt, oh.isoState, 0), nil -} - -func (oh *OOORangeHead) Tombstones() (tombstones.Reader, error) { - // As stated in the design doc https://docs.google.com/document/d/1Kppm7qL9C-BJB1j6yb6-9ObG3AbdZnFUBYPNNWwDBYM/edit?usp=sharing - // Tombstones are not supported for out of order metrics. - return tombstones.NewMemTombstones(), nil -} - -var oooRangeHeadULID = ulid.MustParse("0000000000XXXX000RANGEHEAD") - -func (oh *OOORangeHead) Meta() BlockMeta { - return BlockMeta{ - MinTime: oh.mint, - MaxTime: oh.maxt, - ULID: oooRangeHeadULID, - Stats: BlockStats{ - NumSeries: oh.head.NumSeries(), - }, - } -} - -// Size returns the size taken by the Head block. -func (oh *OOORangeHead) Size() int64 { - return oh.head.Size() -} - -// String returns an human readable representation of the out of order range -// head. It's important to keep this function in order to avoid the struct dump -// when the head is stringified in errors or logs. -func (oh *OOORangeHead) String() string { - return fmt.Sprintf("ooo range head (mint: %d, maxt: %d)", oh.MinTime(), oh.MaxTime()) -} - -func (oh *OOORangeHead) MinTime() int64 { - return oh.mint -} - -func (oh *OOORangeHead) MaxTime() int64 { - return oh.maxt -} From a299c7b6d61cbbfc898962acb3e88430bd7e048e Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 15 Jul 2024 20:10:17 +0100 Subject: [PATCH 86/99] TSDB: Remove OOOHeadChunkReader Use HeadAndOOOChunkReader instead. Signed-off-by: Bryan Boreham --- tsdb/ooo_head_read.go | 55 +------------------------------------- tsdb/ooo_head_read_test.go | 8 +++--- 2 files changed, 5 insertions(+), 58 deletions(-) diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go index f844cfaca..01ba12986 100644 --- a/tsdb/ooo_head_read.go +++ b/tsdb/ooo_head_read.go @@ -248,59 +248,6 @@ func (oh *OOOHeadIndexReader) Postings(ctx context.Context, name string, values } } -type OOOHeadChunkReader struct { - head *Head - mint, maxt int64 - isoState *oooIsolationState - maxMmapRef chunks.ChunkDiskMapperRef -} - -func NewOOOHeadChunkReader(head *Head, mint, maxt int64, isoState *oooIsolationState, maxMmapRef chunks.ChunkDiskMapperRef) *OOOHeadChunkReader { - return &OOOHeadChunkReader{ - head: head, - mint: mint, - maxt: maxt, - isoState: isoState, - maxMmapRef: maxMmapRef, - } -} - -func (cr OOOHeadChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) { - sid, _ := chunks.HeadChunkRef(meta.Ref).Unpack() - - s := cr.head.series.getByID(sid) - // This means that the series has been garbage collected. - if s == nil { - return nil, nil, storage.ErrNotFound - } - - s.Lock() - if s.ooo == nil { - // There is no OOO data for this series. - s.Unlock() - return nil, nil, storage.ErrNotFound - } - mc, err := s.oooMergedChunks(meta, cr.head.chunkDiskMapper, nil, cr.mint, cr.maxt, cr.maxMmapRef) - s.Unlock() - if err != nil { - return nil, nil, err - } - - // This means that the query range did not overlap with the requested chunk. 
- if len(mc.chunkIterables) == 0 { - return nil, nil, storage.ErrNotFound - } - - return nil, mc, nil -} - -func (cr OOOHeadChunkReader) Close() error { - if cr.isoState != nil { - cr.isoState.Close() - } - return nil -} - type OOOCompactionHead struct { oooIR *OOOHeadIndexReader lastMmapRef chunks.ChunkDiskMapperRef @@ -397,7 +344,7 @@ func (ch *OOOCompactionHead) Index() (IndexReader, error) { } func (ch *OOOCompactionHead) Chunks() (ChunkReader, error) { - return NewOOOHeadChunkReader(ch.oooIR.head, ch.oooIR.mint, ch.oooIR.maxt, nil, ch.lastMmapRef), nil + return NewHeadAndOOOChunkReader(ch.oooIR.head, ch.oooIR.mint, ch.oooIR.maxt, nil, nil, ch.lastMmapRef), nil } func (ch *OOOCompactionHead) Tombstones() (tombstones.Reader, error) { diff --git a/tsdb/ooo_head_read_test.go b/tsdb/ooo_head_read_test.go index b837b9e2f..c0b130ffb 100644 --- a/tsdb/ooo_head_read_test.go +++ b/tsdb/ooo_head_read_test.go @@ -481,10 +481,10 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) { t.Run("Getting a non existing chunk fails with not found error", func(t *testing.T) { db := newTestDBWithOpts(t, opts) - cr := NewOOOHeadChunkReader(db.head, 0, 1000, nil, 0) + cr := NewHeadAndOOOChunkReader(db.head, 0, 1000, nil, nil, 0) defer cr.Close() c, iterable, err := cr.ChunkOrIterable(chunks.Meta{ - Ref: 0x1000000, Chunk: chunkenc.Chunk(nil), MinTime: 100, MaxTime: 300, + Ref: 0x1800000, Chunk: chunkenc.Chunk(nil), MinTime: 100, MaxTime: 300, }) require.Nil(t, iterable) require.Equal(t, err, fmt.Errorf("not found")) @@ -839,7 +839,7 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) { require.NoError(t, err) require.Equal(t, len(tc.expChunksSamples), len(chks)) - cr := NewOOOHeadChunkReader(db.head, tc.queryMinT, tc.queryMaxT, nil, 0) + cr := NewHeadAndOOOChunkReader(db.head, tc.queryMinT, tc.queryMaxT, nil, nil, 0) defer cr.Close() for i := 0; i < len(chks); i++ { c, iterable, err := cr.ChunkOrIterable(chks[i]) @@ -1013,7 +1013,7 @@ func testOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding( } require.NoError(t, app.Commit()) - cr := NewOOOHeadChunkReader(db.head, tc.queryMinT, tc.queryMaxT, nil, 0) + cr := NewHeadAndOOOChunkReader(db.head, tc.queryMinT, tc.queryMaxT, nil, nil, 0) defer cr.Close() for i := 0; i < len(chks); i++ { c, iterable, err := cr.ChunkOrIterable(chks[i]) From 26b3de04387b38fc633ba2ce0931fdf65059086d Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Tue, 16 Jul 2024 13:56:22 +0100 Subject: [PATCH 87/99] TSDB: Remove OOOHeadIndexReader Use headIndexReader instead. OOOCompactionHeadIndexReader needs to be expanded slightly, because it previously delegated to OOOHeadIndexReader. Signed-off-by: Bryan Boreham --- tsdb/ooo_head_read.go | 127 ++++++++++--------------------------- tsdb/ooo_head_read_test.go | 6 +- 2 files changed, 35 insertions(+), 98 deletions(-) diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go index 01ba12986..aad1d2fa8 100644 --- a/tsdb/ooo_head_read.go +++ b/tsdb/ooo_head_read.go @@ -30,19 +30,6 @@ import ( "github.com/prometheus/prometheus/util/annotations" ) -var _ IndexReader = &OOOHeadIndexReader{} - -// OOOHeadIndexReader implements IndexReader so ooo samples in the head can be -// accessed. -// It also has a reference to headIndexReader so we can leverage on its -// IndexReader implementation for all the methods that remain the same. We -// decided to do this to avoid code duplication. -// The only methods that change are the ones about getting Series and Postings. 
-type OOOHeadIndexReader struct { - *headIndexReader // A reference to the headIndexReader so we can reuse as many interface implementation as possible. - lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef -} - var _ chunkenc.Iterable = &mergedOOOChunks{} // mergedOOOChunks holds the list of iterables for overlapping chunks. @@ -54,48 +41,11 @@ func (o mergedOOOChunks) Iterator(iterator chunkenc.Iterator) chunkenc.Iterator return storage.ChainSampleIteratorFromIterables(iterator, o.chunkIterables) } -func NewOOOHeadIndexReader(head *Head, mint, maxt int64, lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef) *OOOHeadIndexReader { - hr := &headIndexReader{ - head: head, - mint: mint, - maxt: maxt, - } - return &OOOHeadIndexReader{hr, lastGarbageCollectedMmapRef} -} - -func (oh *OOOHeadIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error { - return oh.series(ref, builder, chks, oh.lastGarbageCollectedMmapRef, 0) -} - // lastGarbageCollectedMmapRef gives the last mmap chunk that may be being garbage collected and so // any chunk at or before this ref will not be considered. 0 disables this check. // // maxMmapRef tells upto what max m-map chunk that we can consider. If it is non-0, then // the oooHeadChunk will not be considered. -func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta, lastGarbageCollectedMmapRef, maxMmapRef chunks.ChunkDiskMapperRef) error { - s := oh.head.series.getByID(chunks.HeadSeriesRef(ref)) - - if s == nil { - oh.head.metrics.seriesNotFound.Inc() - return storage.ErrNotFound - } - builder.Assign(s.labels()) - - if chks == nil { - return nil - } - - s.Lock() - defer s.Unlock() - *chks = (*chks)[:0] - - if s.ooo == nil { - return nil - } - - return getOOOSeriesChunks(s, oh.mint, oh.maxt, lastGarbageCollectedMmapRef, maxMmapRef, false, chks) -} - func getOOOSeriesChunks(s *memSeries, mint, maxt int64, lastGarbageCollectedMmapRef, maxMmapRef chunks.ChunkDiskMapperRef, includeInOrder bool, chks *[]chunks.Meta) error { tmpChks := make([]chunks.Meta, 0, len(s.ooo.oooMmappedChunks)) @@ -176,21 +126,6 @@ func getOOOSeriesChunks(s *memSeries, mint, maxt int64, lastGarbageCollectedMmap return nil } -// LabelValues needs to be overridden from the headIndexReader implementation due -// to the check that happens at the beginning where we make sure that the query -// interval overlaps with the head minooot and maxooot. -func (oh *OOOHeadIndexReader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { - if oh.maxt < oh.head.MinOOOTime() || oh.mint > oh.head.MaxOOOTime() { - return []string{}, nil - } - - if len(matchers) == 0 { - return oh.head.postings.LabelValues(ctx, name), nil - } - - return labelValuesWithMatchers(ctx, oh, name, matchers...) -} - type chunkMetaAndChunkDiskMapperRef struct { meta chunks.Meta ref chunks.ChunkDiskMapperRef @@ -232,24 +167,8 @@ func lessByMinTimeAndMinRef(a, b chunks.Meta) int { } } -func (oh *OOOHeadIndexReader) Postings(ctx context.Context, name string, values ...string) (index.Postings, error) { - switch len(values) { - case 0: - return index.EmptyPostings(), nil - case 1: - return oh.head.postings.Get(name, values[0]), nil // TODO(ganesh) Also call GetOOOPostings - default: - // TODO(ganesh) We want to only return postings for out of order series. 
- res := make([]index.Postings, 0, len(values)) - for _, value := range values { - res = append(res, oh.head.postings.Get(name, value)) // TODO(ganesh) Also call GetOOOPostings - } - return index.Merge(ctx, res...), nil - } -} - type OOOCompactionHead struct { - oooIR *OOOHeadIndexReader + head *Head lastMmapRef chunks.ChunkDiskMapperRef lastWBLFile int postings []storage.SeriesRef @@ -266,6 +185,7 @@ type OOOCompactionHead struct { // on the sample append latency. So call NewOOOCompactionHead only right before compaction. func NewOOOCompactionHead(ctx context.Context, head *Head) (*OOOCompactionHead, error) { ch := &OOOCompactionHead{ + head: head, chunkRange: head.chunkRange.Load(), mint: math.MaxInt64, maxt: math.MinInt64, @@ -279,15 +199,14 @@ func NewOOOCompactionHead(ctx context.Context, head *Head) (*OOOCompactionHead, ch.lastWBLFile = lastWBLFile } - ch.oooIR = NewOOOHeadIndexReader(head, math.MinInt64, math.MaxInt64, 0) + hr := headIndexReader{head: head, mint: ch.mint, maxt: ch.maxt} n, v := index.AllPostingsKey() - - // TODO: verify this gets only ooo samples. - p, err := ch.oooIR.Postings(ctx, n, v) + // TODO: filter to series with OOO samples, before sorting. + p, err := hr.Postings(ctx, n, v) if err != nil { return nil, err } - p = ch.oooIR.SortedPostings(p) + p = hr.SortedPostings(p) var lastSeq, lastOff int for p.Next() { @@ -344,7 +263,7 @@ func (ch *OOOCompactionHead) Index() (IndexReader, error) { } func (ch *OOOCompactionHead) Chunks() (ChunkReader, error) { - return NewHeadAndOOOChunkReader(ch.oooIR.head, ch.oooIR.mint, ch.oooIR.maxt, nil, nil, ch.lastMmapRef), nil + return NewHeadAndOOOChunkReader(ch.head, ch.mint, ch.maxt, nil, nil, ch.lastMmapRef), nil } func (ch *OOOCompactionHead) Tombstones() (tombstones.Reader, error) { @@ -370,12 +289,12 @@ func (ch *OOOCompactionHead) Meta() BlockMeta { // Only the method of BlockReader interface are valid for the cloned OOOCompactionHead. 
func (ch *OOOCompactionHead) CloneForTimeRange(mint, maxt int64) *OOOCompactionHead { return &OOOCompactionHead{ - oooIR: NewOOOHeadIndexReader(ch.oooIR.head, mint, maxt, 0), + head: ch.head, lastMmapRef: ch.lastMmapRef, postings: ch.postings, chunkRange: ch.chunkRange, - mint: ch.mint, - maxt: ch.maxt, + mint: mint, + maxt: maxt, } } @@ -395,7 +314,8 @@ func NewOOOCompactionHeadIndexReader(ch *OOOCompactionHead) IndexReader { } func (ir *OOOCompactionHeadIndexReader) Symbols() index.StringIter { - return ir.ch.oooIR.Symbols() + hr := headIndexReader{head: ir.ch.head, mint: ir.ch.mint, maxt: ir.ch.maxt} + return hr.Symbols() } func (ir *OOOCompactionHeadIndexReader) Postings(_ context.Context, name string, values ...string) (index.Postings, error) { @@ -416,11 +336,28 @@ func (ir *OOOCompactionHeadIndexReader) SortedPostings(p index.Postings) index.P } func (ir *OOOCompactionHeadIndexReader) ShardedPostings(p index.Postings, shardIndex, shardCount uint64) index.Postings { - return ir.ch.oooIR.ShardedPostings(p, shardIndex, shardCount) + hr := headIndexReader{head: ir.ch.head, mint: ir.ch.mint, maxt: ir.ch.maxt} + return hr.ShardedPostings(p, shardIndex, shardCount) } func (ir *OOOCompactionHeadIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error { - return ir.ch.oooIR.series(ref, builder, chks, 0, ir.ch.lastMmapRef) + s := ir.ch.head.series.getByID(chunks.HeadSeriesRef(ref)) + + if s == nil { + ir.ch.head.metrics.seriesNotFound.Inc() + return storage.ErrNotFound + } + builder.Assign(s.lset) + + s.Lock() + defer s.Unlock() + *chks = (*chks)[:0] + + if s.ooo == nil { + return nil + } + + return getOOOSeriesChunks(s, ir.ch.mint, ir.ch.maxt, 0, ir.ch.lastMmapRef, false, chks) } func (ir *OOOCompactionHeadIndexReader) SortedLabelValues(_ context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { @@ -448,7 +385,7 @@ func (ir *OOOCompactionHeadIndexReader) LabelNamesFor(ctx context.Context, posti } func (ir *OOOCompactionHeadIndexReader) Close() error { - return ir.ch.oooIR.Close() + return nil } // HeadAndOOOQuerier queries both the head and the out-of-order head. diff --git a/tsdb/ooo_head_read_test.go b/tsdb/ooo_head_read_test.go index c0b130ffb..f71d49732 100644 --- a/tsdb/ooo_head_read_test.go +++ b/tsdb/ooo_head_read_test.go @@ -341,7 +341,7 @@ func TestOOOHeadIndexReader_Series(t *testing.T) { }) } - ir := NewOOOHeadIndexReader(h, tc.queryMinT, tc.queryMaxT, 0) + ir := NewHeadAndOOOIndexReader(h, tc.queryMinT, tc.queryMaxT, 0) var chks []chunks.Meta var b labels.ScratchBuilder @@ -832,7 +832,7 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) { // The Series method populates the chunk metas, taking a copy of the // head OOO chunk if necessary. These are then used by the ChunkReader. - ir := NewOOOHeadIndexReader(db.head, tc.queryMinT, tc.queryMaxT, 0) + ir := NewHeadAndOOOIndexReader(db.head, tc.queryMinT, tc.queryMaxT, 0) var chks []chunks.Meta var b labels.ScratchBuilder err = ir.Series(s1Ref, &b, &chks) @@ -997,7 +997,7 @@ func testOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding( // The Series method populates the chunk metas, taking a copy of the // head OOO chunk if necessary. These are then used by the ChunkReader. 
- ir := NewOOOHeadIndexReader(db.head, tc.queryMinT, tc.queryMaxT, 0) + ir := NewHeadAndOOOIndexReader(db.head, tc.queryMinT, tc.queryMaxT, 0) var chks []chunks.Meta var b labels.ScratchBuilder err = ir.Series(s1Ref, &b, &chks) From e95607b2765bf9b0492342d08b07c3b5e31089bc Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Tue, 16 Jul 2024 14:18:55 +0100 Subject: [PATCH 88/99] TSDB: Lock round access to labels, where necessary Signed-off-by: Bryan Boreham --- tsdb/ooo_head_read.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go index aad1d2fa8..e1881aef8 100644 --- a/tsdb/ooo_head_read.go +++ b/tsdb/ooo_head_read.go @@ -347,7 +347,7 @@ func (ir *OOOCompactionHeadIndexReader) Series(ref storage.SeriesRef, builder *l ir.ch.head.metrics.seriesNotFound.Inc() return storage.ErrNotFound } - builder.Assign(s.lset) + builder.Assign(s.labels()) s.Lock() defer s.Unlock() @@ -494,7 +494,7 @@ func (oh *HeadAndOOOIndexReader) Series(ref storage.SeriesRef, builder *labels.S oh.head.metrics.seriesNotFound.Inc() return storage.ErrNotFound } - builder.Assign(s.lset) + builder.Assign(s.labels()) if chks == nil { return nil From 7ffd3ca2807326b76d1c2c19dc769163a9280eed Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Tue, 16 Jul 2024 14:20:18 +0100 Subject: [PATCH 89/99] TSDB: Cosmetic: move HeadAndOOO implementations where old code was This makes the diffs easier to follow. Signed-off-by: Bryan Boreham --- tsdb/ooo_head_read.go | 221 +++++++++++++++++++++--------------------- 1 file changed, 112 insertions(+), 109 deletions(-) diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go index e1881aef8..aaaa24963 100644 --- a/tsdb/ooo_head_read.go +++ b/tsdb/ooo_head_read.go @@ -30,6 +30,13 @@ import ( "github.com/prometheus/prometheus/util/annotations" ) +var _ IndexReader = &HeadAndOOOIndexReader{} + +type HeadAndOOOIndexReader struct { + *headIndexReader // A reference to the headIndexReader so we can reuse as many interface implementation as possible. + lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef +} + var _ chunkenc.Iterable = &mergedOOOChunks{} // mergedOOOChunks holds the list of iterables for overlapping chunks. @@ -41,6 +48,39 @@ func (o mergedOOOChunks) Iterator(iterator chunkenc.Iterator) chunkenc.Iterator return storage.ChainSampleIteratorFromIterables(iterator, o.chunkIterables) } +func NewHeadAndOOOIndexReader(head *Head, mint, maxt int64, lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef) *HeadAndOOOIndexReader { + hr := &headIndexReader{ + head: head, + mint: mint, + maxt: maxt, + } + return &HeadAndOOOIndexReader{hr, lastGarbageCollectedMmapRef} +} + +func (oh *HeadAndOOOIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error { + s := oh.head.series.getByID(chunks.HeadSeriesRef(ref)) + + if s == nil { + oh.head.metrics.seriesNotFound.Inc() + return storage.ErrNotFound + } + builder.Assign(s.labels()) + + if chks == nil { + return nil + } + + s.Lock() + defer s.Unlock() + *chks = (*chks)[:0] + + if s.ooo != nil { + return getOOOSeriesChunks(s, oh.mint, oh.maxt, oh.lastGarbageCollectedMmapRef, 0, true, chks) + } + getSeriesChunks(s, oh.mint, oh.maxt, chks) + return nil +} + // lastGarbageCollectedMmapRef gives the last mmap chunk that may be being garbage collected and so // any chunk at or before this ref will not be considered. 0 disables this check. 
// @@ -126,6 +166,20 @@ func getOOOSeriesChunks(s *memSeries, mint, maxt int64, lastGarbageCollectedMmap return nil } +// LabelValues needs to be overridden from the headIndexReader implementation +// so we can return labels within either in-order range or ooo range. +func (oh *HeadAndOOOIndexReader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { + if oh.maxt < oh.head.MinTime() && oh.maxt < oh.head.MinOOOTime() || oh.mint > oh.head.MaxTime() && oh.mint > oh.head.MaxOOOTime() { + return []string{}, nil + } + + if len(matchers) == 0 { + return oh.head.postings.LabelValues(ctx, name), nil + } + + return labelValuesWithMatchers(ctx, oh, name, matchers...) +} + type chunkMetaAndChunkDiskMapperRef struct { meta chunks.Meta ref chunks.ChunkDiskMapperRef @@ -167,6 +221,64 @@ func lessByMinTimeAndMinRef(a, b chunks.Meta) int { } } +type HeadAndOOOChunkReader struct { + head *Head + mint, maxt int64 + cr *headChunkReader // If nil, only read OOO chunks. + maxMmapRef chunks.ChunkDiskMapperRef + oooIsoState *oooIsolationState +} + +func NewHeadAndOOOChunkReader(head *Head, mint, maxt int64, cr *headChunkReader, oooIsoState *oooIsolationState, maxMmapRef chunks.ChunkDiskMapperRef) *HeadAndOOOChunkReader { + return &HeadAndOOOChunkReader{ + head: head, + mint: mint, + maxt: maxt, + cr: cr, + maxMmapRef: maxMmapRef, + oooIsoState: oooIsoState, + } +} + +func (cr *HeadAndOOOChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) { + sid, _, isOOO := unpackHeadChunkRef(meta.Ref) + if !isOOO { + return cr.cr.ChunkOrIterable(meta) + } + + s := cr.head.series.getByID(sid) + // This means that the series has been garbage collected. + if s == nil { + return nil, nil, storage.ErrNotFound + } + + s.Lock() + mc, err := s.oooMergedChunks(meta, cr.head.chunkDiskMapper, cr.cr, cr.mint, cr.maxt, cr.maxMmapRef) + s.Unlock() + + return nil, mc, err +} + +// Pass through special behaviour for current head chunk. +func (cr *HeadAndOOOChunkReader) ChunkOrIterableWithCopy(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, int64, error) { + _, _, isOOO := unpackHeadChunkRef(meta.Ref) + if !isOOO { + return cr.cr.ChunkOrIterableWithCopy(meta) + } + chk, iter, err := cr.ChunkOrIterable(meta) + return chk, iter, 0, err +} + +func (cr *HeadAndOOOChunkReader) Close() error { + if cr.cr != nil && cr.cr.isoState != nil { + cr.cr.isoState.Close() + } + if cr.oooIsoState != nil { + cr.oooIsoState.Close() + } + return nil +} + type OOOCompactionHead struct { head *Head lastMmapRef chunks.ChunkDiskMapperRef @@ -473,112 +585,3 @@ func (q *HeadAndOOOChunkQuerier) Close() error { func (q *HeadAndOOOChunkQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.ChunkSeriesSet { return selectChunkSeriesSet(ctx, sortSeries, hints, matchers, rangeHeadULID, q.index, q.chunkr, q.head.tombstones, q.mint, q.maxt) } - -type HeadAndOOOIndexReader struct { - *headIndexReader // A reference to the headIndexReader so we can reuse as many interface implementation as possible. 
- lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef -} - -func NewHeadAndOOOIndexReader(head *Head, mint, maxt int64, lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef) *HeadAndOOOIndexReader { - hr := &headIndexReader{ - head: head, - mint: mint, - maxt: maxt, - } - return &HeadAndOOOIndexReader{hr, lastGarbageCollectedMmapRef} -} - -func (oh *HeadAndOOOIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error { - s := oh.head.series.getByID(chunks.HeadSeriesRef(ref)) - if s == nil { - oh.head.metrics.seriesNotFound.Inc() - return storage.ErrNotFound - } - builder.Assign(s.labels()) - - if chks == nil { - return nil - } - - s.Lock() - defer s.Unlock() - *chks = (*chks)[:0] - - if s.ooo != nil { - return getOOOSeriesChunks(s, oh.mint, oh.maxt, oh.lastGarbageCollectedMmapRef, 0, true, chks) - } - getSeriesChunks(s, oh.mint, oh.maxt, chks) - return nil -} - -// LabelValues needs to be overridden from the headIndexReader implementation -// so we can return labels within either in-order range or ooo range. -func (oh *HeadAndOOOIndexReader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { - if oh.maxt < oh.head.MinTime() && oh.maxt < oh.head.MinOOOTime() || oh.mint > oh.head.MaxTime() && oh.mint > oh.head.MaxOOOTime() { - return []string{}, nil - } - - if len(matchers) == 0 { - return oh.head.postings.LabelValues(ctx, name), nil - } - - return labelValuesWithMatchers(ctx, oh, name, matchers...) -} - -type HeadAndOOOChunkReader struct { - head *Head - mint, maxt int64 - cr *headChunkReader // If nil, only read OOO chunks. - maxMmapRef chunks.ChunkDiskMapperRef - oooIsoState *oooIsolationState -} - -func NewHeadAndOOOChunkReader(head *Head, mint, maxt int64, cr *headChunkReader, oooIsoState *oooIsolationState, maxMmapRef chunks.ChunkDiskMapperRef) *HeadAndOOOChunkReader { - return &HeadAndOOOChunkReader{ - head: head, - mint: mint, - maxt: maxt, - cr: cr, - maxMmapRef: maxMmapRef, - oooIsoState: oooIsoState, - } -} - -func (cr *HeadAndOOOChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) { - sid, _, isOOO := unpackHeadChunkRef(meta.Ref) - if !isOOO { - return cr.cr.ChunkOrIterable(meta) - } - - s := cr.head.series.getByID(sid) - // This means that the series has been garbage collected. - if s == nil { - return nil, nil, storage.ErrNotFound - } - - s.Lock() - mc, err := s.oooMergedChunks(meta, cr.head.chunkDiskMapper, cr.cr, cr.mint, cr.maxt, cr.maxMmapRef) - s.Unlock() - - return nil, mc, err -} - -// Pass through special behaviour for current head chunk. -func (cr *HeadAndOOOChunkReader) ChunkOrIterableWithCopy(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, int64, error) { - _, _, isOOO := unpackHeadChunkRef(meta.Ref) - if !isOOO { - return cr.cr.ChunkOrIterableWithCopy(meta) - } - chk, iter, err := cr.ChunkOrIterable(meta) - return chk, iter, 0, err -} - -func (cr *HeadAndOOOChunkReader) Close() error { - if cr.cr != nil && cr.cr.isoState != nil { - cr.cr.isoState.Close() - } - if cr.oooIsoState != nil { - cr.oooIsoState.Close() - } - return nil -} From 9135da1e4f24850008493c3b27f866123c761bdb Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 12 Aug 2024 17:14:41 +0100 Subject: [PATCH 90/99] TSDB: Review feedback Signed-off-by: Bryan Boreham * Re-enable check in `createHeadWithOOOSamples` which wasn't really broken. * Move code making `Block` into a `Queryable` into test file. * Make `getSeriesChunks` return a slice (renamed `appendSeriesChunks`). 
* Rename `oooMergedChunks` to `mergedChunks`. * Improve comment on `ChunkOrIterableWithCopy`. * Name return values from unpackHeadChunkRef. Co-authored-by: Oleg Zaytsev Signed-off-by: Bryan Boreham --- tsdb/block.go | 5 ----- tsdb/head_read.go | 19 +++++++++---------- tsdb/ooo_head_read.go | 9 +++++---- tsdb/querier_bench_test.go | 9 ++++++++- 4 files changed, 22 insertions(+), 20 deletions(-) diff --git a/tsdb/block.go b/tsdb/block.go index c55e22ce5..2f32733f8 100644 --- a/tsdb/block.go +++ b/tsdb/block.go @@ -467,11 +467,6 @@ func (pb *Block) setCompactionFailed() error { return nil } -// Querier implements Queryable. -func (pb *Block) Querier(mint, maxt int64) (storage.Querier, error) { - return NewBlockQuerier(pb, mint, maxt) -} - type blockIndexReader struct { ir IndexReader b *Block diff --git a/tsdb/head_read.go b/tsdb/head_read.go index 977d6b978..47f12df99 100644 --- a/tsdb/head_read.go +++ b/tsdb/head_read.go @@ -199,19 +199,18 @@ func (h *headIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchB defer s.Unlock() *chks = (*chks)[:0] - - getSeriesChunks(s, h.mint, h.maxt, chks) + *chks = appendSeriesChunks(s, h.mint, h.maxt, *chks) return nil } -func getSeriesChunks(s *memSeries, mint, maxt int64, chks *[]chunks.Meta) { +func appendSeriesChunks(s *memSeries, mint, maxt int64, chks []chunks.Meta) []chunks.Meta { for i, c := range s.mmappedChunks { // Do not expose chunks that are outside of the specified range. if !c.OverlapsClosedInterval(mint, maxt) { continue } - *chks = append(*chks, chunks.Meta{ + chks = append(chks, chunks.Meta{ MinTime: c.minTime, MaxTime: c.maxTime, Ref: chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.headChunkID(i))), @@ -230,7 +229,7 @@ func getSeriesChunks(s *memSeries, mint, maxt int64, chks *[]chunks.Meta) { maxTime = chk.maxTime } if chk.OverlapsClosedInterval(mint, maxt) { - *chks = append(*chks, chunks.Meta{ + chks = append(chks, chunks.Meta{ MinTime: chk.minTime, MaxTime: maxTime, Ref: chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.headChunkID(len(s.mmappedChunks)+j))), @@ -239,6 +238,7 @@ func getSeriesChunks(s *memSeries, mint, maxt int64, chks *[]chunks.Meta) { j++ } } + return chks } // headChunkID returns the HeadChunkID referred to by the given position. @@ -259,7 +259,7 @@ func (s *memSeries) oooHeadChunkID(pos int) chunks.HeadChunkID { return (chunks.HeadChunkID(pos) + s.ooo.firstOOOChunkID) | oooChunkIDMask } -func unpackHeadChunkRef(ref chunks.ChunkRef) (chunks.HeadSeriesRef, chunks.HeadChunkID, bool) { +func unpackHeadChunkRef(ref chunks.ChunkRef) (seriesID chunks.HeadSeriesRef, chunkID chunks.HeadChunkID, isOOO bool) { sid, cid := chunks.HeadChunkRef(ref).Unpack() return sid, (cid & (oooChunkIDMask - 1)), (cid & oooChunkIDMask) != 0 } @@ -481,14 +481,14 @@ func (s *memSeries) chunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDi return elem, true, offset == 0, nil } -// oooMergedChunks return an iterable over one or more OOO chunks for the given +// mergedChunks return an iterable over one or more OOO chunks for the given // chunks.Meta reference from memory or by m-mapping it from the disk. The // returned iterable will be a merge of all the overlapping chunks, if any, // amongst all the chunks in the OOOHead. // If hr is non-nil then in-order chunks are included. // This function is not thread safe unless the caller holds a lock. // The caller must ensure that s.ooo is not nil. 
-func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMapper, hr *headChunkReader, mint, maxt int64, maxMmapRef chunks.ChunkDiskMapperRef) (*mergedOOOChunks, error) { +func (s *memSeries) mergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMapper, hr *headChunkReader, mint, maxt int64, maxMmapRef chunks.ChunkDiskMapperRef) (chunkenc.Iterable, error) { _, cid, _ := unpackHeadChunkRef(meta.Ref) // ix represents the index of chunk in the s.mmappedChunks slice. The chunk meta's are @@ -531,8 +531,7 @@ func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMappe } if hr != nil { // Include in-order chunks. - var metas []chunks.Meta - getSeriesChunks(s, max(meta.MinTime, mint), min(meta.MaxTime, maxt), &metas) + metas := appendSeriesChunks(s, max(meta.MinTime, mint), min(meta.MaxTime, maxt), nil) for _, m := range metas { tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{ meta: m, diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go index aaaa24963..47e2efb86 100644 --- a/tsdb/ooo_head_read.go +++ b/tsdb/ooo_head_read.go @@ -77,7 +77,7 @@ func (oh *HeadAndOOOIndexReader) Series(ref storage.SeriesRef, builder *labels.S if s.ooo != nil { return getOOOSeriesChunks(s, oh.mint, oh.maxt, oh.lastGarbageCollectedMmapRef, 0, true, chks) } - getSeriesChunks(s, oh.mint, oh.maxt, chks) + *chks = appendSeriesChunks(s, oh.mint, oh.maxt, *chks) return nil } @@ -127,7 +127,7 @@ func getOOOSeriesChunks(s *memSeries, mint, maxt int64, lastGarbageCollectedMmap } if includeInOrder { - getSeriesChunks(s, mint, maxt, &tmpChks) + tmpChks = appendSeriesChunks(s, mint, maxt, tmpChks) } // There is nothing to do if we did not collect any chunk. @@ -253,13 +253,14 @@ func (cr *HeadAndOOOChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chu } s.Lock() - mc, err := s.oooMergedChunks(meta, cr.head.chunkDiskMapper, cr.cr, cr.mint, cr.maxt, cr.maxMmapRef) + mc, err := s.mergedChunks(meta, cr.head.chunkDiskMapper, cr.cr, cr.mint, cr.maxt, cr.maxMmapRef) s.Unlock() return nil, mc, err } -// Pass through special behaviour for current head chunk. +// ChunkOrIterableWithCopy: implements ChunkReaderWithCopy. The special Copy behaviour +// is only implemented for the in-order head chunk. func (cr *HeadAndOOOChunkReader) ChunkOrIterableWithCopy(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, int64, error) { _, _, isOOO := unpackHeadChunkRef(meta.Ref) if !isOOO { diff --git a/tsdb/querier_bench_test.go b/tsdb/querier_bench_test.go index e3e457d07..43accc253 100644 --- a/tsdb/querier_bench_test.go +++ b/tsdb/querier_bench_test.go @@ -321,10 +321,17 @@ func BenchmarkQuerierSelect(b *testing.B) { require.NoError(b, block.Close()) }() - benchmarkSelect(b, block, numSeries, false) + benchmarkSelect(b, (*queryableBlock)(block), numSeries, false) }) } +// Type wrapper to let a Block be a Queryable in benchmarkSelect(). +type queryableBlock Block + +func (pb *queryableBlock) Querier(mint, maxt int64) (storage.Querier, error) { + return NewBlockQuerier((*Block)(pb), mint, maxt) +} + func BenchmarkQuerierSelectWithOutOfOrder(b *testing.B) { numSeries := 1000000 _, db := createHeadForBenchmarkSelect(b, numSeries, func(app storage.Appender, i int) { From 512c67ec26e764e7adb4d2746ecf71d2222701f5 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 12 Aug 2024 18:49:00 +0100 Subject: [PATCH 91/99] TSDB: Never go over maximum number of OOO chunks In `mmapCurrentOOOHeadChunk`, check if the number is at the maximum and drop the data with an error log. 
This is not expected to happen as the maximum is over 8 million; that's 8 years of 1 sample every second. Signed-off-by: Bryan Boreham --- tsdb/head_append.go | 20 +++++++++++++------- tsdb/head_wal.go | 2 +- tsdb/ooo_head_read.go | 2 +- 3 files changed, 15 insertions(+), 9 deletions(-) diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 59681b8da..b66ac7278 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -19,6 +19,7 @@ import ( "fmt" "math" + "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/prometheus/model/exemplar" @@ -936,7 +937,7 @@ func (a *headAppender) Commit() (err error) { // Sample is OOO and OOO handling is enabled // and the delta is within the OOO tolerance. var mmapRefs []chunks.ChunkDiskMapperRef - ok, chunkCreated, mmapRefs = series.insert(s.T, s.V, nil, nil, a.head.chunkDiskMapper, oooCapMax) + ok, chunkCreated, mmapRefs = series.insert(s.T, s.V, nil, nil, a.head.chunkDiskMapper, oooCapMax, a.head.logger) if chunkCreated { r, ok := oooMmapMarkers[series.ref] if !ok || r != nil { @@ -1083,14 +1084,14 @@ func (a *headAppender) Commit() (err error) { } // insert is like append, except it inserts. Used for OOO samples. -func (s *memSeries) insert(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, chunkDiskMapper *chunks.ChunkDiskMapper, oooCapMax int64) (inserted, chunkCreated bool, mmapRefs []chunks.ChunkDiskMapperRef) { +func (s *memSeries) insert(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, chunkDiskMapper *chunks.ChunkDiskMapper, oooCapMax int64, logger log.Logger) (inserted, chunkCreated bool, mmapRefs []chunks.ChunkDiskMapperRef) { if s.ooo == nil { s.ooo = &memSeriesOOOFields{} } c := s.ooo.oooHeadChunk if c == nil || c.chunk.NumSamples() == int(oooCapMax) { // Note: If no new samples come in then we rely on compaction to clean up stale in-memory OOO chunks. - c, mmapRefs = s.cutNewOOOHeadChunk(t, chunkDiskMapper) + c, mmapRefs = s.cutNewOOOHeadChunk(t, chunkDiskMapper, logger) chunkCreated = true } @@ -1444,9 +1445,9 @@ func (s *memSeries) cutNewHeadChunk(mint int64, e chunkenc.Encoding, chunkRange } // cutNewOOOHeadChunk cuts a new OOO chunk and m-maps the old chunk. -// The caller must ensure that s.ooo is not nil. -func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper) (*oooHeadChunk, []chunks.ChunkDiskMapperRef) { - ref := s.mmapCurrentOOOHeadChunk(chunkDiskMapper) +// The caller must ensure that s is locked and s.ooo is not nil. +func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper, logger log.Logger) (*oooHeadChunk, []chunks.ChunkDiskMapperRef) { + ref := s.mmapCurrentOOOHeadChunk(chunkDiskMapper, logger) s.ooo.oooHeadChunk = &oooHeadChunk{ chunk: NewOOOChunk(), @@ -1457,7 +1458,8 @@ func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.Chunk return s.ooo.oooHeadChunk, ref } -func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper) []chunks.ChunkDiskMapperRef { +// s must be locked when calling. +func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper, logger log.Logger) []chunks.ChunkDiskMapperRef { if s.ooo == nil || s.ooo.oooHeadChunk == nil { // OOO is not enabled or there is no head chunk, so nothing to m-map here. 
return nil @@ -1469,6 +1471,10 @@ func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMap } chunkRefs := make([]chunks.ChunkDiskMapperRef, 0, 1) for _, memchunk := range chks { + if len(s.ooo.oooMmappedChunks) >= (oooChunkIDMask - 1) { + level.Error(logger).Log("msg", "Too many OOO chunks, dropping data", "series", s.lset.String()) + break + } chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.ooo.oooHeadChunk.minTime, s.ooo.oooHeadChunk.maxTime, memchunk.chunk, true, handleChunkWriteError) chunkRefs = append(chunkRefs, chunkRef) s.ooo.oooMmappedChunks = append(s.ooo.oooMmappedChunks, &mmappedChunk{ diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index 85b0c656d..7397bbf41 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -890,7 +890,7 @@ func (wp *wblSubsetProcessor) processWBLSamples(h *Head) (unknownRefs uint64) { unknownRefs++ continue } - ok, chunkCreated, _ := ms.insert(s.T, s.V, nil, nil, h.chunkDiskMapper, oooCapMax) + ok, chunkCreated, _ := ms.insert(s.T, s.V, nil, nil, h.chunkDiskMapper, oooCapMax, h.logger) if chunkCreated { h.metrics.chunksCreated.Inc() h.metrics.chunks.Inc() diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go index 47e2efb86..55e241fd9 100644 --- a/tsdb/ooo_head_read.go +++ b/tsdb/ooo_head_read.go @@ -340,7 +340,7 @@ func NewOOOCompactionHead(ctx context.Context, head *Head) (*OOOCompactionHead, } var lastMmapRef chunks.ChunkDiskMapperRef - mmapRefs := ms.mmapCurrentOOOHeadChunk(head.chunkDiskMapper) + mmapRefs := ms.mmapCurrentOOOHeadChunk(head.chunkDiskMapper, head.logger) if len(mmapRefs) == 0 && len(ms.ooo.oooMmappedChunks) > 0 { // Nothing was m-mapped. So take the mmapRef from the existing slice if it exists. mmapRefs = []chunks.ChunkDiskMapperRef{ms.ooo.oooMmappedChunks[len(ms.ooo.oooMmappedChunks)-1].ref} From 1800af54f0dce09c37fba6d7f61e14c9621b8321 Mon Sep 17 00:00:00 2001 From: cuiweiyuan Date: Thu, 8 Aug 2024 15:08:10 +0800 Subject: [PATCH 92/99] chore: fix some function names Signed-off-by: cuiweiyuan --- discovery/kubernetes/endpoints_test.go | 2 +- promql/engine.go | 2 +- storage/series_test.go | 2 +- tsdb/head_test.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/discovery/kubernetes/endpoints_test.go b/discovery/kubernetes/endpoints_test.go index e877657db..3ea98c5db 100644 --- a/discovery/kubernetes/endpoints_test.go +++ b/discovery/kubernetes/endpoints_test.go @@ -970,7 +970,7 @@ func TestEndpointsDiscoveryEmptyPodStatus(t *testing.T) { }.Run(t) } -// TestEndpointsUpdatePod makes sure that Endpoints discovery detects underlying Pods changes. +// TestEndpointsDiscoveryUpdatePod makes sure that Endpoints discovery detects underlying Pods changes. // See https://github.com/prometheus/prometheus/issues/11305 for more details. func TestEndpointsDiscoveryUpdatePod(t *testing.T) { pod := &v1.Pod{ diff --git a/promql/engine.go b/promql/engine.go index 1427302e5..a118672cf 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -3161,7 +3161,7 @@ seriesLoop: return mat, annos } -// aggregationK evaluates count_values on vec. +// aggregationCountValues evaluates count_values on vec. // Outputs as many series per group as there are values in the input. 
func (ev *evaluator) aggregationCountValues(e *parser.AggregateExpr, grouping []string, valueLabel string, vec Vector, enh *EvalNodeHelper) (Vector, annotations.Annotations) { type groupCount struct { diff --git a/storage/series_test.go b/storage/series_test.go index 51886f409..f8ba2af67 100644 --- a/storage/series_test.go +++ b/storage/series_test.go @@ -72,7 +72,7 @@ func TestListSeriesIterator(t *testing.T) { require.Equal(t, chunkenc.ValNone, it.Seek(2)) } -// TestSeriesSetToChunkSet test the property of SeriesSet that says +// TestChunkSeriesSetToSeriesSet test the property of SeriesSet that says // returned series should be iterable even after Next is called. func TestChunkSeriesSetToSeriesSet(t *testing.T) { series := []struct { diff --git a/tsdb/head_test.go b/tsdb/head_test.go index fb73a3638..f73ce38ba 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -5695,7 +5695,7 @@ func TestCuttingNewHeadChunks(t *testing.T) { } } -// TestHeadDetectsDuplcateSampleAtSizeLimit tests a regression where a duplicate sample +// TestHeadDetectsDuplicateSampleAtSizeLimit tests a regression where a duplicate sample // is appended to the head, right when the head chunk is at the size limit. // The test adds all samples as duplicate, thus expecting that the result has // exactly half of the samples. From 9e7308de386a23a5d3c8fd2ba28ca7aa55606bc7 Mon Sep 17 00:00:00 2001 From: Owen Williams Date: Thu, 18 Jul 2024 14:08:21 -0400 Subject: [PATCH 93/99] feat(utf8): utf8 content negotation and flags Signed-off-by: Owen Williams --- cmd/prometheus/main.go | 17 ++++++++++++++- config/config.go | 21 ++++++++++++++++++ docs/command-line/prometheus.md | 3 ++- docs/configuration/configuration.md | 10 +++++++++ docs/feature_flags.md | 8 +++++++ scrape/manager.go | 2 ++ scrape/scrape.go | 23 ++++++++++++++++---- scrape/scrape_test.go | 33 ++++++++++++++++++++++------- 8 files changed, 103 insertions(+), 14 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 51320c661..94924d2c4 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -152,6 +152,7 @@ type flagConfig struct { queryConcurrency int queryMaxSamples int RemoteFlushDeadline model.Duration + nameEscapingScheme string featureList []string memlimitRatio float64 @@ -237,6 +238,9 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error { case "delayed-compaction": c.tsdb.EnableDelayedCompaction = true level.Info(logger).Log("msg", "Experimental delayed compaction is enabled.") + case "utf8-names": + model.NameValidationScheme = model.UTF8Validation + level.Info(logger).Log("msg", "Experimental UTF-8 support enabled") case "": continue case "promql-at-modifier", "promql-negative-offset": @@ -481,7 +485,9 @@ func main() { a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates."). Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval) - a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). 
+ a.Flag("scrape.name-escaping-scheme", `Method for escaping legacy invalid names when sending to Prometheus that does not support UTF-8. Can be one of "values", "underscores", or "dots".`).Default(scrape.DefaultNameEscapingScheme.String()).StringVar(&cfg.nameEscapingScheme) + + a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, utf8-names. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). Default("").StringsVar(&cfg.featureList) promlogflag.AddFlags(a, &cfg.promlogConfig) @@ -509,6 +515,15 @@ func main() { os.Exit(1) } + if cfg.nameEscapingScheme != "" { + scheme, err := model.ToEscapingScheme(cfg.nameEscapingScheme) + if err != nil { + fmt.Fprintf(os.Stderr, `Invalid name escaping scheme: %q; Needs to be one of "values", "underscores", or "dots"`, cfg.nameEscapingScheme) + os.Exit(1) + } + model.NameEscapingScheme = scheme + } + if agentMode && len(serverOnlyFlags) > 0 { fmt.Fprintf(os.Stderr, "The following flag(s) can not be used in agent mode: %q", serverOnlyFlags) os.Exit(3) diff --git a/config/config.go b/config/config.go index 7632a444f..4326b0a99 100644 --- a/config/config.go +++ b/config/config.go @@ -67,6 +67,11 @@ var ( } ) +const ( + LegacyValidationConfig = "legacy" + UTF8ValidationConfig = "utf8" +) + // Load parses the YAML input s into a Config. func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, error) { cfg := &Config{} @@ -446,6 +451,8 @@ type GlobalConfig struct { // Keep no more than this many dropped targets per job. // 0 means no limit. KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"` + // Allow UTF8 Metric and Label Names. + MetricNameValidationScheme string `yaml:"metric_name_validation_scheme,omitempty"` } // ScrapeProtocol represents supported protocol for scraping metrics. @@ -471,6 +478,7 @@ var ( PrometheusText0_0_4 ScrapeProtocol = "PrometheusText0.0.4" OpenMetricsText0_0_1 ScrapeProtocol = "OpenMetricsText0.0.1" OpenMetricsText1_0_0 ScrapeProtocol = "OpenMetricsText1.0.0" + UTF8NamesHeader string = model.EscapingKey + "=" + model.AllowUTF8 ScrapeProtocolsHeaders = map[ScrapeProtocol]string{ PrometheusProto: "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", @@ -656,6 +664,8 @@ type ScrapeConfig struct { // Keep no more than this many dropped targets per job. // 0 means no limit. KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"` + // Allow UTF8 Metric and Label Names. + MetricNameValidationScheme string `yaml:"metric_name_validation_scheme,omitempty"` // We cannot do proper Go type embedding below as the parser will then parse // values arbitrarily into the overflow maps of further-down types. 
@@ -762,6 +772,17 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error { return fmt.Errorf("%w for scrape config with job name %q", err, c.JobName) } + switch globalConfig.MetricNameValidationScheme { + case "", LegacyValidationConfig: + case UTF8ValidationConfig: + if model.NameValidationScheme != model.UTF8Validation { + return fmt.Errorf("utf8 name validation requested but feature not enabled via --enable-feature=utf8-names") + } + default: + return fmt.Errorf("unknown name validation method specified, must be either 'legacy' or 'utf8', got %s", globalConfig.MetricNameValidationScheme) + } + c.MetricNameValidationScheme = globalConfig.MetricNameValidationScheme + return nil } diff --git a/docs/command-line/prometheus.md b/docs/command-line/prometheus.md index b8f2e4241..a16e807e1 100644 --- a/docs/command-line/prometheus.md +++ b/docs/command-line/prometheus.md @@ -56,7 +56,8 @@ The Prometheus monitoring server | --query.timeout | Maximum time a query may take before being aborted. Use with server mode only. | `2m` | | --query.max-concurrency | Maximum number of queries executed concurrently. Use with server mode only. | `20` | | --query.max-samples | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` | -| --enable-feature | Comma separated feature names to enable. Valid options: agent, auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | +| --scrape.name-escaping-scheme | Method for escaping legacy invalid names when sending to Prometheus that does not support UTF-8. Can be one of "values", "underscores", or "dots". | `values` | +| --enable-feature | Comma separated feature names to enable. Valid options: agent, auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, utf8-names. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | | --log.level | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` | | --log.format | Output format of log messages. One of: [logfmt, json] | `logfmt` | diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 313a7f2f3..bc684b6f9 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -121,6 +121,11 @@ global: # that will be kept in memory. 0 means no limit. [ keep_dropped_targets: | default = 0 ] + # Specifies the validation scheme for metric and label names. Either blank or + # "legacy" for letters, numbers, colons, and underscores; or "utf8" for full + # UTF-8 support. 
+ [ metric_name_validation_scheme | default "legacy" ] + runtime: # Configure the Go garbage collector GOGC parameter # See: https://tip.golang.org/doc/gc-guide#GOGC @@ -461,6 +466,11 @@ metric_relabel_configs: # that will be kept in memory. 0 means no limit. [ keep_dropped_targets: | default = 0 ] +# Specifies the validation scheme for metric and label names. Either blank or +# "legacy" for letters, numbers, colons, and underscores; or "utf8" for full +# UTF-8 support. +[ metric_name_validation_scheme | default "legacy" ] + # Limit on total number of positive and negative buckets allowed in a single # native histogram. The resolution of a histogram with more buckets will be # reduced until the number of buckets is within the limit. If the limit cannot diff --git a/docs/feature_flags.md b/docs/feature_flags.md index c9a3558fa..0a908bb91 100644 --- a/docs/feature_flags.md +++ b/docs/feature_flags.md @@ -249,3 +249,11 @@ In the event of multiple consecutive Head compactions being possible, only the f Note that during this delay, the Head continues its usual operations, which include serving and appending series. Despite the delay in compaction, the blocks produced are time-aligned in the same manner as they would be if the delay was not in place. + +## UTF-8 Name Support + +`--enable-feature=utf8-names` + +When enabled, changes the metric and label name validation scheme inside Prometheus to allow the full UTF-8 character set. +By itself, this flag does not enable the request of UTF-8 names via content negotiation. +Users will also have to set `metric_name_validation_scheme` in scrape configs to enable the feature either on the global config or on a per-scrape config basis. diff --git a/scrape/manager.go b/scrape/manager.go index 156e949f8..6d4e8707b 100644 --- a/scrape/manager.go +++ b/scrape/manager.go @@ -93,6 +93,8 @@ type Options struct { skipOffsetting bool } +const DefaultNameEscapingScheme = model.ValueEncodingEscaping + // Manager maintains a set of scrape pools and manages start/stop cycles // when receiving new target groups from the discovery manager. 
type Manager struct { diff --git a/scrape/scrape.go b/scrape/scrape.go index 68411a62e..9979f7361 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -303,6 +303,11 @@ func (sp *scrapePool) restartLoops(reuseCache bool) { mrc = sp.config.MetricRelabelConfigs ) + validationScheme := model.LegacyValidation + if sp.config.MetricNameValidationScheme == config.UTF8ValidationConfig { + validationScheme = model.UTF8Validation + } + sp.targetMtx.Lock() forcedErr := sp.refreshTargetLimitErr() @@ -323,7 +328,7 @@ func (sp *scrapePool) restartLoops(reuseCache bool) { client: sp.client, timeout: timeout, bodySizeLimit: bodySizeLimit, - acceptHeader: acceptHeader(sp.config.ScrapeProtocols), + acceptHeader: acceptHeader(sp.config.ScrapeProtocols, validationScheme), acceptEncodingHeader: acceptEncodingHeader(enableCompression), } newLoop = sp.newLoop(scrapeLoopOptions{ @@ -452,6 +457,11 @@ func (sp *scrapePool) sync(targets []*Target) { scrapeClassicHistograms = sp.config.ScrapeClassicHistograms ) + validationScheme := model.LegacyValidation + if sp.config.MetricNameValidationScheme == config.UTF8ValidationConfig { + validationScheme = model.UTF8Validation + } + sp.targetMtx.Lock() for _, t := range targets { hash := t.hash() @@ -467,7 +477,7 @@ func (sp *scrapePool) sync(targets []*Target) { client: sp.client, timeout: timeout, bodySizeLimit: bodySizeLimit, - acceptHeader: acceptHeader(sp.config.ScrapeProtocols), + acceptHeader: acceptHeader(sp.config.ScrapeProtocols, validationScheme), acceptEncodingHeader: acceptEncodingHeader(enableCompression), metrics: sp.metrics, } @@ -714,11 +724,16 @@ var errBodySizeLimit = errors.New("body size limit exceeded") // acceptHeader transforms preference from the options into specific header values as // https://www.rfc-editor.org/rfc/rfc9110.html#name-accept defines. // No validation is here, we expect scrape protocols to be validated already. -func acceptHeader(sps []config.ScrapeProtocol) string { +func acceptHeader(sps []config.ScrapeProtocol, scheme model.ValidationScheme) string { var vals []string weight := len(config.ScrapeProtocolsHeaders) + 1 for _, sp := range sps { - vals = append(vals, fmt.Sprintf("%s;q=0.%d", config.ScrapeProtocolsHeaders[sp], weight)) + val := config.ScrapeProtocolsHeaders[sp] + if scheme == model.UTF8Validation { + val += ";" + config.UTF8NamesHeader + } + val += fmt.Sprintf(";q=0.%d", weight) + vals = append(vals, val) weight-- } // Default match anything. 
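For orientation, here is a minimal standalone sketch of the Accept-header entry this negotiation produces. It is not part of the patch: the media type string and the q-weight are illustrative assumptions, while the escaping=allow-utf-8 parameter and the weight rule come from config.UTF8NamesHeader and the acceptHeader loop shown above.

package main

import "fmt"

// Sketch only: builds one Accept header entry the way the patched acceptHeader
// does when UTF-8 name validation is negotiated. The media type and the final
// q-weight are assumed values for illustration.
func main() {
	val := "application/openmetrics-text;version=1.0.0" // assumed ScrapeProtocolsHeaders value
	utf8Requested := true                               // stands in for scheme == model.UTF8Validation
	if utf8Requested {
		val += ";escaping=allow-utf-8" // config.UTF8NamesHeader
	}
	val += fmt.Sprintf(";q=0.%d", 5) // weight starts at len(ScrapeProtocolsHeaders)+1 and decreases per protocol
	fmt.Println(val)
	// Output: application/openmetrics-text;version=1.0.0;escaping=allow-utf-8;q=0.5
}

The scrape test changes below assert the same parameter round-trips in the scraped Content-Type (text/plain; version=1.0.0; escaping=allow-utf-8).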
diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index a3fe6ac1a..be81b8677 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -2339,11 +2339,15 @@ func TestTargetScraperScrapeOK(t *testing.T) { ) var protobufParsing bool + var allowUTF8 bool server := httptest.NewServer( http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + accept := r.Header.Get("Accept") + if allowUTF8 { + require.Truef(t, strings.Contains(accept, "escaping=allow-utf-8"), "Expected Accept header to allow utf8, got %q", accept) + } if protobufParsing { - accept := r.Header.Get("Accept") require.True(t, strings.HasPrefix(accept, "application/vnd.google.protobuf;"), "Expected Accept header to prefer application/vnd.google.protobuf.") } @@ -2351,7 +2355,11 @@ func TestTargetScraperScrapeOK(t *testing.T) { timeout := r.Header.Get("X-Prometheus-Scrape-Timeout-Seconds") require.Equal(t, expectedTimeout, timeout, "Expected scrape timeout header.") - w.Header().Set("Content-Type", `text/plain; version=0.0.4`) + if allowUTF8 { + w.Header().Set("Content-Type", `text/plain; version=1.0.0; escaping=allow-utf-8`) + } else { + w.Header().Set("Content-Type", `text/plain; version=0.0.4`) + } w.Write([]byte("metric_a 1\nmetric_b 2\n")) }), ) @@ -2380,13 +2388,22 @@ func TestTargetScraperScrapeOK(t *testing.T) { require.NoError(t, err) contentType, err := ts.readResponse(context.Background(), resp, &buf) require.NoError(t, err) - require.Equal(t, "text/plain; version=0.0.4", contentType) + if allowUTF8 { + require.Equal(t, "text/plain; version=1.0.0; escaping=allow-utf-8", contentType) + } else { + require.Equal(t, "text/plain; version=0.0.4", contentType) + } require.Equal(t, "metric_a 1\nmetric_b 2\n", buf.String()) } - runTest(acceptHeader(config.DefaultScrapeProtocols)) + runTest(acceptHeader(config.DefaultScrapeProtocols, model.LegacyValidation)) protobufParsing = true - runTest(acceptHeader(config.DefaultProtoFirstScrapeProtocols)) + runTest(acceptHeader(config.DefaultProtoFirstScrapeProtocols, model.LegacyValidation)) + protobufParsing = false + allowUTF8 = true + runTest(acceptHeader(config.DefaultScrapeProtocols, model.UTF8Validation)) + protobufParsing = true + runTest(acceptHeader(config.DefaultProtoFirstScrapeProtocols, model.UTF8Validation)) } func TestTargetScrapeScrapeCancel(t *testing.T) { @@ -2412,7 +2429,7 @@ func TestTargetScrapeScrapeCancel(t *testing.T) { ), }, client: http.DefaultClient, - acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols), + acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols, model.LegacyValidation), } ctx, cancel := context.WithCancel(context.Background()) @@ -2467,7 +2484,7 @@ func TestTargetScrapeScrapeNotFound(t *testing.T) { ), }, client: http.DefaultClient, - acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols), + acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols, model.LegacyValidation), } resp, err := ts.scrape(context.Background()) @@ -2511,7 +2528,7 @@ func TestTargetScraperBodySizeLimit(t *testing.T) { }, client: http.DefaultClient, bodySizeLimit: bodySizeLimit, - acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols), + acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols, model.LegacyValidation), metrics: newTestScrapeMetrics(t), } var buf bytes.Buffer From 3b0a386c62513e0aaed04cd4d97beb9a9a8ab580 Mon Sep 17 00:00:00 2001 From: harshitasao Date: Sun, 18 Aug 2024 13:15:28 +0530 Subject: [PATCH 94/99] fix: fixed the vulnerability Signed-off-by: 
harshitasao --- documentation/examples/remote_storage/go.mod | 4 ++-- documentation/examples/remote_storage/go.sum | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index 35dca85a0..bab39303d 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -20,7 +20,7 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect - github.com/aws/aws-sdk-go v1.53.16 // indirect + github.com/aws/aws-sdk-go v1.55.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect @@ -62,7 +62,7 @@ require ( golang.org/x/text v0.16.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect - google.golang.org/grpc v1.64.0 // indirect + google.golang.org/grpc v1.65.0 // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index 4c420092f..6e283cc74 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -26,8 +26,8 @@ github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8V github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.53.16 h1:8oZjKQO/ml1WLUZw5hvF7pvYjPf8o9f57Wldoy/q9Qc= -github.com/aws/aws-sdk-go v1.53.16/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= +github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -37,8 +37,8 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc= -github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM= +github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw= +github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew 
v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -402,8 +402,8 @@ google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1: google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8= google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= -google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= -google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From 3a78e76282d47887a253a0fd19524ca49c4cabce Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Sun, 18 Aug 2024 11:27:04 +0200 Subject: [PATCH 95/99] Upgrade golangci-lint to v1.60.1 Signed-off-by: Arve Knudsen --- .github/workflows/ci.yml | 2 +- Makefile.common | 2 +- cmd/promtool/tsdb.go | 8 +- discovery/eureka/client.go | 1 - discovery/hetzner/robot.go | 1 - discovery/kubernetes/kubernetes_test.go | 8 +- notifier/notifier.go | 2 +- promql/functions.go | 32 +++---- promql/parser/lex.go | 12 +-- scripts/golangci-lint.yml | 2 +- storage/buffer_test.go | 16 ++-- storage/remote/client.go | 3 - storage/remote/queue_manager.go | 8 +- storage/remote/write_handler_test.go | 8 +- template/template.go | 2 +- tsdb/chunks/queue_test.go | 8 +- tsdb/db.go | 16 ++-- tsdb/db_test.go | 108 ++++++++++++------------ tsdb/head_append.go | 8 +- tsdb/head_test.go | 4 +- tsdb/index/postingsstats_test.go | 16 ++-- tsdb/wlog/wlog.go | 10 +-- util/testutil/directory.go | 2 +- web/web.go | 4 +- 24 files changed, 139 insertions(+), 144 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 92bbaebbf..c3a1d68e9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -186,7 +186,7 @@ jobs: with: args: --verbose # Make sure to sync this with Makefile.common and scripts/golangci-lint.yml. - version: v1.59.1 + version: v1.60.1 fuzzing: uses: ./.github/workflows/fuzzing.yml if: github.event_name == 'pull_request' diff --git a/Makefile.common b/Makefile.common index e3da72ab4..2ecd5465c 100644 --- a/Makefile.common +++ b/Makefile.common @@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_ SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.59.1 +GOLANGCI_LINT_VERSION ?= v1.60.1 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64. # windows isn't included here because of the path separator being different. 
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go index 2ed7244b1..b85a4fae8 100644 --- a/cmd/promtool/tsdb.go +++ b/cmd/promtool/tsdb.go @@ -866,16 +866,16 @@ func displayHistogram(dataType string, datas []int, total int) { fmt.Println() } -func generateBucket(min, max int) (start, end, step int) { - s := (max - min) / 10 +func generateBucket(minVal, maxVal int) (start, end, step int) { + s := (maxVal - minVal) / 10 step = 10 for step < s && step <= 10000 { step *= 10 } - start = min - min%step - end = max - max%step + step + start = minVal - minVal%step + end = maxVal - maxVal%step + step return } diff --git a/discovery/eureka/client.go b/discovery/eureka/client.go index 5a90968f1..52e8ce7b4 100644 --- a/discovery/eureka/client.go +++ b/discovery/eureka/client.go @@ -97,7 +97,6 @@ func fetchApps(ctx context.Context, server string, client *http.Client) (*Applic resp.Body.Close() }() - //nolint:usestdlibvars if resp.StatusCode/100 != 2 { return nil, fmt.Errorf("non 2xx status '%d' response during eureka service discovery", resp.StatusCode) } diff --git a/discovery/hetzner/robot.go b/discovery/hetzner/robot.go index 64155bfae..516470b05 100644 --- a/discovery/hetzner/robot.go +++ b/discovery/hetzner/robot.go @@ -87,7 +87,6 @@ func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) resp.Body.Close() }() - //nolint:usestdlibvars if resp.StatusCode/100 != 2 { return nil, fmt.Errorf("non 2xx status '%d' response during hetzner service discovery with role robot", resp.StatusCode) } diff --git a/discovery/kubernetes/kubernetes_test.go b/discovery/kubernetes/kubernetes_test.go index 552f8a445..50f25a20a 100644 --- a/discovery/kubernetes/kubernetes_test.go +++ b/discovery/kubernetes/kubernetes_test.go @@ -154,7 +154,7 @@ func (d k8sDiscoveryTest) Run(t *testing.T) { // readResultWithTimeout reads all targetgroups from channel with timeout. // It merges targetgroups by source and sends the result to result channel. -func readResultWithTimeout(t *testing.T, ctx context.Context, ch <-chan []*targetgroup.Group, max int, stopAfter time.Duration, resChan chan<- map[string]*targetgroup.Group) { +func readResultWithTimeout(t *testing.T, ctx context.Context, ch <-chan []*targetgroup.Group, maxGroups int, stopAfter time.Duration, resChan chan<- map[string]*targetgroup.Group) { res := make(map[string]*targetgroup.Group) timeout := time.After(stopAfter) Loop: @@ -167,7 +167,7 @@ Loop: } res[tg.Source] = tg } - if len(res) == max { + if len(res) == maxGroups { // Reached max target groups we may get, break fast. break Loop } @@ -175,10 +175,10 @@ Loop: // Because we use queue, an object that is created then // deleted or updated may be processed only once. // So possibly we may skip events, timed out here. - t.Logf("timed out, got %d (max: %d) items, some events are skipped", len(res), max) + t.Logf("timed out, got %d (max: %d) items, some events are skipped", len(res), maxGroups) break Loop case <-ctx.Done(): - t.Logf("stopped, got %d (max: %d) items", len(res), max) + t.Logf("stopped, got %d (max: %d) items", len(res), maxGroups) break Loop } } diff --git a/notifier/notifier.go b/notifier/notifier.go index 68b0d4961..2ebfbbee5 100644 --- a/notifier/notifier.go +++ b/notifier/notifier.go @@ -674,7 +674,7 @@ func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []b }() // Any HTTP status 2xx is OK. 
- //nolint:usestdlibvars + if resp.StatusCode/100 != 2 { return fmt.Errorf("bad response status %s", resp.Status) } diff --git a/promql/functions.go b/promql/functions.go index 018023bf0..2af06c174 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -467,15 +467,15 @@ func funcSortByLabelDesc(vals []parser.Value, args parser.Expressions, enh *Eval // === clamp(Vector parser.ValueTypeVector, min, max Scalar) (Vector, Annotations) === func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) - min := vals[1].(Vector)[0].F - max := vals[2].(Vector)[0].F - if max < min { + minVal := vals[1].(Vector)[0].F + maxVal := vals[2].(Vector)[0].F + if maxVal < minVal { return enh.Out, nil } for _, el := range vec { enh.Out = append(enh.Out, Sample{ Metric: el.Metric.DropMetricName(), - F: math.Max(min, math.Min(max, el.F)), + F: math.Max(minVal, math.Min(maxVal, el.F)), }) } return enh.Out, nil @@ -484,11 +484,11 @@ func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper // === clamp_max(Vector parser.ValueTypeVector, max Scalar) (Vector, Annotations) === func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) - max := vals[1].(Vector)[0].F + maxVal := vals[1].(Vector)[0].F for _, el := range vec { enh.Out = append(enh.Out, Sample{ Metric: el.Metric.DropMetricName(), - F: math.Min(max, el.F), + F: math.Min(maxVal, el.F), }) } return enh.Out, nil @@ -497,11 +497,11 @@ func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel // === clamp_min(Vector parser.ValueTypeVector, min Scalar) (Vector, Annotations) === func funcClampMin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) - min := vals[1].(Vector)[0].F + minVal := vals[1].(Vector)[0].F for _, el := range vec { enh.Out = append(enh.Out, Sample{ Metric: el.Metric.DropMetricName(), - F: math.Max(min, el.F), + F: math.Max(minVal, el.F), }) } return enh.Out, nil @@ -700,13 +700,13 @@ func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode return enh.Out, nil } return aggrOverTime(vals, enh, func(s Series) float64 { - max := s.Floats[0].F + maxVal := s.Floats[0].F for _, f := range s.Floats { - if f.F > max || math.IsNaN(max) { - max = f.F + if f.F > maxVal || math.IsNaN(maxVal) { + maxVal = f.F } } - return max + return maxVal }), nil } @@ -720,13 +720,13 @@ func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode return enh.Out, nil } return aggrOverTime(vals, enh, func(s Series) float64 { - min := s.Floats[0].F + minVal := s.Floats[0].F for _, f := range s.Floats { - if f.F < min || math.IsNaN(min) { - min = f.F + if f.F < minVal || math.IsNaN(minVal) { + minVal = f.F } } - return min + return minVal }), nil } diff --git a/promql/parser/lex.go b/promql/parser/lex.go index 9b88ab556..0cefa30c8 100644 --- a/promql/parser/lex.go +++ b/promql/parser/lex.go @@ -727,23 +727,23 @@ func lexValueSequence(l *Lexer) stateFn { // was only modified to integrate with our lexer. 
func lexEscape(l *Lexer) stateFn { var n int - var base, max uint32 + var base, maxVal uint32 ch := l.next() switch ch { case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', l.stringOpen: return lexString case '0', '1', '2', '3', '4', '5', '6', '7': - n, base, max = 3, 8, 255 + n, base, maxVal = 3, 8, 255 case 'x': ch = l.next() - n, base, max = 2, 16, 255 + n, base, maxVal = 2, 16, 255 case 'u': ch = l.next() - n, base, max = 4, 16, unicode.MaxRune + n, base, maxVal = 4, 16, unicode.MaxRune case 'U': ch = l.next() - n, base, max = 8, 16, unicode.MaxRune + n, base, maxVal = 8, 16, unicode.MaxRune case eof: l.errorf("escape sequence not terminated") return lexString @@ -772,7 +772,7 @@ func lexEscape(l *Lexer) stateFn { } } - if x > max || 0xD800 <= x && x < 0xE000 { + if x > maxVal || 0xD800 <= x && x < 0xE000 { l.errorf("escape sequence is an invalid Unicode code point") } return lexString diff --git a/scripts/golangci-lint.yml b/scripts/golangci-lint.yml index 746831a86..fc0f9c654 100644 --- a/scripts/golangci-lint.yml +++ b/scripts/golangci-lint.yml @@ -36,4 +36,4 @@ jobs: uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 # v6.1.0 with: args: --verbose - version: v1.59.1 + version: v1.60.1 diff --git a/storage/buffer_test.go b/storage/buffer_test.go index 1b24e5da2..b5c6443ac 100644 --- a/storage/buffer_test.go +++ b/storage/buffer_test.go @@ -96,10 +96,10 @@ func TestSampleRingMixed(t *testing.T) { // With ValNone as the preferred type, nothing should be initialized. r := newSampleRing(10, 2, chunkenc.ValNone) - require.Zero(t, len(r.fBuf)) - require.Zero(t, len(r.hBuf)) - require.Zero(t, len(r.fhBuf)) - require.Zero(t, len(r.iBuf)) + require.Empty(t, r.fBuf) + require.Empty(t, r.hBuf) + require.Empty(t, r.fhBuf) + require.Empty(t, r.iBuf) // But then mixed adds should work as expected. r.addF(fSample{t: 1, f: 3.14}) @@ -146,10 +146,10 @@ func TestSampleRingAtFloatHistogram(t *testing.T) { // With ValNone as the preferred type, nothing should be initialized. r := newSampleRing(10, 2, chunkenc.ValNone) - require.Zero(t, len(r.fBuf)) - require.Zero(t, len(r.hBuf)) - require.Zero(t, len(r.fhBuf)) - require.Zero(t, len(r.iBuf)) + require.Empty(t, r.fBuf) + require.Empty(t, r.hBuf) + require.Empty(t, r.fhBuf) + require.Empty(t, r.iBuf) var ( h *histogram.Histogram diff --git a/storage/remote/client.go b/storage/remote/client.go index 11e423b6a..2a66739ed 100644 --- a/storage/remote/client.go +++ b/storage/remote/client.go @@ -287,7 +287,6 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) (WriteRespo // we can continue handling. rs, _ := ParseWriteResponseStats(httpResp) - //nolint:usestdlibvars if httpResp.StatusCode/100 == 2 { return rs, nil } @@ -297,7 +296,6 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) (WriteRespo body, _ := io.ReadAll(io.LimitReader(httpResp.Body, maxErrMsgLen)) err = fmt.Errorf("server returned HTTP status %s: %s", httpResp.Status, body) - //nolint:usestdlibvars if httpResp.StatusCode/100 == 5 || (c.retryOnRateLimit && httpResp.StatusCode == http.StatusTooManyRequests) { return rs, RecoverableError{err, retryAfterDuration(httpResp.Header.Get("Retry-After"))} @@ -382,7 +380,6 @@ func (c *Client) Read(ctx context.Context, query *prompb.Query) (*prompb.QueryRe return nil, fmt.Errorf("error reading response. 
HTTP status code: %s: %w", httpResp.Status, err) } - //nolint:usestdlibvars if httpResp.StatusCode/100 != 2 { return nil, fmt.Errorf("remote server %s returned HTTP status %s: %s", c.urlString, httpResp.Status, strings.TrimSpace(string(compressed))) } diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go index 17ff1850f..b1c899726 100644 --- a/storage/remote/queue_manager.go +++ b/storage/remote/queue_manager.go @@ -1522,7 +1522,7 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) { // Send batches of at most MaxSamplesPerSend samples to the remote storage. // If we have fewer samples than that, flush them out after a deadline anyways. var ( - max = s.qm.cfg.MaxSamplesPerSend + maxCount = s.qm.cfg.MaxSamplesPerSend pBuf = proto.NewBuffer(nil) pBufRaw []byte @@ -1530,19 +1530,19 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) { ) // TODO(@tpaschalis) Should we also raise the max if we have WAL metadata? if s.qm.sendExemplars { - max += int(float64(max) * 0.1) + maxCount += int(float64(maxCount) * 0.1) } // TODO: Dry all of this, we should make an interface/generic for the timeseries type. batchQueue := queue.Chan() - pendingData := make([]prompb.TimeSeries, max) + pendingData := make([]prompb.TimeSeries, maxCount) for i := range pendingData { pendingData[i].Samples = []prompb.Sample{{}} if s.qm.sendExemplars { pendingData[i].Exemplars = []prompb.Exemplar{{}} } } - pendingDataV2 := make([]writev2.TimeSeries, max) + pendingDataV2 := make([]writev2.TimeSeries, maxCount) for i := range pendingDataV2 { pendingDataV2[i].Samples = []writev2.Sample{{}} } diff --git a/storage/remote/write_handler_test.go b/storage/remote/write_handler_test.go index af2229b9a..6e1336a7a 100644 --- a/storage/remote/write_handler_test.go +++ b/storage/remote/write_handler_test.go @@ -453,10 +453,10 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) { expectHeaderValue(t, 0, resp.Header.Get(rw20WrittenHistogramsHeader)) expectHeaderValue(t, 0, resp.Header.Get(rw20WrittenExemplarsHeader)) - require.Empty(t, len(appendable.samples)) - require.Empty(t, len(appendable.histograms)) - require.Empty(t, len(appendable.exemplars)) - require.Empty(t, len(appendable.metadata)) + require.Empty(t, appendable.samples) + require.Empty(t, appendable.histograms) + require.Empty(t, appendable.exemplars) + require.Empty(t, appendable.metadata) return } diff --git a/template/template.go b/template/template.go index c507dbe74..9ffed6ff6 100644 --- a/template/template.go +++ b/template/template.go @@ -166,7 +166,7 @@ func NewTemplateExpander( return html_template.HTML(text) }, "match": regexp.MatchString, - "title": strings.Title, //nolint:staticcheck + "title": strings.Title, "toUpper": strings.ToUpper, "toLower": strings.ToLower, "graphLink": strutil.GraphLinkForExpression, diff --git a/tsdb/chunks/queue_test.go b/tsdb/chunks/queue_test.go index 9f761a5f3..3d9275eee 100644 --- a/tsdb/chunks/queue_test.go +++ b/tsdb/chunks/queue_test.go @@ -69,16 +69,16 @@ func TestQueuePushPopSingleGoroutine(t *testing.T) { const maxSize = 500 const maxIters = 50 - for max := 1; max < maxSize; max++ { - queue := newWriteJobQueue(max, 1+(r.Int()%max)) + for maxCount := 1; maxCount < maxSize; maxCount++ { + queue := newWriteJobQueue(maxCount, 1+(r.Int()%maxCount)) elements := 0 // total elements in the queue lastWriteID := 0 lastReadID := 0 for iter := 0; iter < maxIters; iter++ { - if elements < max { - toWrite := r.Int() % (max - elements) + if elements < maxCount { + 
toWrite := r.Int() % (maxCount - elements) if toWrite == 0 { toWrite = 1 } diff --git a/tsdb/db.go b/tsdb/db.go index 94c44161d..706e5bbac 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -693,7 +693,7 @@ func (db *DBReadOnly) LastBlockID() (string, error) { return "", err } - max := uint64(0) + maxT := uint64(0) lastBlockID := "" @@ -705,8 +705,8 @@ func (db *DBReadOnly) LastBlockID() (string, error) { continue // Not a block dir. } timestamp := ulidObj.Time() - if timestamp > max { - max = timestamp + if timestamp > maxT { + maxT = timestamp lastBlockID = dirName } } @@ -2316,13 +2316,13 @@ func blockDirs(dir string) ([]string, error) { return dirs, nil } -func exponential(d, min, max time.Duration) time.Duration { +func exponential(d, minD, maxD time.Duration) time.Duration { d *= 2 - if d < min { - d = min + if d < minD { + d = minD } - if d > max { - d = max + if d > maxD { + d = maxD } return d } diff --git a/tsdb/db_test.go b/tsdb/db_test.go index 5943489ff..904fdeffc 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -4529,8 +4529,8 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario, addExtraSample addSample := func(fromMins, toMins int64) { app := db.Appender(context.Background()) - for min := fromMins; min <= toMins; min++ { - ts := min * time.Minute.Milliseconds() + for m := fromMins; m <= toMins; m++ { + ts := m * time.Minute.Milliseconds() _, _, err := scenario.appendFunc(app, series1, ts, ts) require.NoError(t, err) _, _, err = scenario.appendFunc(app, series2, ts, 2*ts) @@ -4566,8 +4566,8 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario, addExtraSample var series1Samples, series2Samples []chunks.Sample for _, r := range [][2]int64{{90, 119}, {120, 239}, {240, highest}} { fromMins, toMins := r[0], r[1] - for min := fromMins; min <= toMins; min++ { - ts := min * time.Minute.Milliseconds() + for m := fromMins; m <= toMins; m++ { + ts := m * time.Minute.Milliseconds() series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts)) series2Samples = append(series2Samples, scenario.sampleFunc(ts, 2*ts)) } @@ -4645,8 +4645,8 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario, addExtraSample verifySamples := func(block *Block, fromMins, toMins int64) { series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1) series2Samples := make([]chunks.Sample, 0, toMins-fromMins+1) - for min := fromMins; min <= toMins; min++ { - ts := min * time.Minute.Milliseconds() + for m := fromMins; m <= toMins; m++ { + ts := m * time.Minute.Milliseconds() series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts)) series2Samples = append(series2Samples, scenario.sampleFunc(ts, 2*ts)) } @@ -4730,8 +4730,8 @@ func testOOOCompactionWithNormalCompaction(t *testing.T, scenario sampleTypeScen addSamples := func(fromMins, toMins int64) { app := db.Appender(context.Background()) - for min := fromMins; min <= toMins; min++ { - ts := min * time.Minute.Milliseconds() + for m := fromMins; m <= toMins; m++ { + ts := m * time.Minute.Milliseconds() _, _, err := scenario.appendFunc(app, series1, ts, ts) require.NoError(t, err) _, _, err = scenario.appendFunc(app, series2, ts, 2*ts) @@ -4785,8 +4785,8 @@ func testOOOCompactionWithNormalCompaction(t *testing.T, scenario sampleTypeScen verifySamples := func(block *Block, fromMins, toMins int64) { series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1) series2Samples := make([]chunks.Sample, 0, toMins-fromMins+1) - for min := fromMins; min <= toMins; min++ { - ts := min * time.Minute.Milliseconds() + 
for m := fromMins; m <= toMins; m++ { + ts := m * time.Minute.Milliseconds() series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts)) series2Samples = append(series2Samples, scenario.sampleFunc(ts, 2*ts)) } @@ -4839,8 +4839,8 @@ func testOOOCompactionWithDisabledWriteLog(t *testing.T, scenario sampleTypeScen addSamples := func(fromMins, toMins int64) { app := db.Appender(context.Background()) - for min := fromMins; min <= toMins; min++ { - ts := min * time.Minute.Milliseconds() + for m := fromMins; m <= toMins; m++ { + ts := m * time.Minute.Milliseconds() _, _, err := scenario.appendFunc(app, series1, ts, ts) require.NoError(t, err) _, _, err = scenario.appendFunc(app, series2, ts, 2*ts) @@ -4894,8 +4894,8 @@ func testOOOCompactionWithDisabledWriteLog(t *testing.T, scenario sampleTypeScen verifySamples := func(block *Block, fromMins, toMins int64) { series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1) series2Samples := make([]chunks.Sample, 0, toMins-fromMins+1) - for min := fromMins; min <= toMins; min++ { - ts := min * time.Minute.Milliseconds() + for m := fromMins; m <= toMins; m++ { + ts := m * time.Minute.Milliseconds() series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts)) series2Samples = append(series2Samples, scenario.sampleFunc(ts, 2*ts)) } @@ -4948,8 +4948,8 @@ func testOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T, scenario sa addSamples := func(fromMins, toMins int64) { app := db.Appender(context.Background()) - for min := fromMins; min <= toMins; min++ { - ts := min * time.Minute.Milliseconds() + for m := fromMins; m <= toMins; m++ { + ts := m * time.Minute.Milliseconds() _, _, err := scenario.appendFunc(app, series1, ts, ts) require.NoError(t, err) _, _, err = scenario.appendFunc(app, series2, ts, 2*ts) @@ -4996,8 +4996,8 @@ func testOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T, scenario sa verifySamples := func(fromMins, toMins int64) { series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1) series2Samples := make([]chunks.Sample, 0, toMins-fromMins+1) - for min := fromMins; min <= toMins; min++ { - ts := min * time.Minute.Milliseconds() + for m := fromMins; m <= toMins; m++ { + ts := m * time.Minute.Milliseconds() series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts)) series2Samples = append(series2Samples, scenario.sampleFunc(ts, ts*2)) } @@ -5045,10 +5045,10 @@ func Test_Querier_OOOQuery(t *testing.T) { addSample := func(db *DB, fromMins, toMins, queryMinT, queryMaxT int64, expSamples []chunks.Sample) ([]chunks.Sample, int) { app := db.Appender(context.Background()) totalAppended := 0 - for min := fromMins; min <= toMins; min += time.Minute.Milliseconds() { - _, err := app.Append(0, series1, min, float64(min)) - if min >= queryMinT && min <= queryMaxT { - expSamples = append(expSamples, sample{t: min, f: float64(min)}) + for m := fromMins; m <= toMins; m += time.Minute.Milliseconds() { + _, err := app.Append(0, series1, m, float64(m)) + if m >= queryMinT && m <= queryMaxT { + expSamples = append(expSamples, sample{t: m, f: float64(m)}) } require.NoError(t, err) totalAppended++ @@ -5129,10 +5129,10 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) { addSample := func(db *DB, fromMins, toMins, queryMinT, queryMaxT int64, expSamples []chunks.Sample) ([]chunks.Sample, int) { app := db.Appender(context.Background()) totalAppended := 0 - for min := fromMins; min <= toMins; min += time.Minute.Milliseconds() { - _, err := app.Append(0, series1, min, float64(min)) - if min >= queryMinT && min <= 
queryMaxT { - expSamples = append(expSamples, sample{t: min, f: float64(min)}) + for m := fromMins; m <= toMins; m += time.Minute.Milliseconds() { + _, err := app.Append(0, series1, m, float64(m)) + if m >= queryMinT && m <= queryMaxT { + expSamples = append(expSamples, sample{t: m, f: float64(m)}) } require.NoError(t, err) totalAppended++ @@ -5239,9 +5239,9 @@ func testOOOAppendAndQuery(t *testing.T, scenario sampleTypeScenario) { app := db.Appender(context.Background()) key := lbls.String() from, to := minutes(fromMins), minutes(toMins) - for min := from; min <= to; min += time.Minute.Milliseconds() { + for m := from; m <= to; m += time.Minute.Milliseconds() { val := rand.Intn(1000) - _, s, err := scenario.appendFunc(app, lbls, min, int64(val)) + _, s, err := scenario.appendFunc(app, lbls, m, int64(val)) if faceError { require.Error(t, err) } else { @@ -5370,14 +5370,14 @@ func testOOODisabled(t *testing.T, scenario sampleTypeScenario) { app := db.Appender(context.Background()) key := lbls.String() from, to := minutes(fromMins), minutes(toMins) - for min := from; min <= to; min += time.Minute.Milliseconds() { - _, _, err := scenario.appendFunc(app, lbls, min, min) + for m := from; m <= to; m += time.Minute.Milliseconds() { + _, _, err := scenario.appendFunc(app, lbls, m, m) if faceError { require.Error(t, err) failedSamples++ } else { require.NoError(t, err) - expSamples[key] = append(expSamples[key], scenario.sampleFunc(min, min)) + expSamples[key] = append(expSamples[key], scenario.sampleFunc(m, m)) totalSamples++ } } @@ -5444,9 +5444,9 @@ func testWBLAndMmapReplay(t *testing.T, scenario sampleTypeScenario) { app := db.Appender(context.Background()) key := lbls.String() from, to := minutes(fromMins), minutes(toMins) - for min := from; min <= to; min += time.Minute.Milliseconds() { + for m := from; m <= to; m += time.Minute.Milliseconds() { val := rand.Intn(1000) - _, s, err := scenario.appendFunc(app, lbls, min, int64(val)) + _, s, err := scenario.appendFunc(app, lbls, m, int64(val)) require.NoError(t, err) expSamples[key] = append(expSamples[key], s) totalSamples++ @@ -5635,8 +5635,8 @@ func testOOOCompactionFailure(t *testing.T, scenario sampleTypeScenario) { addSample := func(fromMins, toMins int64) { app := db.Appender(context.Background()) - for min := fromMins; min <= toMins; min++ { - ts := min * time.Minute.Milliseconds() + for m := fromMins; m <= toMins; m++ { + ts := m * time.Minute.Milliseconds() _, _, err := scenario.appendFunc(app, series1, ts, ts) require.NoError(t, err) } @@ -5723,8 +5723,8 @@ func testOOOCompactionFailure(t *testing.T, scenario sampleTypeScenario) { verifySamples := func(block *Block, fromMins, toMins int64) { series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1) - for min := fromMins; min <= toMins; min++ { - ts := min * time.Minute.Milliseconds() + for m := fromMins; m <= toMins; m++ { + ts := m * time.Minute.Milliseconds() series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts)) } expRes := map[string][]chunks.Sample{ @@ -5772,8 +5772,8 @@ func TestWBLCorruption(t *testing.T) { var allSamples, expAfterRestart []chunks.Sample addSamples := func(fromMins, toMins int64, afterRestart bool) { app := db.Appender(context.Background()) - for min := fromMins; min <= toMins; min++ { - ts := min * time.Minute.Milliseconds() + for m := fromMins; m <= toMins; m++ { + ts := m * time.Minute.Milliseconds() _, err := app.Append(0, series1, ts, float64(ts)) require.NoError(t, err) allSamples = append(allSamples, sample{t: ts, f: float64(ts)}) @@ 
-5926,8 +5926,8 @@ func testOOOMmapCorruption(t *testing.T, scenario sampleTypeScenario) { var allSamples, expInMmapChunks []chunks.Sample addSamples := func(fromMins, toMins int64, inMmapAfterCorruption bool) { app := db.Appender(context.Background()) - for min := fromMins; min <= toMins; min++ { - ts := min * time.Minute.Milliseconds() + for m := fromMins; m <= toMins; m++ { + ts := m * time.Minute.Milliseconds() _, s, err := scenario.appendFunc(app, series1, ts, ts) require.NoError(t, err) allSamples = append(allSamples, s) @@ -6071,8 +6071,8 @@ func testOutOfOrderRuntimeConfig(t *testing.T, scenario sampleTypeScenario) { series1 := labels.FromStrings("foo", "bar1") addSamples := func(t *testing.T, db *DB, fromMins, toMins int64, success bool, allSamples []chunks.Sample) []chunks.Sample { app := db.Appender(context.Background()) - for min := fromMins; min <= toMins; min++ { - ts := min * time.Minute.Milliseconds() + for m := fromMins; m <= toMins; m++ { + ts := m * time.Minute.Milliseconds() _, s, err := scenario.appendFunc(app, series1, ts, ts) if success { require.NoError(t, err) @@ -6105,7 +6105,7 @@ func testOutOfOrderRuntimeConfig(t *testing.T, scenario sampleTypeScenario) { // WBL is not empty. size, err := db.head.wbl.Size() require.NoError(t, err) - require.Greater(t, size, int64(0)) + require.Positive(t, size) require.Empty(t, db.Blocks()) require.NoError(t, db.compactOOOHead(ctx)) @@ -6282,8 +6282,8 @@ func testNoGapAfterRestartWithOOO(t *testing.T, scenario sampleTypeScenario) { series1 := labels.FromStrings("foo", "bar1") addSamples := func(t *testing.T, db *DB, fromMins, toMins int64, success bool) { app := db.Appender(context.Background()) - for min := fromMins; min <= toMins; min++ { - ts := min * time.Minute.Milliseconds() + for m := fromMins; m <= toMins; m++ { + ts := m * time.Minute.Milliseconds() _, _, err := scenario.appendFunc(app, series1, ts, ts) if success { require.NoError(t, err) @@ -6296,8 +6296,8 @@ func testNoGapAfterRestartWithOOO(t *testing.T, scenario sampleTypeScenario) { verifySamples := func(t *testing.T, db *DB, fromMins, toMins int64) { var expSamples []chunks.Sample - for min := fromMins; min <= toMins; min++ { - ts := min * time.Minute.Milliseconds() + for m := fromMins; m <= toMins; m++ { + ts := m * time.Minute.Milliseconds() expSamples = append(expSamples, scenario.sampleFunc(ts, ts)) } @@ -6410,8 +6410,8 @@ func testWblReplayAfterOOODisableAndRestart(t *testing.T, scenario sampleTypeSce var allSamples []chunks.Sample addSamples := func(fromMins, toMins int64) { app := db.Appender(context.Background()) - for min := fromMins; min <= toMins; min++ { - ts := min * time.Minute.Milliseconds() + for m := fromMins; m <= toMins; m++ { + ts := m * time.Minute.Milliseconds() _, s, err := scenario.appendFunc(app, series1, ts, ts) require.NoError(t, err) allSamples = append(allSamples, s) @@ -6477,8 +6477,8 @@ func testPanicOnApplyConfig(t *testing.T, scenario sampleTypeScenario) { var allSamples []chunks.Sample addSamples := func(fromMins, toMins int64) { app := db.Appender(context.Background()) - for min := fromMins; min <= toMins; min++ { - ts := min * time.Minute.Milliseconds() + for m := fromMins; m <= toMins; m++ { + ts := m * time.Minute.Milliseconds() _, s, err := scenario.appendFunc(app, series1, ts, ts) require.NoError(t, err) allSamples = append(allSamples, s) @@ -6534,8 +6534,8 @@ func testDiskFillingUpAfterDisablingOOO(t *testing.T, scenario sampleTypeScenari var allSamples []chunks.Sample addSamples := func(fromMins, toMins int64) { app := 
db.Appender(context.Background()) - for min := fromMins; min <= toMins; min++ { - ts := min * time.Minute.Milliseconds() + for m := fromMins; m <= toMins; m++ { + ts := m * time.Minute.Milliseconds() _, s, err := scenario.appendFunc(app, series1, ts, ts) require.NoError(t, err) allSamples = append(allSamples, s) diff --git a/tsdb/head_append.go b/tsdb/head_append.go index b66ac7278..988ce9397 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -1403,12 +1403,12 @@ func (s *memSeries) histogramsAppendPreprocessor(t int64, e chunkenc.Encoding, o // It assumes that the time range is 1/ratioToFull full. // Assuming that the samples will keep arriving at the same rate, it will make the // remaining n chunks within this chunk range (before max) equally sized. -func computeChunkEndTime(start, cur, max int64, ratioToFull float64) int64 { - n := float64(max-start) / (float64(cur-start+1) * ratioToFull) +func computeChunkEndTime(start, cur, maxT int64, ratioToFull float64) int64 { + n := float64(maxT-start) / (float64(cur-start+1) * ratioToFull) if n <= 1 { - return max + return maxT } - return int64(float64(start) + float64(max-start)/math.Floor(n)) + return int64(float64(start) + float64(maxT-start)/math.Floor(n)) } func (s *memSeries) cutNewHeadChunk(mint int64, e chunkenc.Encoding, chunkRange int64) *memChunk { diff --git a/tsdb/head_test.go b/tsdb/head_test.go index f73ce38ba..0ce60b849 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -2757,7 +2757,7 @@ func testOutOfOrderSamplesMetric(t *testing.T, scenario sampleTypeScenario) { require.Equal(t, int64(math.MinInt64), db.head.minValidTime.Load()) require.NoError(t, db.Compact(ctx)) - require.Greater(t, db.head.minValidTime.Load(), int64(0)) + require.Positive(t, db.head.minValidTime.Load()) app = db.Appender(ctx) _, err = appendSample(app, db.head.minValidTime.Load()-2) @@ -3677,7 +3677,7 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) { require.Len(t, ms.mmappedChunks, 25) expMmapChunks := make([]*mmappedChunk, 0, 20) for _, mmap := range ms.mmappedChunks { - require.Greater(t, mmap.numSamples, uint16(0)) + require.Positive(t, mmap.numSamples) cpy := *mmap expMmapChunks = append(expMmapChunks, &cpy) } diff --git a/tsdb/index/postingsstats_test.go b/tsdb/index/postingsstats_test.go index 674e1c052..82f506bc8 100644 --- a/tsdb/index/postingsstats_test.go +++ b/tsdb/index/postingsstats_test.go @@ -20,10 +20,10 @@ import ( func TestPostingsStats(t *testing.T) { stats := &maxHeap{} - max := 3000000 - heapLength := 10 + const maxCount = 3000000 + const heapLength = 10 stats.init(heapLength) - for i := 0; i < max; i++ { + for i := 0; i < maxCount; i++ { item := Stat{ Name: "Label-da", Count: uint64(i), @@ -35,13 +35,13 @@ func TestPostingsStats(t *testing.T) { data := stats.get() require.Len(t, data, 10) for i := 0; i < heapLength; i++ { - require.Equal(t, uint64(max-i), data[i].Count) + require.Equal(t, uint64(maxCount-i), data[i].Count) } } func TestPostingsStats2(t *testing.T) { stats := &maxHeap{} - heapLength := 10 + const heapLength = 10 stats.init(heapLength) stats.push(Stat{Name: "Stuff", Count: 10}) @@ -57,12 +57,12 @@ func TestPostingsStats2(t *testing.T) { func BenchmarkPostingStatsMaxHep(b *testing.B) { stats := &maxHeap{} - max := 9000000 - heapLength := 10 + const maxCount = 9000000 + const heapLength = 10 b.ResetTimer() for n := 0; n < b.N; n++ { stats.init(heapLength) - for i := 0; i < max; i++ { + for i := 0; i < maxCount; i++ { item := Stat{ Name: "Label-da", Count: uint64(i), diff --git a/tsdb/wlog/wlog.go 
b/tsdb/wlog/wlog.go index 668fbb5fb..993e930ce 100644 --- a/tsdb/wlog/wlog.go +++ b/tsdb/wlog/wlog.go @@ -612,16 +612,16 @@ func (w *WL) setSegment(segment *Segment) error { // flushPage writes the new contents of the page to disk. If no more records will fit into // the page, the remaining bytes will be set to zero and a new page will be started. -// If clear is true, this is enforced regardless of how many bytes are left in the page. -func (w *WL) flushPage(clear bool) error { +// If forceClear is true, this is enforced regardless of how many bytes are left in the page. +func (w *WL) flushPage(forceClear bool) error { w.metrics.pageFlushes.Inc() p := w.page - clear = clear || p.full() + shouldClear := forceClear || p.full() // No more data will fit into the page or an implicit clear. // Enqueue and clear it. - if clear { + if shouldClear { p.alloc = pageSize // Write till end of page. } @@ -633,7 +633,7 @@ func (w *WL) flushPage(clear bool) error { p.flushed += n // We flushed an entire page, prepare a new one. - if clear { + if shouldClear { p.reset() w.donePages++ w.metrics.pageCompletions.Inc() diff --git a/util/testutil/directory.go b/util/testutil/directory.go index 8aa17702d..38dabd183 100644 --- a/util/testutil/directory.go +++ b/util/testutil/directory.go @@ -155,7 +155,7 @@ func DirHash(t *testing.T, path string) []byte { modTime, err := info.ModTime().GobEncode() require.NoError(t, err) - _, err = io.WriteString(hash, string(modTime)) + _, err = hash.Write(modTime) require.NoError(t, err) return nil }) diff --git a/web/web.go b/web/web.go index 9426ed935..9fba6fc95 100644 --- a/web/web.go +++ b/web/web.go @@ -481,14 +481,14 @@ func New(logger log.Logger, o *Options) *Handler { router.Get("/-/healthy", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - fmt.Fprintf(w, o.AppName+" is Healthy.\n") + fmt.Fprint(w, o.AppName+" is Healthy.\n") }) router.Head("/-/healthy", func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) }) router.Get("/-/ready", readyf(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - fmt.Fprintf(w, o.AppName+" is Ready.\n") + fmt.Fprint(w, o.AppName+" is Ready.\n") })) router.Head("/-/ready", readyf(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) From 4023c2405a32686a5fd3779fd310d0307922d445 Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Mon, 19 Aug 2024 10:43:05 +0200 Subject: [PATCH 96/99] Update web/web.go Co-authored-by: Julien <291750+roidelapluie@users.noreply.github.com> Signed-off-by: Arve Knudsen --- web/web.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/web.go b/web/web.go index 9fba6fc95..5cc23760a 100644 --- a/web/web.go +++ b/web/web.go @@ -481,7 +481,7 @@ func New(logger log.Logger, o *Options) *Handler { router.Get("/-/healthy", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - fmt.Fprint(w, o.AppName+" is Healthy.\n") + fmt.Fprintf(w, "%s is Healthy.\n", o.AppName) }) router.Head("/-/healthy", func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) From ee4715064649514c06cda0b1a267484e2af7a1da Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Mon, 19 Aug 2024 10:43:17 +0200 Subject: [PATCH 97/99] Update web/web.go Co-authored-by: Julien <291750+roidelapluie@users.noreply.github.com> Signed-off-by: Arve Knudsen --- web/web.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/web.go b/web/web.go index 5cc23760a..8e84acd03 100644 --- a/web/web.go +++ 
b/web/web.go @@ -488,7 +488,7 @@ func New(logger log.Logger, o *Options) *Handler { }) router.Get("/-/ready", readyf(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - fmt.Fprint(w, o.AppName+" is Ready.\n") + fmt.Fprintf(w, "%s is Ready.\n", o.AppName) })) router.Head("/-/ready", readyf(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) From 250aa5031dc5e66ad6b7d1e077461cad64e15b10 Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Mon, 19 Aug 2024 10:50:27 +0200 Subject: [PATCH 98/99] Remove empty line Signed-off-by: Arve Knudsen --- notifier/notifier.go | 1 - 1 file changed, 1 deletion(-) diff --git a/notifier/notifier.go b/notifier/notifier.go index 2ebfbbee5..218e4cb8c 100644 --- a/notifier/notifier.go +++ b/notifier/notifier.go @@ -674,7 +674,6 @@ func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []b }() // Any HTTP status 2xx is OK. - if resp.StatusCode/100 != 2 { return fmt.Errorf("bad response status %s", resp.Status) } From f9f39a4411645778dcd4a2fffe7d97249b5b212e Mon Sep 17 00:00:00 2001 From: Julien <291750+roidelapluie@users.noreply.github.com> Date: Tue, 20 Aug 2024 17:09:07 +0200 Subject: [PATCH 99/99] Extend testing CA certificates (#14696) Signed-off-by: Julien --- scrape/testdata/ca.cer | 18 +++++++++--------- tracing/testdata/ca.cer | 18 +++++++++--------- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/scrape/testdata/ca.cer b/scrape/testdata/ca.cer index 86f627a90..df9344392 100644 --- a/scrape/testdata/ca.cer +++ b/scrape/testdata/ca.cer @@ -1,8 +1,8 @@ -----BEGIN CERTIFICATE----- MIIDkTCCAnmgAwIBAgIJAJNsnimNN3tmMA0GCSqGSIb3DQEBCwUAMF8xCzAJBgNV BAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQg -Q29tcGFueSBMdGQxGzAZBgNVBAMMElByb21ldGhldXMgVGVzdCBDQTAeFw0xNTA4 -MDQxNDA5MjFaFw0yNTA4MDExNDA5MjFaMF8xCzAJBgNVBAYTAlhYMRUwEwYDVQQH +Q29tcGFueSBMdGQxGzAZBgNVBAMMElByb21ldGhldXMgVGVzdCBDQTAeFw0yNDA4 +MjAxMTUxMjNaFw00NDEyMDUxMTUxMjNaMF8xCzAJBgNVBAYTAlhYMRUwEwYDVQQH DAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQxGzAZ BgNVBAMMElByb21ldGhldXMgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP ADCCAQoCggEBAOlSBU3yWpUELbhzizznR0hnAL7dbEHzfEtEc6N3PoSvMNcqrUVq @@ -12,11 +12,11 @@ yB9M1ypWomzBz1UFXZp1oiNO5o7/dgXW4MgLUfC2obJ9j5xqpc6GkhWMW4ZFwEr/ VLjuzxG9B8tLfQuhnXKGn1W8+WzZVWCWMD/sLfZfmjKaWlwcXzL51g8E+IEIBJqV w51aMI6lDkcvAM7gLq1auLZMVXyKWSKw7XMCAwEAAaNQME4wHQYDVR0OBBYEFMz1 BZnlqxJp2HiJSjHK8IsLrWYbMB8GA1UdIwQYMBaAFMz1BZnlqxJp2HiJSjHK8IsL -rWYbMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAI2iA3w3TK5J15Pu -e4fPFB4jxQqsbUwuyXbCCv/jKLeFNCD4BjM181WZEYjPMumeTBVzU3aF45LWQIG1 -0DJcrCL4mjMz9qgAoGqA7aDDXiJGbukMgYYsn7vrnVmrZH8T3E8ySlltr7+W578k -pJ5FxnbCroQwn0zLyVB3sFbS8E3vpBr3L8oy8PwPHhIScexcNVc3V6/m4vTZsXTH -U+vUm1XhDgpDcFMTg2QQiJbfpOYUkwIgnRDAT7t282t2KQWtnlqc3zwPQ1F/6Cpx -j19JeNsaF1DArkD7YlyKj/GhZLtHwFHG5cxznH0mLDJTW7bQvqqh2iQTeXmBk1lU -mM5lH/s= +rWYbMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAEqhsLzIh098lmLl +CSmuOi5o0NLFaO3qgzaxhvO56RkrtnMQb99/u/F2gQPBoVrubES4fBDRWtfBkmRZ +NabgqghBN27nyLa9DEtHeOzEtBWjYnZKOY5uGf/wwIp+HM2H5QBs8c4nJv+46ev3 +L73CS5zWV950dLNPA5iatQgtFsp/tsh2YoYbfPI+bHjMLJWau3cl6ID/m+j4moU7 +hbcXTnehz0250CXoXYzmfPHZUjA97Cs3kbzi6Dkxbz3pmHCAfEHdGRMFIZR7Fs/Y +7k44NF5q/82FrI+Umt1OdwUTprSAUrKXZHaI9N1CClAcgP1LbqliEKrvLsEvvg7C +LrUoX4M= -----END CERTIFICATE----- diff --git a/tracing/testdata/ca.cer b/tracing/testdata/ca.cer index 86f627a90..df9344392 100644 --- a/tracing/testdata/ca.cer +++ b/tracing/testdata/ca.cer @@ -1,8 +1,8 @@ -----BEGIN CERTIFICATE----- 
MIIDkTCCAnmgAwIBAgIJAJNsnimNN3tmMA0GCSqGSIb3DQEBCwUAMF8xCzAJBgNV BAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQg -Q29tcGFueSBMdGQxGzAZBgNVBAMMElByb21ldGhldXMgVGVzdCBDQTAeFw0xNTA4 -MDQxNDA5MjFaFw0yNTA4MDExNDA5MjFaMF8xCzAJBgNVBAYTAlhYMRUwEwYDVQQH +Q29tcGFueSBMdGQxGzAZBgNVBAMMElByb21ldGhldXMgVGVzdCBDQTAeFw0yNDA4 +MjAxMTUxMjNaFw00NDEyMDUxMTUxMjNaMF8xCzAJBgNVBAYTAlhYMRUwEwYDVQQH DAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQxGzAZ BgNVBAMMElByb21ldGhldXMgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP ADCCAQoCggEBAOlSBU3yWpUELbhzizznR0hnAL7dbEHzfEtEc6N3PoSvMNcqrUVq @@ -12,11 +12,11 @@ yB9M1ypWomzBz1UFXZp1oiNO5o7/dgXW4MgLUfC2obJ9j5xqpc6GkhWMW4ZFwEr/ VLjuzxG9B8tLfQuhnXKGn1W8+WzZVWCWMD/sLfZfmjKaWlwcXzL51g8E+IEIBJqV w51aMI6lDkcvAM7gLq1auLZMVXyKWSKw7XMCAwEAAaNQME4wHQYDVR0OBBYEFMz1 BZnlqxJp2HiJSjHK8IsLrWYbMB8GA1UdIwQYMBaAFMz1BZnlqxJp2HiJSjHK8IsL -rWYbMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAI2iA3w3TK5J15Pu -e4fPFB4jxQqsbUwuyXbCCv/jKLeFNCD4BjM181WZEYjPMumeTBVzU3aF45LWQIG1 -0DJcrCL4mjMz9qgAoGqA7aDDXiJGbukMgYYsn7vrnVmrZH8T3E8ySlltr7+W578k -pJ5FxnbCroQwn0zLyVB3sFbS8E3vpBr3L8oy8PwPHhIScexcNVc3V6/m4vTZsXTH -U+vUm1XhDgpDcFMTg2QQiJbfpOYUkwIgnRDAT7t282t2KQWtnlqc3zwPQ1F/6Cpx -j19JeNsaF1DArkD7YlyKj/GhZLtHwFHG5cxznH0mLDJTW7bQvqqh2iQTeXmBk1lU -mM5lH/s= +rWYbMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAEqhsLzIh098lmLl +CSmuOi5o0NLFaO3qgzaxhvO56RkrtnMQb99/u/F2gQPBoVrubES4fBDRWtfBkmRZ +NabgqghBN27nyLa9DEtHeOzEtBWjYnZKOY5uGf/wwIp+HM2H5QBs8c4nJv+46ev3 +L73CS5zWV950dLNPA5iatQgtFsp/tsh2YoYbfPI+bHjMLJWau3cl6ID/m+j4moU7 +hbcXTnehz0250CXoXYzmfPHZUjA97Cs3kbzi6Dkxbz3pmHCAfEHdGRMFIZR7Fs/Y +7k44NF5q/82FrI+Umt1OdwUTprSAUrKXZHaI9N1CClAcgP1LbqliEKrvLsEvvg7C +LrUoX4M= -----END CERTIFICATE-----
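
Most of the churn in the golangci-lint v1.60.1 upgrade patch earlier in this series comes from renaming identifiers such as max, min, and clear, which collide with Go predeclared names (min and max became builtins in Go 1.21, and clear is a builtin as well), so shadowing them now trips lint checks for shadowed predeclared identifiers. The following is a minimal, hypothetical Go sketch of the shadowing those renames avoid; it is illustrative only and not part of any patch above (assumes Go 1.21+):

    package main

    import "fmt"

    func main() {
    	// With Go 1.21+, max is a predeclared builtin function.
    	fmt.Println(max(3, 7)) // prints 7

    	// A local variable named max shadows the builtin from this point to the
    	// end of the scope; any later max(...) call here would fail to compile.
    	max := 10
    	fmt.Println(max) // prints 10
    }

Renaming the locals instead (max -> maxVal, maxCount, maxT; min -> m, minD; clear -> forceClear, shouldClear) keeps the builtins reachable and silences the warnings without changing behavior.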