From 8b84856ba5895a9a24e2176a200342b6f2072beb Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Wed, 5 Jul 2023 15:29:00 +1000 Subject: [PATCH 1/7] Add EstimatedChunkCount() method to ChunkSeries Signed-off-by: Charles Korn --- storage/interface.go | 6 ++ storage/merge.go | 87 ++++++++++++++++++++++++++++ storage/merge_test.go | 18 +++++- storage/series.go | 38 +++++++++++++ storage/series_test.go | 126 ++++++++++++++++++++++++++++++++++++++++- tsdb/querier.go | 4 ++ 6 files changed, 273 insertions(+), 6 deletions(-) diff --git a/storage/interface.go b/storage/interface.go index 2a440cc93c..158c2354b8 100644 --- a/storage/interface.go +++ b/storage/interface.go @@ -415,6 +415,12 @@ type ChunkSeriesSet interface { type ChunkSeries interface { Labels ChunkIterable + + // EstimatedChunkCount returns an estimate of the number of chunks available from this ChunkSeries. + // + // This estimate is used by Mimir's ingesters to report the number of chunks expected to be returned by a query, + // which is used by queriers to enforce the 'max chunks per query' limit. + EstimatedChunkCount() int } // Labels represents an item that has labels e.g. time series. diff --git a/storage/merge.go b/storage/merge.go index c0665d720b..61f873cf36 100644 --- a/storage/merge.go +++ b/storage/merge.go @@ -658,6 +658,7 @@ func NewCompactingChunkSeriesMerger(mergeFunc VerticalSeriesMergeFunc) VerticalC if len(series) == 0 { return nil } + return &ChunkSeriesEntry{ Lset: series[0].Labels(), ChunkIteratorFn: func(chunks.Iterator) chunks.Iterator { @@ -670,10 +671,86 @@ func NewCompactingChunkSeriesMerger(mergeFunc VerticalSeriesMergeFunc) VerticalC iterators: iterators, } }, + ChunkCountFn: func() int { + return estimateCompactedChunkCount(series) + }, } } } +// estimateCompactedChunkCount computes an estimate of the resulting number of chunks +// after compacting series. +// +// The estimate is imperfect in a few ways, due to the fact it does not examine individual samples: +// - it does not check for duplicate samples in overlapping chunks, and so may overestimate +// the resulting number of chunks if duplicate samples are present, or if an entire chunk is +// duplicated +// - it does not check if overlapping chunks have samples that swap back and forth between +// different encodings over time, and so may underestimate the resulting number of chunks +func estimateCompactedChunkCount(series []ChunkSeries) int { + h := make(chunkIteratorHeap, 0, len(series)) + + for _, s := range series { + iter := s.Iterator(nil) + if iter.Next() { + h.Push(iter) + } + } + + totalChunkCount := 0 + + for len(h) > 0 { + iter := heap.Pop(&h).(chunks.Iterator) + prev := iter.At() + if iter.Next() { + heap.Push(&h, iter) + } + + chunkCountForThisTimePeriod := 0 + sampleCount := prev.Chunk.NumSamples() + maxTime := prev.MaxTime + + // Find all overlapping chunks and estimate the number of resulting chunks. + for len(h) > 0 && h[0].At().MinTime <= maxTime { + iter := heap.Pop(&h).(chunks.Iterator) + next := iter.At() + if iter.Next() { + heap.Push(&h, iter) + } + + if next.MaxTime > maxTime { + maxTime = next.MaxTime + } + + if prev.Chunk.Encoding() != next.Chunk.Encoding() { + // If we have more than seriesToChunkEncoderSplit samples, account for the additional chunks we'll create. 
+ chunkCountForThisTimePeriod += sampleCount / seriesToChunkEncoderSplit + if sampleCount%seriesToChunkEncoderSplit > 0 { + chunkCountForThisTimePeriod++ + } + + sampleCount = 0 + } + + sampleCount += next.Chunk.NumSamples() + prev = next + } + + // If we have more than seriesToChunkEncoderSplit samples, account for the additional chunks we'll create. + chunkCountForThisTimePeriod += sampleCount / seriesToChunkEncoderSplit + if sampleCount%seriesToChunkEncoderSplit > 0 { + chunkCountForThisTimePeriod++ + } + if chunkCountForThisTimePeriod == 0 { + chunkCountForThisTimePeriod = 1 // We'll always create at least one chunk. + } + + totalChunkCount += chunkCountForThisTimePeriod + } + + return totalChunkCount +} + // compactChunkIterator is responsible to compact chunks from different iterators of the same time series into single chainSeries. // If time-overlapping chunks are found, they are encoded and passed to series merge and encoded again into one bigger chunk. // TODO(bwplotka): Currently merge will compact overlapping chunks with bigger chunk, without limit. Split it: https://github.com/prometheus/tsdb/issues/670 @@ -801,6 +878,7 @@ func NewConcatenatingChunkSeriesMerger() VerticalChunkSeriesMergeFunc { if len(series) == 0 { return nil } + return &ChunkSeriesEntry{ Lset: series[0].Labels(), ChunkIteratorFn: func(chunks.Iterator) chunks.Iterator { @@ -812,6 +890,15 @@ func NewConcatenatingChunkSeriesMerger() VerticalChunkSeriesMergeFunc { iterators: iterators, } }, + ChunkCountFn: func() int { + chunkCount := 0 + + for _, series := range series { + chunkCount += series.EstimatedChunkCount() + } + + return chunkCount + }, } } } diff --git a/storage/merge_test.go b/storage/merge_test.go index b0544c2d81..98daed10c5 100644 --- a/storage/merge_test.go +++ b/storage/merge_test.go @@ -394,9 +394,10 @@ func TestCompactingChunkSeriesMerger(t *testing.T) { } for _, tc := range []struct { - name string - input []ChunkSeries - expected ChunkSeries + name string + input []ChunkSeries + expected ChunkSeries + expectedChunksEstimate int }{ { name: "single empty series", @@ -483,6 +484,7 @@ func TestCompactingChunkSeriesMerger(t *testing.T) { expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), tsdbutil.GenerateSamples(0, 110), ), + expectedChunksEstimate: 2, // Estimation doesn't consider duplicate series when estimating the number of chunks. }, { name: "150 overlapping samples, split chunk", @@ -520,6 +522,7 @@ func TestCompactingChunkSeriesMerger(t *testing.T) { []tsdbutil.Sample{fSample{12, 12}, fSample{14, 14}}, []tsdbutil.Sample{histogramSample(15)}, ), + expectedChunksEstimate: 4, // Estimation assumes overlapping chunks don't swap back and forth between different encodings. }, { name: "float histogram chunks overlapping", @@ -546,6 +549,7 @@ func TestCompactingChunkSeriesMerger(t *testing.T) { []tsdbutil.Sample{fSample{12, 12}, fSample{14, 14}}, []tsdbutil.Sample{floatHistogramSample(15)}, ), + expectedChunksEstimate: 4, // Estimation assumes overlapping chunks don't swap back and forth between different encodings. }, { name: "float histogram chunks overlapping with histogram chunks", @@ -560,6 +564,7 @@ func TestCompactingChunkSeriesMerger(t *testing.T) { []tsdbutil.Sample{histogramSample(12), histogramSample(14)}, []tsdbutil.Sample{floatHistogramSample(15)}, ), + expectedChunksEstimate: 4, // Estimation assumes overlapping chunks don't swap back and forth between different encodings. 
}, } { t.Run(tc.name, func(t *testing.T) { @@ -570,6 +575,12 @@ func TestCompactingChunkSeriesMerger(t *testing.T) { require.Equal(t, expErr, actErr) require.Equal(t, expChks, actChks) + + if tc.expectedChunksEstimate == 0 { + tc.expectedChunksEstimate = len(actChks) + } + + require.Equalf(t, tc.expectedChunksEstimate, merged.EstimatedChunkCount(), "expected estimate of %v chunks, actual chunks are: %v", tc.expectedChunksEstimate, actChks) }) } } @@ -704,6 +715,7 @@ func TestConcatenatingChunkSeriesMerger(t *testing.T) { require.Equal(t, expErr, actErr) require.Equal(t, expChks, actChks) + require.Equal(t, len(expChks), merged.EstimatedChunkCount()) }) } } diff --git a/storage/series.go b/storage/series.go index b73f1e35ce..7d0b8a5193 100644 --- a/storage/series.go +++ b/storage/series.go @@ -35,11 +35,13 @@ func (s *SeriesEntry) Iterator(it chunkenc.Iterator) chunkenc.Iterator { return type ChunkSeriesEntry struct { Lset labels.Labels + ChunkCountFn func() int ChunkIteratorFn func(chunks.Iterator) chunks.Iterator } func (s *ChunkSeriesEntry) Labels() labels.Labels { return s.Lset } func (s *ChunkSeriesEntry) Iterator(it chunks.Iterator) chunks.Iterator { return s.ChunkIteratorFn(it) } +func (s *ChunkSeriesEntry) EstimatedChunkCount() int { return s.ChunkCountFn() } // NewListSeries returns series entry with iterator that allows to iterate over provided samples. func NewListSeries(lset labels.Labels, s []tsdbutil.Sample) *SeriesEntry { @@ -78,6 +80,7 @@ func NewListChunkSeriesFromSamples(lset labels.Labels, samples ...[]tsdbutil.Sam } return NewListChunkSeriesIterator(chks...) }, + ChunkCountFn: func() int { return len(samples) }, // We create one chunk per slice of samples. } } @@ -399,6 +402,41 @@ func (s *seriesToChunkEncoder) Iterator(it chunks.Iterator) chunks.Iterator { return NewListChunkSeriesIterator(chks...) } +// EstimatedChunkCount returns an estimate of the number of chunks produced by Iterator. +// +// It is perfectly accurate except when histograms are present in series. When histograms are +// present, EstimatedChunkCount will underestimate the number of chunks produced, as the +// estimation does not consider individual samples and so triggers for new chunks such as +// counter resets, changes to the bucket schema and changes to the zero threshold are not +// taken into account. +func (s *seriesToChunkEncoder) EstimatedChunkCount() int { + chunkCount := 0 + seriesIter := s.Series.Iterator(nil) + lastType := chunkenc.ValNone + samplesInChunk := 0 + + for typ := seriesIter.Next(); typ != chunkenc.ValNone; typ = seriesIter.Next() { + if chunkCount == 0 { + // We'll always have at least one chunk if there's at least one sample. + chunkCount++ + } else if lastType != typ { + // If the sample type changes, then we'll cut a new chunk. 
+ chunkCount++ + samplesInChunk = 0 + } + + if samplesInChunk == seriesToChunkEncoderSplit { + chunkCount++ + samplesInChunk = 0 + } + + lastType = typ + samplesInChunk++ + } + + return chunkCount +} + func appendChunk(chks []chunks.Meta, mint, maxt int64, chk chunkenc.Chunk) []chunks.Meta { if chk != nil { chks = append(chks, chunks.Meta{ diff --git a/storage/series_test.go b/storage/series_test.go index 5c74fae096..5f8ba68c03 100644 --- a/storage/series_test.go +++ b/storage/series_test.go @@ -73,6 +73,36 @@ func TestListSeriesIterator(t *testing.T) { require.Equal(t, chunkenc.ValNone, it.Seek(2)) } +func TestNewListChunkSeriesFromSamples(t *testing.T) { + lbls := labels.FromStrings("__name__", "the_series") + series := NewListChunkSeriesFromSamples( + lbls, + samples{ + fSample{0, 0}, + fSample{1, 1}, + fSample{1, 1.5}, + fSample{2, 2}, + fSample{3, 3}, + }, + samples{ + fSample{4, 5}, + }, + ) + + require.Equal(t, lbls, series.Labels()) + + it := series.Iterator(nil) + chks := []chunks.Meta{} + + for it.Next() { + chks = append(chks, it.At()) + } + + require.NoError(t, it.Err()) + require.Len(t, chks, 2) + require.Equal(t, len(chks), series.EstimatedChunkCount(), "should have one chunk per sample") +} + // TestSeriesSetToChunkSet test the property of SeriesSet that says // returned series should be iterable even after Next is called. func TestChunkSeriesSetToSeriesSet(t *testing.T) { @@ -125,9 +155,84 @@ func TestChunkSeriesSetToSeriesSet(t *testing.T) { } } +func TestSeriesToChunks(t *testing.T) { + generateSamples := func(count int) []tsdbutil.Sample { + s := make([]tsdbutil.Sample, count) + + for i := 0; i < count; i++ { + s[i] = fSample{t: int64(i), f: float64(i) * 10.0} + } + + return s + } + + h := &histogram.Histogram{ + Count: 0, + ZeroThreshold: 0.001, + Schema: 0, + } + + testCases := map[string]struct { + samples []tsdbutil.Sample + expectedChunkCount int + }{ + "no samples": { + samples: []tsdbutil.Sample{}, + expectedChunkCount: 0, + }, + "single sample": { + samples: generateSamples(1), + expectedChunkCount: 1, + }, + "120 samples": { + samples: generateSamples(120), + expectedChunkCount: 1, + }, + "121 samples": { + samples: generateSamples(121), + expectedChunkCount: 2, + }, + "240 samples": { + samples: generateSamples(240), + expectedChunkCount: 2, + }, + "241 samples": { + samples: generateSamples(241), + expectedChunkCount: 3, + }, + "float samples and histograms": { + samples: []tsdbutil.Sample{ + fSample{t: 1, f: 10}, + fSample{t: 2, f: 20}, + hSample{t: 3, h: h}, + fSample{t: 4, f: 40}, + }, + expectedChunkCount: 3, + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + lset := labels.FromStrings("__name__", "test_series") + series := NewListSeries(lset, testCase.samples) + encoder := NewSeriesToChunkEncoder(series) + require.Equal(t, lset, encoder.Labels()) + + chks, err := ExpandChunks(encoder.Iterator(nil)) + require.NoError(t, err) + require.Len(t, chks, testCase.expectedChunkCount) + require.Equal(t, testCase.expectedChunkCount, encoder.EstimatedChunkCount()) + + encodedSamples := expandChunks(chks) + require.Equal(t, testCase.samples, encodedSamples) + }) + } +} + type histogramTest struct { samples []tsdbutil.Sample expectedCounterResetHeaders []chunkenc.CounterResetHeader + expectedChunkCountEstimate int } func TestHistogramSeriesToChunks(t *testing.T) { @@ -288,6 +393,7 @@ func TestHistogramSeriesToChunks(t *testing.T) { hSample{t: 2, h: h1}, }, expectedCounterResetHeaders: 
[]chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset}, + expectedChunkCountEstimate: 1, // Estimate doesn't consider counter resets. }, "histogram and stale sample encoded to two chunks": { samples: []tsdbutil.Sample{ @@ -295,6 +401,7 @@ func TestHistogramSeriesToChunks(t *testing.T) { hSample{t: 2, h: h1}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset}, + expectedChunkCountEstimate: 1, // Estimate doesn't consider stale markers. }, "histogram and reduction in bucket encoded to two chunks": { samples: []tsdbutil.Sample{ @@ -302,6 +409,7 @@ func TestHistogramSeriesToChunks(t *testing.T) { hSample{t: 2, h: h2down}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset}, + expectedChunkCountEstimate: 1, // Estimate doesn't consider counter resets. }, // Float histograms. "single float histogram to single chunk": { @@ -323,6 +431,7 @@ func TestHistogramSeriesToChunks(t *testing.T) { fhSample{t: 2, fh: fh1}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset}, + expectedChunkCountEstimate: 1, // Estimate doesn't consider counter resets. }, "float histogram and stale sample encoded to two chunks": { samples: []tsdbutil.Sample{ @@ -330,6 +439,7 @@ func TestHistogramSeriesToChunks(t *testing.T) { fhSample{t: 2, fh: fh1}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset}, + expectedChunkCountEstimate: 1, // Estimate doesn't consider stale markers. }, "float histogram and reduction in bucket encoded to two chunks": { samples: []tsdbutil.Sample{ @@ -337,6 +447,7 @@ func TestHistogramSeriesToChunks(t *testing.T) { fhSample{t: 2, fh: fh2down}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset}, + expectedChunkCountEstimate: 1, // Estimate doesn't consider counter resets. }, // Mixed. "histogram and float histogram encoded to two chunks": { @@ -430,8 +541,14 @@ func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) { require.NoError(t, err) require.Equal(t, len(test.expectedCounterResetHeaders), len(chks)) + if test.expectedChunkCountEstimate == 0 { + test.expectedChunkCountEstimate = len(chks) + } + + require.Equal(t, test.expectedChunkCountEstimate, encoder.EstimatedChunkCount()) + // Decode all encoded samples and assert they are equal to the original ones. 
- encodedSamples := expandHistogramSamples(chks) + encodedSamples := expandChunks(chks) require.Equal(t, len(test.samples), len(encodedSamples)) for i, s := range test.samples { @@ -470,9 +587,9 @@ func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) { } } -func expandHistogramSamples(chunks []chunks.Meta) (result []tsdbutil.Sample) { +func expandChunks(chunks []chunks.Meta) (result []tsdbutil.Sample) { if len(chunks) == 0 { - return + return []tsdbutil.Sample{} } for _, chunk := range chunks { @@ -485,6 +602,9 @@ func expandHistogramSamples(chunks []chunks.Meta) (result []tsdbutil.Sample) { case chunkenc.ValFloatHistogram: t, fh := it.AtFloatHistogram() result = append(result, fhSample{t: t, fh: fh}) + case chunkenc.ValFloat: + t, f := it.At() + result = append(result, fSample{t: t, f: f}) default: panic("unexpected value type") } diff --git a/tsdb/querier.go b/tsdb/querier.go index 854687298c..cbee873baf 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ -649,6 +649,10 @@ func (s *chunkSeriesEntry) Iterator(it chunks.Iterator) chunks.Iterator { return pi } +func (s *chunkSeriesEntry) EstimatedChunkCount() int { + return len(s.chks) +} + // populateWithDelSeriesIterator allows to iterate over samples for the single series. type populateWithDelSeriesIterator struct { populateWithDelGenericSeriesIterator From 1299d98c463f23d1c6ef296d4f7e6a55a83cbff9 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Thu, 6 Jul 2023 13:57:08 +1000 Subject: [PATCH 2/7] Fix incorrect estimated chunks for `NewCompactingChunkSeriesMerger` if series are not already sorted. --- storage/merge.go | 2 +- storage/merge_test.go | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/storage/merge.go b/storage/merge.go index 61f873cf36..e1c248cb46 100644 --- a/storage/merge.go +++ b/storage/merge.go @@ -693,7 +693,7 @@ func estimateCompactedChunkCount(series []ChunkSeries) int { for _, s := range series { iter := s.Iterator(nil) if iter.Next() { - h.Push(iter) + heap.Push(&h, iter) } } diff --git a/storage/merge_test.go b/storage/merge_test.go index 98daed10c5..e5eb296d1b 100644 --- a/storage/merge_test.go +++ b/storage/merge_test.go @@ -429,6 +429,14 @@ func TestCompactingChunkSeriesMerger(t *testing.T) { }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}, fSample{5, 5}}, []tsdbutil.Sample{fSample{7, 7}, fSample{9, 9}}, []tsdbutil.Sample{fSample{10, 10}}), }, + { + name: "two non overlapping in reverse order", + input: []ChunkSeries{ + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{7, 7}, fSample{9, 9}}, []tsdbutil.Sample{fSample{10, 10}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}, fSample{5, 5}}), + }, + expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}, fSample{5, 5}}, []tsdbutil.Sample{fSample{7, 7}, fSample{9, 9}}, []tsdbutil.Sample{fSample{10, 10}}), + }, { name: "two overlapping", input: []ChunkSeries{ From f71a97b460a691074fbf6766798b3377b8fa21f6 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Thu, 6 Jul 2023 14:27:14 +1000 Subject: [PATCH 3/7] Address PR feedback: add comment explaining that we don't expect one case to happen often --- storage/merge.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/storage/merge.go 
b/storage/merge.go index e1c248cb46..1728f87e6a 100644 --- a/storage/merge.go +++ b/storage/merge.go @@ -687,6 +687,8 @@ func NewCompactingChunkSeriesMerger(mergeFunc VerticalSeriesMergeFunc) VerticalC // duplicated // - it does not check if overlapping chunks have samples that swap back and forth between // different encodings over time, and so may underestimate the resulting number of chunks +// (we don't expect this to happen often though, as switching from float samples to histograms +// involves changing the instrumentation) func estimateCompactedChunkCount(series []ChunkSeries) int { h := make(chunkIteratorHeap, 0, len(series)) From 31235c351fc4f2e13a7811d7f32e187186037225 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Thu, 6 Jul 2023 15:25:11 +1000 Subject: [PATCH 4/7] Simplify trickier estimation cases by not estimating at all. --- storage/merge.go | 105 +++++++++-------------------------------- storage/merge_test.go | 17 ++----- storage/series.go | 32 +++---------- storage/series_test.go | 13 +---- 4 files changed, 34 insertions(+), 133 deletions(-) diff --git a/storage/merge.go b/storage/merge.go index 1728f87e6a..33023b1932 100644 --- a/storage/merge.go +++ b/storage/merge.go @@ -659,98 +659,39 @@ func NewCompactingChunkSeriesMerger(mergeFunc VerticalSeriesMergeFunc) VerticalC return nil } + chunkIteratorFn := func(chunks.Iterator) chunks.Iterator { + iterators := make([]chunks.Iterator, 0, len(series)) + for _, s := range series { + iterators = append(iterators, s.Iterator(nil)) + } + return &compactChunkIterator{ + mergeFunc: mergeFunc, + iterators: iterators, + } + } + return &ChunkSeriesEntry{ - Lset: series[0].Labels(), - ChunkIteratorFn: func(chunks.Iterator) chunks.Iterator { - iterators := make([]chunks.Iterator, 0, len(series)) - for _, s := range series { - iterators = append(iterators, s.Iterator(nil)) - } - return &compactChunkIterator{ - mergeFunc: mergeFunc, - iterators: iterators, - } - }, + Lset: series[0].Labels(), + ChunkIteratorFn: chunkIteratorFn, ChunkCountFn: func() int { - return estimateCompactedChunkCount(series) + // This method is expensive, but we don't expect to ever actually use this on the ingester query path in Mimir - + // it's just here to ensure things don't break if this assumption ever changes. + // Ingesters return uncompacted chunks to queriers, so this method is never called. + return countChunks(chunkIteratorFn) }, } } } -// estimateCompactedChunkCount computes an estimate of the resulting number of chunks -// after compacting series. 
-// -// The estimate is imperfect in a few ways, due to the fact it does not examine individual samples: -// - it does not check for duplicate samples in overlapping chunks, and so may overestimate -// the resulting number of chunks if duplicate samples are present, or if an entire chunk is -// duplicated -// - it does not check if overlapping chunks have samples that swap back and forth between -// different encodings over time, and so may underestimate the resulting number of chunks -// (we don't expect this to happen often though, as switching from float samples to histograms -// involves changing the instrumentation) -func estimateCompactedChunkCount(series []ChunkSeries) int { - h := make(chunkIteratorHeap, 0, len(series)) +func countChunks(chunkIteratorFn func(chunks.Iterator) chunks.Iterator) int { + chunkCount := 0 + it := chunkIteratorFn(nil) - for _, s := range series { - iter := s.Iterator(nil) - if iter.Next() { - heap.Push(&h, iter) - } + for it.Next() { + chunkCount++ } - totalChunkCount := 0 - - for len(h) > 0 { - iter := heap.Pop(&h).(chunks.Iterator) - prev := iter.At() - if iter.Next() { - heap.Push(&h, iter) - } - - chunkCountForThisTimePeriod := 0 - sampleCount := prev.Chunk.NumSamples() - maxTime := prev.MaxTime - - // Find all overlapping chunks and estimate the number of resulting chunks. - for len(h) > 0 && h[0].At().MinTime <= maxTime { - iter := heap.Pop(&h).(chunks.Iterator) - next := iter.At() - if iter.Next() { - heap.Push(&h, iter) - } - - if next.MaxTime > maxTime { - maxTime = next.MaxTime - } - - if prev.Chunk.Encoding() != next.Chunk.Encoding() { - // If we have more than seriesToChunkEncoderSplit samples, account for the additional chunks we'll create. - chunkCountForThisTimePeriod += sampleCount / seriesToChunkEncoderSplit - if sampleCount%seriesToChunkEncoderSplit > 0 { - chunkCountForThisTimePeriod++ - } - - sampleCount = 0 - } - - sampleCount += next.Chunk.NumSamples() - prev = next - } - - // If we have more than seriesToChunkEncoderSplit samples, account for the additional chunks we'll create. - chunkCountForThisTimePeriod += sampleCount / seriesToChunkEncoderSplit - if sampleCount%seriesToChunkEncoderSplit > 0 { - chunkCountForThisTimePeriod++ - } - if chunkCountForThisTimePeriod == 0 { - chunkCountForThisTimePeriod = 1 // We'll always create at least one chunk. - } - - totalChunkCount += chunkCountForThisTimePeriod - } - - return totalChunkCount + return chunkCount } // compactChunkIterator is responsible to compact chunks from different iterators of the same time series into single chainSeries. diff --git a/storage/merge_test.go b/storage/merge_test.go index e5eb296d1b..79772dd4ba 100644 --- a/storage/merge_test.go +++ b/storage/merge_test.go @@ -394,10 +394,9 @@ func TestCompactingChunkSeriesMerger(t *testing.T) { } for _, tc := range []struct { - name string - input []ChunkSeries - expected ChunkSeries - expectedChunksEstimate int + name string + input []ChunkSeries + expected ChunkSeries }{ { name: "single empty series", @@ -492,7 +491,6 @@ func TestCompactingChunkSeriesMerger(t *testing.T) { expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), tsdbutil.GenerateSamples(0, 110), ), - expectedChunksEstimate: 2, // Estimation doesn't consider duplicate series when estimating the number of chunks. 
}, { name: "150 overlapping samples, split chunk", @@ -530,7 +528,6 @@ func TestCompactingChunkSeriesMerger(t *testing.T) { []tsdbutil.Sample{fSample{12, 12}, fSample{14, 14}}, []tsdbutil.Sample{histogramSample(15)}, ), - expectedChunksEstimate: 4, // Estimation assumes overlapping chunks don't swap back and forth between different encodings. }, { name: "float histogram chunks overlapping", @@ -557,7 +554,6 @@ func TestCompactingChunkSeriesMerger(t *testing.T) { []tsdbutil.Sample{fSample{12, 12}, fSample{14, 14}}, []tsdbutil.Sample{floatHistogramSample(15)}, ), - expectedChunksEstimate: 4, // Estimation assumes overlapping chunks don't swap back and forth between different encodings. }, { name: "float histogram chunks overlapping with histogram chunks", @@ -572,7 +568,6 @@ func TestCompactingChunkSeriesMerger(t *testing.T) { []tsdbutil.Sample{histogramSample(12), histogramSample(14)}, []tsdbutil.Sample{floatHistogramSample(15)}, ), - expectedChunksEstimate: 4, // Estimation assumes overlapping chunks don't swap back and forth between different encodings. }, } { t.Run(tc.name, func(t *testing.T) { @@ -584,11 +579,7 @@ func TestCompactingChunkSeriesMerger(t *testing.T) { require.Equal(t, expErr, actErr) require.Equal(t, expChks, actChks) - if tc.expectedChunksEstimate == 0 { - tc.expectedChunksEstimate = len(actChks) - } - - require.Equalf(t, tc.expectedChunksEstimate, merged.EstimatedChunkCount(), "expected estimate of %v chunks, actual chunks are: %v", tc.expectedChunksEstimate, actChks) + require.Len(t, actChks, merged.EstimatedChunkCount()) }) } } diff --git a/storage/series.go b/storage/series.go index 7d0b8a5193..3bc91abec7 100644 --- a/storage/series.go +++ b/storage/series.go @@ -403,35 +403,15 @@ func (s *seriesToChunkEncoder) Iterator(it chunks.Iterator) chunks.Iterator { } // EstimatedChunkCount returns an estimate of the number of chunks produced by Iterator. -// -// It is perfectly accurate except when histograms are present in series. When histograms are -// present, EstimatedChunkCount will underestimate the number of chunks produced, as the -// estimation does not consider individual samples and so triggers for new chunks such as -// counter resets, changes to the bucket schema and changes to the zero threshold are not -// taken into account. func (s *seriesToChunkEncoder) EstimatedChunkCount() int { + // This method is expensive, but we don't expect to ever actually use this on the ingester query path in Mimir - + // it's just here to ensure things don't break if this assumption ever changes. + chunkCount := 0 - seriesIter := s.Series.Iterator(nil) - lastType := chunkenc.ValNone - samplesInChunk := 0 + it := s.Iterator(nil) - for typ := seriesIter.Next(); typ != chunkenc.ValNone; typ = seriesIter.Next() { - if chunkCount == 0 { - // We'll always have at least one chunk if there's at least one sample. - chunkCount++ - } else if lastType != typ { - // If the sample type changes, then we'll cut a new chunk. 
- chunkCount++ - samplesInChunk = 0 - } - - if samplesInChunk == seriesToChunkEncoderSplit { - chunkCount++ - samplesInChunk = 0 - } - - lastType = typ - samplesInChunk++ + for it.Next() { + chunkCount++ } return chunkCount diff --git a/storage/series_test.go b/storage/series_test.go index 5f8ba68c03..3f6625aeb9 100644 --- a/storage/series_test.go +++ b/storage/series_test.go @@ -232,7 +232,6 @@ func TestSeriesToChunks(t *testing.T) { type histogramTest struct { samples []tsdbutil.Sample expectedCounterResetHeaders []chunkenc.CounterResetHeader - expectedChunkCountEstimate int } func TestHistogramSeriesToChunks(t *testing.T) { @@ -393,7 +392,6 @@ func TestHistogramSeriesToChunks(t *testing.T) { hSample{t: 2, h: h1}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset}, - expectedChunkCountEstimate: 1, // Estimate doesn't consider counter resets. }, "histogram and stale sample encoded to two chunks": { samples: []tsdbutil.Sample{ @@ -401,7 +399,6 @@ func TestHistogramSeriesToChunks(t *testing.T) { hSample{t: 2, h: h1}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset}, - expectedChunkCountEstimate: 1, // Estimate doesn't consider stale markers. }, "histogram and reduction in bucket encoded to two chunks": { samples: []tsdbutil.Sample{ @@ -409,7 +406,6 @@ func TestHistogramSeriesToChunks(t *testing.T) { hSample{t: 2, h: h2down}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset}, - expectedChunkCountEstimate: 1, // Estimate doesn't consider counter resets. }, // Float histograms. "single float histogram to single chunk": { @@ -431,7 +427,6 @@ func TestHistogramSeriesToChunks(t *testing.T) { fhSample{t: 2, fh: fh1}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset}, - expectedChunkCountEstimate: 1, // Estimate doesn't consider counter resets. }, "float histogram and stale sample encoded to two chunks": { samples: []tsdbutil.Sample{ @@ -439,7 +434,6 @@ func TestHistogramSeriesToChunks(t *testing.T) { fhSample{t: 2, fh: fh1}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset}, - expectedChunkCountEstimate: 1, // Estimate doesn't consider stale markers. }, "float histogram and reduction in bucket encoded to two chunks": { samples: []tsdbutil.Sample{ @@ -447,7 +441,6 @@ func TestHistogramSeriesToChunks(t *testing.T) { fhSample{t: 2, fh: fh2down}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset}, - expectedChunkCountEstimate: 1, // Estimate doesn't consider counter resets. }, // Mixed. "histogram and float histogram encoded to two chunks": { @@ -541,11 +534,7 @@ func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) { require.NoError(t, err) require.Equal(t, len(test.expectedCounterResetHeaders), len(chks)) - if test.expectedChunkCountEstimate == 0 { - test.expectedChunkCountEstimate = len(chks) - } - - require.Equal(t, test.expectedChunkCountEstimate, encoder.EstimatedChunkCount()) + require.Len(t, chks, encoder.EstimatedChunkCount()) // Decode all encoded samples and assert they are equal to the original ones. 
encodedSamples := expandChunks(chks) From a9445622adb3adb9f86d55ddec9c80cc5d76e40a Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Thu, 6 Jul 2023 15:41:05 +1000 Subject: [PATCH 5/7] Permit returning an error from EstimatedChunkCount() --- storage/interface.go | 2 +- storage/merge.go | 18 ++++++++++++------ storage/merge_test.go | 9 +++++++-- storage/series.go | 10 +++++----- storage/series_test.go | 13 ++++++++++--- tsdb/querier.go | 4 ++-- 6 files changed, 37 insertions(+), 19 deletions(-) diff --git a/storage/interface.go b/storage/interface.go index 158c2354b8..110c17a5cb 100644 --- a/storage/interface.go +++ b/storage/interface.go @@ -420,7 +420,7 @@ type ChunkSeries interface { // // This estimate is used by Mimir's ingesters to report the number of chunks expected to be returned by a query, // which is used by queriers to enforce the 'max chunks per query' limit. - EstimatedChunkCount() int + EstimatedChunkCount() (int, error) } // Labels represents an item that has labels e.g. time series. diff --git a/storage/merge.go b/storage/merge.go index 33023b1932..892055feb7 100644 --- a/storage/merge.go +++ b/storage/merge.go @@ -673,7 +673,7 @@ func NewCompactingChunkSeriesMerger(mergeFunc VerticalSeriesMergeFunc) VerticalC return &ChunkSeriesEntry{ Lset: series[0].Labels(), ChunkIteratorFn: chunkIteratorFn, - ChunkCountFn: func() int { + ChunkCountFn: func() (int, error) { // This method is expensive, but we don't expect to ever actually use this on the ingester query path in Mimir - // it's just here to ensure things don't break if this assumption ever changes. // Ingesters return uncompacted chunks to queriers, so this method is never called. @@ -683,7 +683,7 @@ func NewCompactingChunkSeriesMerger(mergeFunc VerticalSeriesMergeFunc) VerticalC } } -func countChunks(chunkIteratorFn func(chunks.Iterator) chunks.Iterator) int { +func countChunks(chunkIteratorFn func(chunks.Iterator) chunks.Iterator) (int, error) { chunkCount := 0 it := chunkIteratorFn(nil) @@ -691,7 +691,7 @@ func countChunks(chunkIteratorFn func(chunks.Iterator) chunks.Iterator) int { chunkCount++ } - return chunkCount + return chunkCount, it.Err() } // compactChunkIterator is responsible to compact chunks from different iterators of the same time series into single chainSeries. 
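Note: a usage sketch, not part of the patch. Because countChunks only surfaces the iterator's error after draining it, callers should treat the returned count as meaningful only when the error is nil. The function name and the storage/fmt imports below are assumptions for illustration.

    // reportChunks demonstrates the caller-side contract of the new
    // (int, error) signature: discard the count whenever err is non-nil.
    func reportChunks(s storage.ChunkSeries) (int, error) {
        count, err := s.EstimatedChunkCount()
        if err != nil {
            return 0, fmt.Errorf("counting chunks: %w", err) // count may be partial here
        }
        return count, nil
    }
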
@@ -833,14 +833,20 @@ func NewConcatenatingChunkSeriesMerger() VerticalChunkSeriesMergeFunc { iterators: iterators, } }, - ChunkCountFn: func() int { + ChunkCountFn: func() (int, error) { chunkCount := 0 for _, series := range series { - chunkCount += series.EstimatedChunkCount() + c, err := series.EstimatedChunkCount() + + if err != nil { + return 0, err + } + + chunkCount += c } - return chunkCount + return chunkCount, nil }, } } diff --git a/storage/merge_test.go b/storage/merge_test.go index 79772dd4ba..33342bc75c 100644 --- a/storage/merge_test.go +++ b/storage/merge_test.go @@ -579,7 +579,9 @@ func TestCompactingChunkSeriesMerger(t *testing.T) { require.Equal(t, expErr, actErr) require.Equal(t, expChks, actChks) - require.Len(t, actChks, merged.EstimatedChunkCount()) + count, err := merged.EstimatedChunkCount() + require.NoError(t, err) + require.Len(t, actChks, count) }) } } @@ -714,7 +716,10 @@ func TestConcatenatingChunkSeriesMerger(t *testing.T) { require.Equal(t, expErr, actErr) require.Equal(t, expChks, actChks) - require.Equal(t, len(expChks), merged.EstimatedChunkCount()) + + count, err := merged.EstimatedChunkCount() + require.NoError(t, err) + require.Equal(t, len(expChks), count) }) } } diff --git a/storage/series.go b/storage/series.go index 3bc91abec7..5205eda8bb 100644 --- a/storage/series.go +++ b/storage/series.go @@ -35,13 +35,13 @@ func (s *SeriesEntry) Iterator(it chunkenc.Iterator) chunkenc.Iterator { return type ChunkSeriesEntry struct { Lset labels.Labels - ChunkCountFn func() int + ChunkCountFn func() (int, error) ChunkIteratorFn func(chunks.Iterator) chunks.Iterator } func (s *ChunkSeriesEntry) Labels() labels.Labels { return s.Lset } func (s *ChunkSeriesEntry) Iterator(it chunks.Iterator) chunks.Iterator { return s.ChunkIteratorFn(it) } -func (s *ChunkSeriesEntry) EstimatedChunkCount() int { return s.ChunkCountFn() } +func (s *ChunkSeriesEntry) EstimatedChunkCount() (int, error) { return s.ChunkCountFn() } // NewListSeries returns series entry with iterator that allows to iterate over provided samples. func NewListSeries(lset labels.Labels, s []tsdbutil.Sample) *SeriesEntry { @@ -80,7 +80,7 @@ func NewListChunkSeriesFromSamples(lset labels.Labels, samples ...[]tsdbutil.Sam } return NewListChunkSeriesIterator(chks...) }, - ChunkCountFn: func() int { return len(samples) }, // We create one chunk per slice of samples. + ChunkCountFn: func() (int, error) { return len(samples), nil }, // We create one chunk per slice of samples. } } @@ -403,7 +403,7 @@ func (s *seriesToChunkEncoder) Iterator(it chunks.Iterator) chunks.Iterator { } // EstimatedChunkCount returns an estimate of the number of chunks produced by Iterator. -func (s *seriesToChunkEncoder) EstimatedChunkCount() int { +func (s *seriesToChunkEncoder) EstimatedChunkCount() (int, error) { // This method is expensive, but we don't expect to ever actually use this on the ingester query path in Mimir - // it's just here to ensure things don't break if this assumption ever changes. 
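Note: for a run of same-typed samples, the count this iteration produces reduces to ceiling division by seriesToChunkEncoderSplit (120 in storage/series.go), which is exactly what the TestSeriesToChunks table above exercises (120 samples -> 1 chunk, 121 -> 2, 240 -> 2, 241 -> 3). A minimal sketch of that arithmetic, assuming the constant stays at 120 and no histogram-driven chunk cuts occur:

    // chunksForUniformSamples computes the chunk count the encoder cuts for n
    // samples of a single value type, assuming seriesToChunkEncoderSplit == 120.
    func chunksForUniformSamples(n int) int {
        const split = 120
        if n == 0 {
            return 0
        }
        return (n + split - 1) / split // ceiling division
    }
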
@@ -414,7 +414,7 @@ func (s *seriesToChunkEncoder) EstimatedChunkCount() int { chunkCount++ } - return chunkCount + return chunkCount, it.Err() } func appendChunk(chks []chunks.Meta, mint, maxt int64, chk chunkenc.Chunk) []chunks.Meta { diff --git a/storage/series_test.go b/storage/series_test.go index 3f6625aeb9..b9226db2c5 100644 --- a/storage/series_test.go +++ b/storage/series_test.go @@ -100,7 +100,10 @@ func TestNewListChunkSeriesFromSamples(t *testing.T) { require.NoError(t, it.Err()) require.Len(t, chks, 2) - require.Equal(t, len(chks), series.EstimatedChunkCount(), "should have one chunk per sample") + + count, err := series.EstimatedChunkCount() + require.NoError(t, err) + require.Equal(t, len(chks), count, "should have one chunk per group of samples") } // TestSeriesSetToChunkSet test the property of SeriesSet that says @@ -221,7 +224,9 @@ func TestSeriesToChunks(t *testing.T) { chks, err := ExpandChunks(encoder.Iterator(nil)) require.NoError(t, err) require.Len(t, chks, testCase.expectedChunkCount) - require.Equal(t, testCase.expectedChunkCount, encoder.EstimatedChunkCount()) + count, err := encoder.EstimatedChunkCount() + require.NoError(t, err) + require.Equal(t, testCase.expectedChunkCount, count) encodedSamples := expandChunks(chks) require.Equal(t, testCase.samples, encodedSamples) @@ -534,7 +539,9 @@ func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) { require.NoError(t, err) require.Equal(t, len(test.expectedCounterResetHeaders), len(chks)) - require.Len(t, chks, encoder.EstimatedChunkCount()) + count, err := encoder.EstimatedChunkCount() + require.NoError(t, err) + require.Len(t, chks, count) // Decode all encoded samples and assert they are equal to the original ones. encodedSamples := expandChunks(chks) diff --git a/tsdb/querier.go b/tsdb/querier.go index cbee873baf..1c3c95f269 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ -649,8 +649,8 @@ func (s *chunkSeriesEntry) Iterator(it chunks.Iterator) chunks.Iterator { return pi } -func (s *chunkSeriesEntry) EstimatedChunkCount() int { - return len(s.chks) +func (s *chunkSeriesEntry) EstimatedChunkCount() (int, error) { + return len(s.chks), nil } // populateWithDelSeriesIterator allows to iterate over samples for the single series. From a0634e740854458e75ddd996321cc2977f5a5f15 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Thu, 6 Jul 2023 15:42:44 +1000 Subject: [PATCH 6/7] Rename EstimatedChunkCount to ChunkCount --- storage/interface.go | 6 +++--- storage/merge.go | 2 +- storage/merge_test.go | 4 ++-- storage/series.go | 5 ++--- storage/series_test.go | 6 +++--- tsdb/querier.go | 2 +- 6 files changed, 12 insertions(+), 13 deletions(-) diff --git a/storage/interface.go b/storage/interface.go index 110c17a5cb..74ddc5acad 100644 --- a/storage/interface.go +++ b/storage/interface.go @@ -416,11 +416,11 @@ type ChunkSeries interface { Labels ChunkIterable - // EstimatedChunkCount returns an estimate of the number of chunks available from this ChunkSeries. + // ChunkCount returns the number of chunks available from this ChunkSeries. // - // This estimate is used by Mimir's ingesters to report the number of chunks expected to be returned by a query, + // This value is used by Mimir's ingesters to report the number of chunks expected to be returned by a query, // which is used by queriers to enforce the 'max chunks per query' limit. - EstimatedChunkCount() (int, error) + ChunkCount() (int, error) } // Labels represents an item that has labels e.g. time series. 
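Note: to make the renamed contract concrete, here is a sketch of a hand-built ChunkSeriesEntry satisfying the final ChunkSeries interface. The labels and function name are illustrative, and the snippet assumes imports of prometheus/model/labels, prometheus/storage and prometheus/tsdb/chunks.

    // emptyDemoSeries returns a ChunkSeries with no chunks whose ChunkCount
    // agrees with its iterator, as the tests in this series require.
    func emptyDemoSeries() storage.ChunkSeries {
        return &storage.ChunkSeriesEntry{
            Lset: labels.FromStrings("__name__", "demo_series"),
            ChunkIteratorFn: func(chunks.Iterator) chunks.Iterator {
                return storage.NewListChunkSeriesIterator() // yields no chunks
            },
            ChunkCountFn: func() (int, error) { return 0, nil },
        }
    }
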
diff --git a/storage/merge.go b/storage/merge.go index 892055feb7..cfb3ce5f6a 100644 --- a/storage/merge.go +++ b/storage/merge.go @@ -837,7 +837,7 @@ func NewConcatenatingChunkSeriesMerger() VerticalChunkSeriesMergeFunc { chunkCount := 0 for _, series := range series { - c, err := series.EstimatedChunkCount() + c, err := series.ChunkCount() if err != nil { return 0, err diff --git a/storage/merge_test.go b/storage/merge_test.go index 33342bc75c..82627d9871 100644 --- a/storage/merge_test.go +++ b/storage/merge_test.go @@ -579,7 +579,7 @@ func TestCompactingChunkSeriesMerger(t *testing.T) { require.Equal(t, expErr, actErr) require.Equal(t, expChks, actChks) - count, err := merged.EstimatedChunkCount() + count, err := merged.ChunkCount() require.NoError(t, err) require.Len(t, actChks, count) }) @@ -717,7 +717,7 @@ func TestConcatenatingChunkSeriesMerger(t *testing.T) { require.Equal(t, expErr, actErr) require.Equal(t, expChks, actChks) - count, err := merged.EstimatedChunkCount() + count, err := merged.ChunkCount() require.NoError(t, err) require.Equal(t, len(expChks), count) }) diff --git a/storage/series.go b/storage/series.go index 5205eda8bb..1d1aa820e9 100644 --- a/storage/series.go +++ b/storage/series.go @@ -41,7 +41,7 @@ type ChunkSeriesEntry struct { func (s *ChunkSeriesEntry) Labels() labels.Labels { return s.Lset } func (s *ChunkSeriesEntry) Iterator(it chunks.Iterator) chunks.Iterator { return s.ChunkIteratorFn(it) } -func (s *ChunkSeriesEntry) EstimatedChunkCount() (int, error) { return s.ChunkCountFn() } +func (s *ChunkSeriesEntry) ChunkCount() (int, error) { return s.ChunkCountFn() } // NewListSeries returns series entry with iterator that allows to iterate over provided samples. func NewListSeries(lset labels.Labels, s []tsdbutil.Sample) *SeriesEntry { @@ -402,8 +402,7 @@ func (s *seriesToChunkEncoder) Iterator(it chunks.Iterator) chunks.Iterator { return NewListChunkSeriesIterator(chks...) } -// EstimatedChunkCount returns an estimate of the number of chunks produced by Iterator. -func (s *seriesToChunkEncoder) EstimatedChunkCount() (int, error) { +func (s *seriesToChunkEncoder) ChunkCount() (int, error) { // This method is expensive, but we don't expect to ever actually use this on the ingester query path in Mimir - // it's just here to ensure things don't break if this assumption ever changes. 
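Note: a sketch of the encoder path end to end, mirroring the "241 samples" case in TestSeriesToChunks: with the 120-sample split, 241 float samples yield 3 chunks. GenerateSamples comes from tsdb/tsdbutil; the wrapper function name is an assumption.

    // encoderChunkCountDemo wraps 241 generated float samples in a
    // seriesToChunkEncoder and counts the chunks it would cut; returns 3.
    func encoderChunkCountDemo() (int, error) {
        series := storage.NewListSeries(
            labels.FromStrings("__name__", "demo"),
            tsdbutil.GenerateSamples(0, 241),
        )
        return storage.NewSeriesToChunkEncoder(series).ChunkCount()
    }
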
diff --git a/storage/series_test.go b/storage/series_test.go index b9226db2c5..81f0080773 100644 --- a/storage/series_test.go +++ b/storage/series_test.go @@ -101,7 +101,7 @@ func TestNewListChunkSeriesFromSamples(t *testing.T) { require.NoError(t, it.Err()) require.Len(t, chks, 2) - count, err := series.EstimatedChunkCount() + count, err := series.ChunkCount() require.NoError(t, err) require.Equal(t, len(chks), count, "should have one chunk per group of samples") } @@ -224,7 +224,7 @@ func TestSeriesToChunks(t *testing.T) { chks, err := ExpandChunks(encoder.Iterator(nil)) require.NoError(t, err) require.Len(t, chks, testCase.expectedChunkCount) - count, err := encoder.EstimatedChunkCount() + count, err := encoder.ChunkCount() require.NoError(t, err) require.Equal(t, testCase.expectedChunkCount, count) @@ -539,7 +539,7 @@ func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) { require.NoError(t, err) require.Equal(t, len(test.expectedCounterResetHeaders), len(chks)) - count, err := encoder.EstimatedChunkCount() + count, err := encoder.ChunkCount() require.NoError(t, err) require.Len(t, chks, count) diff --git a/tsdb/querier.go b/tsdb/querier.go index 1c3c95f269..dad5595492 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ -649,7 +649,7 @@ func (s *chunkSeriesEntry) Iterator(it chunks.Iterator) chunks.Iterator { return pi } -func (s *chunkSeriesEntry) EstimatedChunkCount() (int, error) { +func (s *chunkSeriesEntry) ChunkCount() (int, error) { return len(s.chks), nil } From 85fe1b0d832a52ab6ec92000456fe435e4fc9725 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Thu, 6 Jul 2023 15:53:20 +1000 Subject: [PATCH 7/7] Fix linting issue. --- storage/merge.go | 1 - 1 file changed, 1 deletion(-) diff --git a/storage/merge.go b/storage/merge.go index cfb3ce5f6a..a196b0bc0d 100644 --- a/storage/merge.go +++ b/storage/merge.go @@ -838,7 +838,6 @@ func NewConcatenatingChunkSeriesMerger() VerticalChunkSeriesMergeFunc { for _, series := range series { c, err := series.ChunkCount() - if err != nil { return 0, err }
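Note: taken together, the series leaves every ChunkSeries able to report its chunk count with error propagation. The sketch below shows the querier-side use described in the interface comment; the limit plumbing and names are assumptions for illustration, not Mimir's actual code.

    // checkChunkLimit walks a ChunkSeriesSet and fails once the running total
    // exceeds a hypothetical 'max chunks per query' limit.
    func checkChunkLimit(set storage.ChunkSeriesSet, maxChunks int) error {
        total := 0
        for set.Next() {
            n, err := set.At().ChunkCount()
            if err != nil {
                return err
            }
            total += n
            if total > maxChunks {
                return fmt.Errorf("query would load at least %d chunks, limit is %d", total, maxChunks)
            }
        }
        return set.Err()
    }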