Merge pull request #14951 from prometheus/update-rel-2.55

[release-2.55] Add #14948 to rc0
Commit e0260930d6 by Bryan Boreham, 2024-09-20 18:42:51 +01:00 (committed by GitHub)
5 changed files with 25 additions and 20 deletions

CHANGELOG.md

```diff
@@ -2,7 +2,7 @@

 ## unreleased

-## 2.55.0-rc.0 / 2024-09-17
+## 2.55.0-rc.0 / 2024-09-20

 * [FEATURE] Support UTF-8 characters in label names - feature flag `utf8-names`. #14482, #14880, #14736, #14727
 * [FEATURE] Support config reload automatically - feature flag `auto-reload-config`. #14769
@@ -23,7 +23,7 @@
 * [ENHANCEMENT] PromQL: Use Kahan summation for better accuracy in `avg` and `avg_over_time`. #14413
 * [ENHANCEMENT] TSDB: Backward compatibility with upcoming index v3. #14934
 * [PERF] Remote-Read: Support streaming mode. #11379
-* [PERF] TSDB: Query in-order and out-of-order series together. #14354, #14693, #14714, #14831, #14874
+* [PERF] TSDB: Query in-order and out-of-order series together. #14354, #14693, #14714, #14831, #14874, #14948
 * [PERF] TSDB: Streamline reading of overlapping out-of-order head chunks. #14729
 * [BUGFIX] SD: Fix dropping targets (with feature flag `new-service-discovery-manager`). #13147
 * [BUGFIX] SD: Stop storing stale targets (with feature flag `new-service-discovery-manager`). #13622
```
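For readers skimming the changelog: the Kahan-summation entry (#14413) refers to compensated summation, which carries a running correction term so that `avg` and `avg_over_time` lose far less precision when values of very different magnitudes are mixed. A minimal, self-contained Go sketch of the technique (illustrative only; the PromQL engine's actual helper differs in detail):

```go
package main

import (
	"fmt"
	"math"
)

// compensatedSum demonstrates Kahan summation in its Neumaier variant:
// c accumulates the low-order bits that naive addition drops.
func compensatedSum(values []float64) float64 {
	var sum, c float64
	for _, v := range values {
		t := sum + v
		if math.Abs(sum) >= math.Abs(v) {
			c += (sum - t) + v // low-order bits of v were lost in t
		} else {
			c += (v - t) + sum // low-order bits of sum were lost in t
		}
		sum = t
	}
	return sum + c
}

func main() {
	vals := []float64{1.0, 1e100, 1.0, -1e100} // exact sum is 2
	naive := 0.0
	for _, v := range vals {
		naive += v
	}
	fmt.Println(naive, compensatedSum(vals)) // prints: 0 2
}
```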

tsdb/db.go

```diff
@@ -2043,6 +2043,7 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) {

 	overlapsOOO := overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime())
 	var headQuerier storage.Querier
+	inoMint := mint
 	if maxt >= db.head.MinTime() || overlapsOOO {
 		rh := NewRangeHead(db.head, mint, maxt)
 		var err error
@@ -2067,13 +2068,14 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) {
 			if err != nil {
 				return nil, fmt.Errorf("open block querier for head while getting new querier %s: %w", rh, err)
 			}
+			inoMint = newMint
 		}
 	}

 	if overlapsOOO {
 		// We need to fetch from in-order and out-of-order chunks: wrap the headQuerier.
 		isoState := db.head.oooIso.TrackReadAfter(db.lastGarbageCollectedMmapRef)
-		headQuerier = NewHeadAndOOOQuerier(mint, maxt, db.head, isoState, headQuerier)
+		headQuerier = NewHeadAndOOOQuerier(inoMint, mint, maxt, db.head, isoState, headQuerier)
 	}

 	if headQuerier != nil {
@@ -2119,6 +2121,7 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer

 	overlapsOOO := overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime())
 	var headQuerier storage.ChunkQuerier
+	inoMint := mint
 	if maxt >= db.head.MinTime() || overlapsOOO {
 		rh := NewRangeHead(db.head, mint, maxt)
 		headQuerier, err = db.blockChunkQuerierFunc(rh, mint, maxt)
@@ -2142,13 +2145,14 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer
 			if err != nil {
 				return nil, fmt.Errorf("open querier for head while getting new querier %s: %w", rh, err)
 			}
+			inoMint = newMint
 		}
 	}

 	if overlapsOOO {
 		// We need to fetch from in-order and out-of-order chunks: wrap the headQuerier.
 		isoState := db.head.oooIso.TrackReadAfter(db.lastGarbageCollectedMmapRef)
-		headQuerier = NewHeadAndOOOChunkQuerier(mint, maxt, db.head, isoState, headQuerier)
+		headQuerier = NewHeadAndOOOChunkQuerier(inoMint, mint, maxt, db.head, isoState, headQuerier)
 	}

 	if headQuerier != nil {
```
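The shape of the change above: both `Querier` and `blockChunkQuerierForRange` now carry an in-order lower bound, `inoMint`, alongside the query's `mint`. When the head is truncated while the querier is being opened and the retry re-opens it from `newMint`, only the in-order bound moves; the out-of-order wrapper still covers the original `[mint, maxt]`, since the truncated in-order range is now served by persisted blocks. A hypothetical, much-simplified sketch of that pattern (names and types are illustrative, not the TSDB's):

```go
package main

import "fmt"

// mergedQuerier stands in for the result of NewHeadAndOOOQuerier: it reads
// in-order data starting at inoMint and out-of-order data starting at mint.
type mergedQuerier struct {
	inoMint, mint, maxt int64
}

// buildQuerier sketches the retry logic from the diff: if the head was
// truncated to truncatedTo while we were opening it, raise only the
// in-order bound; the dropped range is now covered by persisted blocks.
func buildQuerier(mint, maxt, truncatedTo int64) mergedQuerier {
	inoMint := mint
	if truncatedTo > mint {
		inoMint = truncatedTo // the `inoMint = newMint` step in the diff
	}
	return mergedQuerier{inoMint: inoMint, mint: mint, maxt: maxt}
}

func main() {
	q := buildQuerier(0, 100, 50) // head truncated to t=50 mid-setup
	fmt.Printf("in-order reads from %d; OOO reads still cover [%d, %d]\n",
		q.inoMint, q.mint, q.maxt)
}
```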

tsdb/ooo_head_read.go

```diff
@@ -35,6 +35,7 @@ var _ IndexReader = &HeadAndOOOIndexReader{}

 type HeadAndOOOIndexReader struct {
 	*headIndexReader // A reference to the headIndexReader so we can reuse as many interface implementation as possible.
+	inoMint                     int64
 	lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef
 }

@@ -49,13 +50,13 @@ func (o mergedOOOChunks) Iterator(iterator chunkenc.Iterator) chunkenc.Iterator
 	return storage.ChainSampleIteratorFromIterables(iterator, o.chunkIterables)
 }

-func NewHeadAndOOOIndexReader(head *Head, mint, maxt int64, lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef) *HeadAndOOOIndexReader {
+func NewHeadAndOOOIndexReader(head *Head, inoMint, mint, maxt int64, lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef) *HeadAndOOOIndexReader {
 	hr := &headIndexReader{
 		head: head,
 		mint: mint,
 		maxt: maxt,
 	}
-	return &HeadAndOOOIndexReader{hr, lastGarbageCollectedMmapRef}
+	return &HeadAndOOOIndexReader{hr, inoMint, lastGarbageCollectedMmapRef}
 }

 func (oh *HeadAndOOOIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
@@ -76,9 +77,9 @@ func (oh *HeadAndOOOIndexReader) Series(ref storage.SeriesRef, builder *labels.S
 	*chks = (*chks)[:0]

 	if s.ooo != nil {
-		return getOOOSeriesChunks(s, oh.mint, oh.maxt, oh.lastGarbageCollectedMmapRef, 0, true, chks)
+		return getOOOSeriesChunks(s, oh.mint, oh.maxt, oh.lastGarbageCollectedMmapRef, 0, true, oh.inoMint, chks)
 	}
-	*chks = appendSeriesChunks(s, oh.mint, oh.maxt, *chks)
+	*chks = appendSeriesChunks(s, oh.inoMint, oh.maxt, *chks)
 	return nil
 }

@@ -87,7 +88,7 @@ func (oh *HeadAndOOOIndexReader) Series(ref storage.SeriesRef, builder *labels.S
 //
 // maxMmapRef tells upto what max m-map chunk that we can consider. If it is non-0, then
 // the oooHeadChunk will not be considered.
-func getOOOSeriesChunks(s *memSeries, mint, maxt int64, lastGarbageCollectedMmapRef, maxMmapRef chunks.ChunkDiskMapperRef, includeInOrder bool, chks *[]chunks.Meta) error {
+func getOOOSeriesChunks(s *memSeries, mint, maxt int64, lastGarbageCollectedMmapRef, maxMmapRef chunks.ChunkDiskMapperRef, includeInOrder bool, inoMint int64, chks *[]chunks.Meta) error {
 	tmpChks := make([]chunks.Meta, 0, len(s.ooo.oooMmappedChunks))

 	addChunk := func(minT, maxT int64, ref chunks.ChunkRef, chunk chunkenc.Chunk) {
@@ -128,7 +129,7 @@ func getOOOSeriesChunks(s *memSeries, mint, maxt int64, lastGarbageCollectedMmap
 	}

 	if includeInOrder {
-		tmpChks = appendSeriesChunks(s, mint, maxt, tmpChks)
+		tmpChks = appendSeriesChunks(s, inoMint, maxt, tmpChks)
 	}

 	// There is nothing to do if we did not collect any chunk.
@@ -476,7 +477,7 @@ func (ir *OOOCompactionHeadIndexReader) Series(ref storage.SeriesRef, builder *l
 		return nil
 	}

-	return getOOOSeriesChunks(s, ir.ch.mint, ir.ch.maxt, 0, ir.ch.lastMmapRef, false, chks)
+	return getOOOSeriesChunks(s, ir.ch.mint, ir.ch.maxt, 0, ir.ch.lastMmapRef, false, 0, chks)
 }

 func (ir *OOOCompactionHeadIndexReader) SortedLabelValues(_ context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
@@ -516,7 +517,7 @@ type HeadAndOOOQuerier struct {
 	querier storage.Querier // Used for LabelNames, LabelValues, but may be nil if head was truncated in the mean time, in which case we ignore it and not close it in the end.
 }

-func NewHeadAndOOOQuerier(mint, maxt int64, head *Head, oooIsoState *oooIsolationState, querier storage.Querier) storage.Querier {
+func NewHeadAndOOOQuerier(inoMint, mint, maxt int64, head *Head, oooIsoState *oooIsolationState, querier storage.Querier) storage.Querier {
 	cr := &headChunkReader{
 		head: head,
 		mint: mint,
@@ -527,7 +528,7 @@ func NewHeadAndOOOQuerier(mint, maxt int64, head *Head, oooIsoState *oooIsolatio
 		mint:    mint,
 		maxt:    maxt,
 		head:    head,
-		index:   NewHeadAndOOOIndexReader(head, mint, maxt, oooIsoState.minRef),
+		index:   NewHeadAndOOOIndexReader(head, inoMint, mint, maxt, oooIsoState.minRef),
 		chunkr:  NewHeadAndOOOChunkReader(head, mint, maxt, cr, oooIsoState, 0),
 		querier: querier,
 	}
@@ -568,7 +569,7 @@ type HeadAndOOOChunkQuerier struct {
 	querier storage.ChunkQuerier
 }

-func NewHeadAndOOOChunkQuerier(mint, maxt int64, head *Head, oooIsoState *oooIsolationState, querier storage.ChunkQuerier) storage.ChunkQuerier {
+func NewHeadAndOOOChunkQuerier(inoMint, mint, maxt int64, head *Head, oooIsoState *oooIsolationState, querier storage.ChunkQuerier) storage.ChunkQuerier {
 	cr := &headChunkReader{
 		head: head,
 		mint: mint,
@@ -579,7 +580,7 @@ func NewHeadAndOOOChunkQuerier(mint, maxt int64, head *Head, oooIsoState *oooIso
 		mint:    mint,
 		maxt:    maxt,
 		head:    head,
-		index:   NewHeadAndOOOIndexReader(head, mint, maxt, oooIsoState.minRef),
+		index:   NewHeadAndOOOIndexReader(head, inoMint, mint, maxt, oooIsoState.minRef),
 		chunkr:  NewHeadAndOOOChunkReader(head, mint, maxt, cr, oooIsoState, 0),
 		querier: querier,
 	}
```
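Correspondingly, `getOOOSeriesChunks` takes the extra `inoMint` and applies it only where in-order chunks are appended, while out-of-order selection keeps using `mint`; `OOOCompactionHeadIndexReader` passes `includeInOrder=false`, so the `0` it supplies for `inoMint` is never consulted. A hypothetical sketch of this two-bound selection (simplified chunk-meta type, not the TSDB's):

```go
package main

import "fmt"

// meta is a simplified stand-in for chunks.Meta plus an in-order/OOO flag.
type meta struct {
	minT, maxT int64
	ooo        bool
}

func overlaps(m meta, mint, maxt int64) bool {
	return m.minT <= maxt && m.maxT >= mint
}

// selectChunks mirrors the two bounds in getOOOSeriesChunks: OOO chunks are
// matched against [mint, maxt], in-order chunks against [inoMint, maxt].
func selectChunks(all []meta, inoMint, mint, maxt int64, includeInOrder bool) []meta {
	var out []meta
	for _, m := range all {
		switch {
		case m.ooo && overlaps(m, mint, maxt):
			out = append(out, m)
		case !m.ooo && includeInOrder && overlaps(m, inoMint, maxt):
			out = append(out, m)
		}
	}
	return out
}

func main() {
	all := []meta{
		{minT: 0, maxT: 20, ooo: false},  // in-order, ends before inoMint=30: skipped
		{minT: 40, maxT: 60, ooo: false}, // in-order, overlaps [30, 100]: kept
		{minT: 5, maxT: 15, ooo: true},   // OOO, still matched against mint=0: kept
	}
	fmt.Println(selectChunks(all, 30, 0, 100, true))
}
```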

tsdb/ooo_head_read_test.go

```diff
@@ -360,7 +360,7 @@ func TestOOOHeadIndexReader_Series(t *testing.T) {
 				})
 			}

-			ir := NewHeadAndOOOIndexReader(h, tc.queryMinT, tc.queryMaxT, 0)
+			ir := NewHeadAndOOOIndexReader(h, tc.queryMinT, tc.queryMinT, tc.queryMaxT, 0)

 			var chks []chunks.Meta
 			var b labels.ScratchBuilder
@@ -450,7 +450,7 @@ func testOOOHeadChunkReader_LabelValues(t *testing.T, scenario sampleTypeScenari
 	for _, tc := range cases {
 		t.Run(tc.name, func(t *testing.T) {
 			// We first want to test using a head index reader that covers the biggest query interval
-			oh := NewHeadAndOOOIndexReader(head, tc.queryMinT, tc.queryMaxT, 0)
+			oh := NewHeadAndOOOIndexReader(head, tc.queryMinT, tc.queryMinT, tc.queryMaxT, 0)
 			matchers := []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "foo", "bar1")}
 			values, err := oh.LabelValues(ctx, "foo", matchers...)
 			sort.Strings(values)
@@ -854,7 +854,7 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) {

 			// The Series method populates the chunk metas, taking a copy of the
 			// head OOO chunk if necessary. These are then used by the ChunkReader.
-			ir := NewHeadAndOOOIndexReader(db.head, tc.queryMinT, tc.queryMaxT, 0)
+			ir := NewHeadAndOOOIndexReader(db.head, tc.queryMinT, tc.queryMinT, tc.queryMaxT, 0)
 			var chks []chunks.Meta
 			var b labels.ScratchBuilder
 			err = ir.Series(s1Ref, &b, &chks)
@@ -1023,7 +1023,7 @@ func testOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding(

 			// The Series method populates the chunk metas, taking a copy of the
 			// head OOO chunk if necessary. These are then used by the ChunkReader.
-			ir := NewHeadAndOOOIndexReader(db.head, tc.queryMinT, tc.queryMaxT, 0)
+			ir := NewHeadAndOOOIndexReader(db.head, tc.queryMinT, tc.queryMinT, tc.queryMaxT, 0)
 			var chks []chunks.Meta
 			var b labels.ScratchBuilder
 			err = ir.Series(s1Ref, &b, &chks)
```
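At all four call sites above the tests pass `tc.queryMinT` twice: with no head truncation in play, the in-order bound and the overall query bound coincide (`inoMint == mint`), so the expected results are unchanged. The benchmark below does the same, passing `1` for both.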

tsdb/querier_test.go

```diff
@@ -3170,7 +3170,7 @@ func BenchmarkQueries(b *testing.B) {
 				qHead, err := NewBlockQuerier(NewRangeHead(head, 1, nSamples), 1, nSamples)
 				require.NoError(b, err)
 				isoState := head.oooIso.TrackReadAfter(0)
-				qOOOHead := NewHeadAndOOOQuerier(1, nSamples, head, isoState, qHead)
+				qOOOHead := NewHeadAndOOOQuerier(1, 1, nSamples, head, isoState, qHead)

 				queryTypes = append(queryTypes, qt{
 					fmt.Sprintf("_Head_oooPercent:%d", oooPercentage), qOOOHead,
```