// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package tsdb

import (
	"context"
	"errors"
	"fmt"
	"math"
	"slices"

	"github.com/oklog/ulid"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
	"github.com/prometheus/prometheus/tsdb/chunks"
	"github.com/prometheus/prometheus/tsdb/index"
	"github.com/prometheus/prometheus/tsdb/tombstones"
	"github.com/prometheus/prometheus/util/annotations"
)

var _ IndexReader = &HeadAndOOOIndexReader{}

// HeadAndOOOIndexReader implements IndexReader over both the in-order and the
// out-of-order data in the head.
type HeadAndOOOIndexReader struct {
	*headIndexReader // A reference to the headIndexReader so we can reuse as many interface implementations as possible.
	lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef
}

var _ chunkenc.Iterable = &mergedOOOChunks{}

// mergedOOOChunks holds the list of iterables for overlapping chunks.
type mergedOOOChunks struct {
	chunkIterables []chunkenc.Iterable
}

func (o mergedOOOChunks) Iterator(iterator chunkenc.Iterator) chunkenc.Iterator {
	return storage.ChainSampleIteratorFromIterables(iterator, o.chunkIterables)
}
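
// Illustrative note (not part of the original code): mergedOOOChunks is what
// HeadAndOOOChunkReader.chunkOrIterable returns when a chunks.Meta carries a
// *multiMeta, i.e. when several overlapping OOO chunks were collapsed into one
// composite meta. A sketch of how a caller might consume it:
//
//	chk, it, err := cr.ChunkOrIterable(meta)
//	if err == nil && chk == nil && it != nil { // overlapping chunks come back as a merged iterable
//		iter := it.Iterator(nil)
//		for vt := iter.Next(); vt != chunkenc.ValNone; vt = iter.Next() {
//			ts, v := iter.At() // float samples; histograms go through AtHistogram/AtFloatHistogram
//			_, _ = ts, v
//		}
//	}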

// NewHeadAndOOOIndexReader returns an IndexReader over the given time range
// that covers both the in-order and the out-of-order data in the head.
func NewHeadAndOOOIndexReader(head *Head, mint, maxt int64, lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef) *HeadAndOOOIndexReader {
	hr := &headIndexReader{
		head: head,
		mint: mint,
		maxt: maxt,
	}
	return &HeadAndOOOIndexReader{hr, lastGarbageCollectedMmapRef}
}

func (oh *HeadAndOOOIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
	s := oh.head.series.getByID(chunks.HeadSeriesRef(ref))

	if s == nil {
		oh.head.metrics.seriesNotFound.Inc()
		return storage.ErrNotFound
	}
	builder.Assign(s.labels())

	if chks == nil {
		return nil
	}

	s.Lock()
	defer s.Unlock()
	*chks = (*chks)[:0]

	if s.ooo != nil {
		return getOOOSeriesChunks(s, oh.mint, oh.maxt, oh.lastGarbageCollectedMmapRef, 0, true, chks)
	}
	*chks = appendSeriesChunks(s, oh.mint, oh.maxt, *chks)
	return nil
}

// getOOOSeriesChunks collects the OOO chunks (and, if includeInOrder is true,
// the in-order chunks) of series s that overlap [mint, maxt], merges
// overlapping ones, and appends the result to chks.
//
// lastGarbageCollectedMmapRef is the last m-map chunk that may currently be
// garbage collected; any chunk at or before this ref is not considered.
// 0 disables this check.
//
// maxMmapRef is the highest m-map chunk ref that will be considered. If it is
// non-0, the in-memory oooHeadChunk is not considered at all.
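//
// Illustrative call patterns, taken from the two call sites in this file:
//
//	// Queries: consider everything newer than the GC watermark, head chunk included.
//	getOOOSeriesChunks(s, oh.mint, oh.maxt, oh.lastGarbageCollectedMmapRef, 0, true, chks)
//
//	// OOO compaction: only m-mapped chunks up to lastMmapRef, no in-order chunks.
//	getOOOSeriesChunks(s, ir.ch.mint, ir.ch.maxt, 0, ir.ch.lastMmapRef, false, chks)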
func getOOOSeriesChunks(s *memSeries, mint, maxt int64, lastGarbageCollectedMmapRef, maxMmapRef chunks.ChunkDiskMapperRef, includeInOrder bool, chks *[]chunks.Meta) error {
	tmpChks := make([]chunks.Meta, 0, len(s.ooo.oooMmappedChunks))

	addChunk := func(minT, maxT int64, ref chunks.ChunkRef, chunk chunkenc.Chunk) {
		tmpChks = append(tmpChks, chunks.Meta{
			MinTime: minT,
			MaxTime: maxT,
			Ref:     ref,
			Chunk:   chunk,
		})
	}

	// Collect all chunks that overlap the query range.
	if s.ooo.oooHeadChunk != nil {
		c := s.ooo.oooHeadChunk
		if c.OverlapsClosedInterval(mint, maxt) && maxMmapRef == 0 {
			ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(len(s.ooo.oooMmappedChunks))))
			if len(c.chunk.samples) > 0 { // Empty samples happen in tests, at least.
				chks, err := s.ooo.oooHeadChunk.chunk.ToEncodedChunks(c.minTime, c.maxTime)
				if err != nil {
					handleChunkWriteError(err)
					return nil
				}
				for _, chk := range chks {
					addChunk(chk.minTime, chk.maxTime, ref, chk.chunk)
				}
			} else {
				var emptyChunk chunkenc.Chunk
				addChunk(c.minTime, c.maxTime, ref, emptyChunk)
			}
		}
	}

	for i := len(s.ooo.oooMmappedChunks) - 1; i >= 0; i-- {
		c := s.ooo.oooMmappedChunks[i]
		if c.OverlapsClosedInterval(mint, maxt) && (maxMmapRef == 0 || maxMmapRef.GreaterThanOrEqualTo(c.ref)) && (lastGarbageCollectedMmapRef == 0 || c.ref.GreaterThan(lastGarbageCollectedMmapRef)) {
			ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(i)))
			addChunk(c.minTime, c.maxTime, ref, nil)
		}
	}

	if includeInOrder {
		tmpChks = appendSeriesChunks(s, mint, maxt, tmpChks)
	}

	// There is nothing to do if we did not collect any chunk.
	if len(tmpChks) == 0 {
		return nil
	}

	// Next we want to sort all the collected chunks by min time so we can find
	// those that overlap.
	slices.SortFunc(tmpChks, lessByMinTimeAndMinRef)

	// Next we want to iterate the sorted collected chunks and return composites for chunks that overlap with others.
	// Example chunks of a series: 5:(100, 200) 6:(500, 600) 7:(150, 250) 8:(550, 650)
	// In the example 5 overlaps with 7 and 6 overlaps with 8 so we will return
	// [5,7], [6,8].
	toBeMerged := tmpChks[0]
	for _, c := range tmpChks[1:] {
		if c.MinTime > toBeMerged.MaxTime {
			// This chunk doesn't overlap. Send current toBeMerged to output and start a new one.
			*chks = append(*chks, toBeMerged)
			toBeMerged = c
		} else {
			// Merge this chunk with existing toBeMerged.
			if mm, ok := toBeMerged.Chunk.(*multiMeta); ok {
				mm.metas = append(mm.metas, c)
			} else {
				toBeMerged.Chunk = &multiMeta{metas: []chunks.Meta{toBeMerged, c}}
			}
			if toBeMerged.MaxTime < c.MaxTime {
				toBeMerged.MaxTime = c.MaxTime
			}
		}
	}
	*chks = append(*chks, toBeMerged)

	return nil
}

// multiMeta is a fake Chunk object used to pass a set of Metas inside Meta.Chunk.
type multiMeta struct {
	chunkenc.Chunk // We don't expect any of the methods to be called.
	metas          []chunks.Meta
}
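
// Illustrative note (not in the original source): the reader side unwraps this
// in HeadAndOOOChunkReader.chunkOrIterable. A sketch of the round trip, using
// hypothetical overlapping metas m1 and m2:
//
//	meta := chunks.Meta{MinTime: m1.MinTime, MaxTime: m2.MaxTime,
//		Chunk: &multiMeta{metas: []chunks.Meta{m1, m2}}}
//	// chunkOrIterable sees the *multiMeta and returns a mergedOOOChunks
//	// iterable that chains the samples of m1 and m2 in time order.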

// LabelValues needs to be overridden from the headIndexReader implementation
// so we can return labels within either the in-order range or the OOO range.
func (oh *HeadAndOOOIndexReader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
	if oh.maxt < oh.head.MinTime() && oh.maxt < oh.head.MinOOOTime() || oh.mint > oh.head.MaxTime() && oh.mint > oh.head.MaxOOOTime() {
		return []string{}, nil
	}

	if len(matchers) == 0 {
		return oh.head.postings.LabelValues(ctx, name), nil
	}

	return labelValuesWithMatchers(ctx, oh, name, matchers...)
}

func lessByMinTimeAndMinRef(a, b chunks.Meta) int {
	switch {
	case a.MinTime < b.MinTime:
		return -1
	case a.MinTime > b.MinTime:
		return 1
	}

	switch {
	case a.Ref < b.Ref:
		return -1
	case a.Ref > b.Ref:
		return 1
	default:
		return 0
	}
}

// HeadAndOOOChunkReader implements ChunkReader over both the in-order and the
// out-of-order chunks in the head.
type HeadAndOOOChunkReader struct {
	head        *Head
	mint, maxt  int64
	cr          *headChunkReader // If nil, only read OOO chunks.
	maxMmapRef  chunks.ChunkDiskMapperRef
	oooIsoState *oooIsolationState
}

func NewHeadAndOOOChunkReader(head *Head, mint, maxt int64, cr *headChunkReader, oooIsoState *oooIsolationState, maxMmapRef chunks.ChunkDiskMapperRef) *HeadAndOOOChunkReader {
	return &HeadAndOOOChunkReader{
		head:        head,
		mint:        mint,
		maxt:        maxt,
		cr:          cr,
		maxMmapRef:  maxMmapRef,
		oooIsoState: oooIsoState,
	}
}

func (cr *HeadAndOOOChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) {
	c, it, _, err := cr.chunkOrIterable(meta, false)
	return c, it, err
}

// ChunkOrIterableWithCopy implements ChunkReaderWithCopy. The special Copy
// behaviour is only implemented for the in-order head chunk.
func (cr *HeadAndOOOChunkReader) ChunkOrIterableWithCopy(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, int64, error) {
	return cr.chunkOrIterable(meta, true)
}

func (cr *HeadAndOOOChunkReader) chunkOrIterable(meta chunks.Meta, copyLastChunk bool) (chunkenc.Chunk, chunkenc.Iterable, int64, error) {
	sid, cid, isOOO := unpackHeadChunkRef(meta.Ref)
	s := cr.head.series.getByID(sid)
	// This means that the series has been garbage collected.
	if s == nil {
		return nil, nil, 0, storage.ErrNotFound
	}
	var isoState *isolationState
	if cr.cr != nil {
		isoState = cr.cr.isoState
	}

	s.Lock()
	defer s.Unlock()

	if meta.Chunk == nil {
		c, maxt, err := cr.head.chunkFromSeries(s, cid, isOOO, meta.MinTime, meta.MaxTime, isoState, copyLastChunk)
		return c, nil, maxt, err
	}
	mm, ok := meta.Chunk.(*multiMeta)
	if !ok { // Complete chunk was supplied.
		return meta.Chunk, nil, meta.MaxTime, nil
	}
	// We have a composite meta: construct a composite iterable.
	mc := &mergedOOOChunks{}
	for _, m := range mm.metas {
		switch {
		case m.Chunk != nil:
			mc.chunkIterables = append(mc.chunkIterables, m.Chunk)
		default:
			_, cid, isOOO := unpackHeadChunkRef(m.Ref)
			iterable, _, err := cr.head.chunkFromSeries(s, cid, isOOO, m.MinTime, m.MaxTime, isoState, copyLastChunk)
			if err != nil {
				return nil, nil, 0, fmt.Errorf("invalid head chunk: %w", err)
			}
			mc.chunkIterables = append(mc.chunkIterables, iterable)
		}
	}

	return nil, mc, meta.MaxTime, nil
}

func (cr *HeadAndOOOChunkReader) Close() error {
	if cr.cr != nil && cr.cr.isoState != nil {
		cr.cr.isoState.Close()
	}
	if cr.oooIsoState != nil {
		cr.oooIsoState.Close()
	}
	return nil
}

// OOOCompactionHead holds the out-of-order data of the head that is ready to
// be compacted, and is read through the BlockReader methods below.
type OOOCompactionHead struct {
	head        *Head
	lastMmapRef chunks.ChunkDiskMapperRef
	lastWBLFile int
	postings    []storage.SeriesRef
	chunkRange  int64
	mint, maxt  int64 // Among all the compactable chunks.
}

// NewOOOCompactionHead does the following:
// 1. M-maps all the in-memory OOO chunks.
// 2. Computes the expected block ranges while iterating through all OOO series and stores them.
// 3. Stores the list of postings having OOO series.
// 4. Cuts a new WBL file for the OOO WBL.
// All the above together have a bit of CPU and memory overhead, and can have a bit of impact
// on the sample append latency. So call NewOOOCompactionHead only right before compaction.
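//
// Illustrative usage (a sketch of a caller, not copied from the compactor code):
//
//	oooHead, err := NewOOOCompactionHead(ctx, head)
//	if err != nil {
//		return err
//	}
//	for _, r := range blockRanges { // blockRanges is hypothetical, one entry per output block
//		rh := oooHead.CloneForTimeRange(r.mint, r.maxt)
//		// rh.Index(), rh.Chunks(), rh.Tombstones() and rh.Meta() feed the block writer.
//	}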
func NewOOOCompactionHead(ctx context.Context, head *Head) (*OOOCompactionHead, error) {
	ch := &OOOCompactionHead{
		head:       head,
		chunkRange: head.chunkRange.Load(),
		mint:       math.MaxInt64,
		maxt:       math.MinInt64,
	}

	if head.wbl != nil {
		lastWBLFile, err := head.wbl.NextSegmentSync()
		if err != nil {
			return nil, err
		}
		ch.lastWBLFile = lastWBLFile
	}

	hr := headIndexReader{head: head, mint: ch.mint, maxt: ch.maxt}
	n, v := index.AllPostingsKey()
	// TODO: filter to series with OOO samples, before sorting.
	p, err := hr.Postings(ctx, n, v)
	if err != nil {
		return nil, err
	}
	p = hr.SortedPostings(p)

	var lastSeq, lastOff int
	for p.Next() {
		seriesRef := p.At()
		ms := head.series.getByID(chunks.HeadSeriesRef(seriesRef))
		if ms == nil {
			continue
		}

		// M-map the in-memory chunk and keep track of the last one.
		// Also build the block ranges -> series map.
		// TODO: consider having a lock specifically for ooo data.
		ms.Lock()

		if ms.ooo == nil {
			ms.Unlock()
			continue
		}

		var lastMmapRef chunks.ChunkDiskMapperRef
		mmapRefs := ms.mmapCurrentOOOHeadChunk(head.chunkDiskMapper, head.logger)
		if len(mmapRefs) == 0 && len(ms.ooo.oooMmappedChunks) > 0 {
			// Nothing was m-mapped. So take the mmapRef from the existing slice if it exists.
			mmapRefs = []chunks.ChunkDiskMapperRef{ms.ooo.oooMmappedChunks[len(ms.ooo.oooMmappedChunks)-1].ref}
		}
		if len(mmapRefs) == 0 {
			lastMmapRef = 0
		} else {
			lastMmapRef = mmapRefs[len(mmapRefs)-1]
		}
		seq, off := lastMmapRef.Unpack()
		if seq > lastSeq || (seq == lastSeq && off > lastOff) {
			ch.lastMmapRef, lastSeq, lastOff = lastMmapRef, seq, off
		}

		if len(ms.ooo.oooMmappedChunks) > 0 {
			ch.postings = append(ch.postings, seriesRef)
			for _, c := range ms.ooo.oooMmappedChunks {
				if c.minTime < ch.mint {
					ch.mint = c.minTime
				}
				if c.maxTime > ch.maxt {
					ch.maxt = c.maxTime
				}
			}
		}
		ms.Unlock()
	}

	return ch, nil
}

func (ch *OOOCompactionHead) Index() (IndexReader, error) {
	return NewOOOCompactionHeadIndexReader(ch), nil
}

func (ch *OOOCompactionHead) Chunks() (ChunkReader, error) {
	return NewHeadAndOOOChunkReader(ch.head, ch.mint, ch.maxt, nil, nil, ch.lastMmapRef), nil
}

func (ch *OOOCompactionHead) Tombstones() (tombstones.Reader, error) {
	return tombstones.NewMemTombstones(), nil
}

var oooCompactionHeadULID = ulid.MustParse("0000000000XX000COMPACTHEAD")

func (ch *OOOCompactionHead) Meta() BlockMeta {
	return BlockMeta{
		MinTime: ch.mint,
		MaxTime: ch.maxt,
		ULID:    oooCompactionHeadULID,
		Stats: BlockStats{
			NumSeries: uint64(len(ch.postings)),
		},
	}
}

// CloneForTimeRange clones the OOOCompactionHead such that the IndexReader and ChunkReader
// obtained from it only look at the m-map chunks within the given time range, while not looking
// beyond ch.lastMmapRef.
// Only the methods of the BlockReader interface are valid for the cloned OOOCompactionHead.
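//
// For example (an illustrative sketch, not taken from the compactor), cloning
// per output block range might look like:
//
//	rh := ch.CloneForTimeRange(blockMint, blockMint+ch.ChunkRange()-1) // blockMint is hypothetical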
func (ch *OOOCompactionHead) CloneForTimeRange(mint, maxt int64) *OOOCompactionHead {
	return &OOOCompactionHead{
		head:        ch.head,
		lastMmapRef: ch.lastMmapRef,
		postings:    ch.postings,
		chunkRange:  ch.chunkRange,
		mint:        mint,
		maxt:        maxt,
	}
}

func (ch *OOOCompactionHead) Size() int64                            { return 0 }
func (ch *OOOCompactionHead) MinTime() int64                         { return ch.mint }
func (ch *OOOCompactionHead) MaxTime() int64                         { return ch.maxt }
func (ch *OOOCompactionHead) ChunkRange() int64                      { return ch.chunkRange }
func (ch *OOOCompactionHead) LastMmapRef() chunks.ChunkDiskMapperRef { return ch.lastMmapRef }
func (ch *OOOCompactionHead) LastWBLFile() int                       { return ch.lastWBLFile }

type OOOCompactionHeadIndexReader struct {
	ch *OOOCompactionHead
}

func NewOOOCompactionHeadIndexReader(ch *OOOCompactionHead) IndexReader {
	return &OOOCompactionHeadIndexReader{ch: ch}
}

func (ir *OOOCompactionHeadIndexReader) Symbols() index.StringIter {
	hr := headIndexReader{head: ir.ch.head, mint: ir.ch.mint, maxt: ir.ch.maxt}
	return hr.Symbols()
}

func (ir *OOOCompactionHeadIndexReader) Postings(_ context.Context, name string, values ...string) (index.Postings, error) {
	n, v := index.AllPostingsKey()
	if name != n || len(values) != 1 || values[0] != v {
		return nil, errors.New("only AllPostingsKey is supported")
	}
	return index.NewListPostings(ir.ch.postings), nil
}

func (ir *OOOCompactionHeadIndexReader) PostingsForLabelMatching(context.Context, string, func(string) bool) index.Postings {
	return index.ErrPostings(errors.New("not supported"))
}

func (ir *OOOCompactionHeadIndexReader) SortedPostings(p index.Postings) index.Postings {
	// This will already be sorted from the Postings() call above.
	return p
}

func (ir *OOOCompactionHeadIndexReader) ShardedPostings(p index.Postings, shardIndex, shardCount uint64) index.Postings {
	hr := headIndexReader{head: ir.ch.head, mint: ir.ch.mint, maxt: ir.ch.maxt}
	return hr.ShardedPostings(p, shardIndex, shardCount)
}

func (ir *OOOCompactionHeadIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
	s := ir.ch.head.series.getByID(chunks.HeadSeriesRef(ref))
	if s == nil {
		ir.ch.head.metrics.seriesNotFound.Inc()
		return storage.ErrNotFound
	}
	builder.Assign(s.labels())

	s.Lock()
	defer s.Unlock()
	*chks = (*chks)[:0]

	if s.ooo == nil {
		return nil
	}

	return getOOOSeriesChunks(s, ir.ch.mint, ir.ch.maxt, 0, ir.ch.lastMmapRef, false, chks)
}

func (ir *OOOCompactionHeadIndexReader) SortedLabelValues(_ context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
	return nil, errors.New("not implemented")
}

func (ir *OOOCompactionHeadIndexReader) LabelValues(_ context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
	return nil, errors.New("not implemented")
}

func (ir *OOOCompactionHeadIndexReader) PostingsForMatchers(_ context.Context, concurrent bool, ms ...*labels.Matcher) (index.Postings, error) {
	return nil, errors.New("not implemented")
}

func (ir *OOOCompactionHeadIndexReader) LabelNames(context.Context, ...*labels.Matcher) ([]string, error) {
	return nil, errors.New("not implemented")
}

func (ir *OOOCompactionHeadIndexReader) LabelValueFor(context.Context, storage.SeriesRef, string) (string, error) {
	return "", errors.New("not implemented")
}

func (ir *OOOCompactionHeadIndexReader) LabelNamesFor(ctx context.Context, postings index.Postings) ([]string, error) {
	return nil, errors.New("not implemented")
}

func (ir *OOOCompactionHeadIndexReader) Close() error {
	return nil
}

// HeadAndOOOQuerier queries both the head and the out-of-order head.
type HeadAndOOOQuerier struct {
	mint, maxt int64
	head       *Head
	index      IndexReader
	chunkr     ChunkReader
	querier    storage.Querier // Used for LabelNames and LabelValues, but may be nil if the head was truncated in the meantime, in which case we ignore it and do not close it in the end.
}

func NewHeadAndOOOQuerier(mint, maxt int64, head *Head, oooIsoState *oooIsolationState, querier storage.Querier) storage.Querier {
	cr := &headChunkReader{
		head:     head,
		mint:     mint,
		maxt:     maxt,
		isoState: head.iso.State(mint, maxt),
	}
	return &HeadAndOOOQuerier{
		mint:    mint,
		maxt:    maxt,
		head:    head,
		index:   NewHeadAndOOOIndexReader(head, mint, maxt, oooIsoState.minRef),
		chunkr:  NewHeadAndOOOChunkReader(head, mint, maxt, cr, oooIsoState, 0),
		querier: querier,
	}
}
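
// Illustrative note (a sketch, not the actual DB wiring): the database layer is
// expected to wrap whatever head querier it already built, so that Select sees
// both in-order and OOO data while LabelNames/LabelValues still go through the
// inner querier:
//
//	// "inner" and "oooIsoState" are assumed to be provided by the caller.
//	q := NewHeadAndOOOQuerier(mint, maxt, head, oooIsoState, inner)
//	defer q.Close()
//	ss := q.Select(ctx, true, nil, labels.MustNewMatcher(labels.MatchEqual, "job", "demo"))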

func (q *HeadAndOOOQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
	if q.querier == nil {
		return nil, nil, nil
	}
	return q.querier.LabelValues(ctx, name, hints, matchers...)
}

func (q *HeadAndOOOQuerier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
	if q.querier == nil {
		return nil, nil, nil
	}
	return q.querier.LabelNames(ctx, hints, matchers...)
}

func (q *HeadAndOOOQuerier) Close() error {
	q.chunkr.Close()
	if q.querier == nil {
		return nil
	}
	return q.querier.Close()
}

func (q *HeadAndOOOQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
	return selectSeriesSet(ctx, sortSeries, hints, matchers, q.index, q.chunkr, q.head.tombstones, q.mint, q.maxt)
}

// HeadAndOOOChunkQuerier queries both the head and the out-of-order head.
type HeadAndOOOChunkQuerier struct {
	mint, maxt int64
	head       *Head
	index      IndexReader
	chunkr     ChunkReader
	querier    storage.ChunkQuerier
}

func NewHeadAndOOOChunkQuerier(mint, maxt int64, head *Head, oooIsoState *oooIsolationState, querier storage.ChunkQuerier) storage.ChunkQuerier {
	cr := &headChunkReader{
		head:     head,
		mint:     mint,
		maxt:     maxt,
		isoState: head.iso.State(mint, maxt),
	}
	return &HeadAndOOOChunkQuerier{
		mint:    mint,
		maxt:    maxt,
		head:    head,
		index:   NewHeadAndOOOIndexReader(head, mint, maxt, oooIsoState.minRef),
		chunkr:  NewHeadAndOOOChunkReader(head, mint, maxt, cr, oooIsoState, 0),
		querier: querier,
	}
}

func (q *HeadAndOOOChunkQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
	if q.querier == nil {
		return nil, nil, nil
	}
	return q.querier.LabelValues(ctx, name, hints, matchers...)
}

func (q *HeadAndOOOChunkQuerier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
	if q.querier == nil {
		return nil, nil, nil
	}
	return q.querier.LabelNames(ctx, hints, matchers...)
}

func (q *HeadAndOOOChunkQuerier) Close() error {
	q.chunkr.Close()
	if q.querier == nil {
		return nil
	}
	return q.querier.Close()
}

func (q *HeadAndOOOChunkQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.ChunkSeriesSet {
	return selectChunkSeriesSet(ctx, sortSeries, hints, matchers, rangeHeadULID, q.index, q.chunkr, q.head.tombstones, q.mint, q.maxt)
}