2022-09-20 10:05:50 -07:00
|
|
|
// Copyright 2022 The Prometheus Authors
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
|
|
|
package tsdb
|
|
|
|
|
|
|
|
import (
|
2023-09-13 08:45:06 -07:00
|
|
|
"context"
|
2022-09-20 10:05:50 -07:00
|
|
|
"errors"
|
|
|
|
"math"
|
2024-01-15 08:24:46 -08:00
|
|
|
"slices"
|
2023-07-08 05:45:56 -07:00
|
|
|
|
2023-11-17 03:29:36 -08:00
|
|
|
"github.com/oklog/ulid"
|
2022-09-20 10:05:50 -07:00
|
|
|
|
|
|
|
"github.com/prometheus/prometheus/model/labels"
|
|
|
|
"github.com/prometheus/prometheus/storage"
|
|
|
|
"github.com/prometheus/prometheus/tsdb/chunkenc"
|
|
|
|
"github.com/prometheus/prometheus/tsdb/chunks"
|
|
|
|
"github.com/prometheus/prometheus/tsdb/index"
|
|
|
|
"github.com/prometheus/prometheus/tsdb/tombstones"
|
2024-06-24 05:41:44 -07:00
|
|
|
"github.com/prometheus/prometheus/util/annotations"
|
2022-09-20 10:05:50 -07:00
|
|
|
)
|
|
|
|
|
|
|
|
// Statically assert that OOOHeadIndexReader satisfies the IndexReader interface.
var _ IndexReader = &OOOHeadIndexReader{}

// OOOHeadIndexReader implements IndexReader so ooo samples in the head can be
// accessed.
// It also has a reference to headIndexReader so we can leverage on its
// IndexReader implementation for all the methods that remain the same. We
// decided to do this to avoid code duplication.
// The only methods that change are the ones about getting Series and Postings.
type OOOHeadIndexReader struct {
	*headIndexReader // A reference to the headIndexReader so we can reuse as many interface implementation as possible.

	// lastGarbageCollectedMmapRef is the last m-map chunk ref that may be being
	// garbage collected; chunks at or before it are skipped by Series. 0 disables
	// the check (see the series method).
	lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef
}
|
|
|
|
|
2024-03-29 16:33:15 -07:00
|
|
|
// Statically assert that mergedOOOChunks satisfies chunkenc.Iterable.
var _ chunkenc.Iterable = &mergedOOOChunks{}

// mergedOOOChunks holds the list of iterables for overlapping chunks.
type mergedOOOChunks struct {
	chunkIterables []chunkenc.Iterable
}
|
|
|
|
|
2024-04-03 01:14:34 -07:00
|
|
|
// Iterator returns a single iterator that merges the samples of all the
// overlapping chunk iterables, chained in timestamp order.
func (o mergedOOOChunks) Iterator(iterator chunkenc.Iterator) chunkenc.Iterator {
	return storage.ChainSampleIteratorFromIterables(iterator, o.chunkIterables)
}
|
|
|
|
|
2023-11-24 03:38:38 -08:00
|
|
|
func NewOOOHeadIndexReader(head *Head, mint, maxt int64, lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef) *OOOHeadIndexReader {
|
2022-09-20 10:05:50 -07:00
|
|
|
hr := &headIndexReader{
|
|
|
|
head: head,
|
|
|
|
mint: mint,
|
|
|
|
maxt: maxt,
|
|
|
|
}
|
2023-11-24 03:38:38 -08:00
|
|
|
return &OOOHeadIndexReader{hr, lastGarbageCollectedMmapRef}
|
2022-09-20 10:05:50 -07:00
|
|
|
}
|
|
|
|
|
2022-12-15 10:19:15 -08:00
|
|
|
func (oh *OOOHeadIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
|
2023-11-24 03:38:38 -08:00
|
|
|
return oh.series(ref, builder, chks, oh.lastGarbageCollectedMmapRef, 0)
|
2022-09-20 10:05:50 -07:00
|
|
|
}
|
|
|
|
|
2023-11-24 03:38:38 -08:00
|
|
|
// series is the shared implementation behind Series and
// OOOCompactionHeadIndexReader.Series.
//
// lastGarbageCollectedMmapRef gives the last mmap chunk that may be being garbage collected and so
// any chunk at or before this ref will not be considered. 0 disables this check.
//
// maxMmapRef tells upto what max m-map chunk that we can consider. If it is non-0, then
// the oooHeadChunk will not be considered.
func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta, lastGarbageCollectedMmapRef, maxMmapRef chunks.ChunkDiskMapperRef) error {
	s := oh.head.series.getByID(chunks.HeadSeriesRef(ref))

	if s == nil {
		oh.head.metrics.seriesNotFound.Inc()
		return storage.ErrNotFound
	}
	builder.Assign(s.labels())

	// A nil chks means the caller only wants the labels.
	if chks == nil {
		return nil
	}

	s.Lock()
	defer s.Unlock()
	*chks = (*chks)[:0]

	// No out-of-order data for this series.
	if s.ooo == nil {
		return nil
	}

	return getOOOSeriesChunks(s, oh.mint, oh.maxt, lastGarbageCollectedMmapRef, maxMmapRef, false, chks)
}
|
|
|
|
|
2024-06-24 05:41:44 -07:00
|
|
|
// getOOOSeriesChunks collects into chks the chunk metas of the out-of-order
// chunks of s that overlap [mint, maxt], merging metas of overlapping chunks
// into one. Chunks at or before lastGarbageCollectedMmapRef (if non-0) are
// skipped; chunks after maxMmapRef (if non-0) are skipped and the in-memory
// OOO head chunk is only considered when maxMmapRef is 0. If includeInOrder is
// true, the in-order chunks of the series are collected as well.
// The caller must hold s.Lock().
func getOOOSeriesChunks(s *memSeries, mint, maxt int64, lastGarbageCollectedMmapRef, maxMmapRef chunks.ChunkDiskMapperRef, includeInOrder bool, chks *[]chunks.Meta) error {
	tmpChks := make([]chunks.Meta, 0, len(s.ooo.oooMmappedChunks))

	addChunk := func(minT, maxT int64, ref chunks.ChunkRef, chunk chunkenc.Chunk) {
		tmpChks = append(tmpChks, chunks.Meta{
			MinTime: minT,
			MaxTime: maxT,
			Ref:     ref,
			Chunk:   chunk,
		})
	}

	// Collect all chunks that overlap the query range.
	if s.ooo.oooHeadChunk != nil {
		c := s.ooo.oooHeadChunk
		if c.OverlapsClosedInterval(mint, maxt) && maxMmapRef == 0 {
			ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(len(s.ooo.oooMmappedChunks))))
			if len(c.chunk.samples) > 0 { // Empty samples happens in tests, at least.
				chks, err := s.ooo.oooHeadChunk.chunk.ToEncodedChunks(c.minTime, c.maxTime)
				if err != nil {
					// Best effort: report the error and return what was collected so far.
					handleChunkWriteError(err)
					return nil
				}
				for _, chk := range chks {
					addChunk(c.minTime, c.maxTime, ref, chk.chunk)
				}
			} else {
				var emptyChunk chunkenc.Chunk
				addChunk(c.minTime, c.maxTime, ref, emptyChunk)
			}
		}
	}
	// Walk the m-mapped OOO chunks newest-first, applying the maxMmapRef and
	// lastGarbageCollectedMmapRef bounds.
	for i := len(s.ooo.oooMmappedChunks) - 1; i >= 0; i-- {
		c := s.ooo.oooMmappedChunks[i]
		if c.OverlapsClosedInterval(mint, maxt) && (maxMmapRef == 0 || maxMmapRef.GreaterThanOrEqualTo(c.ref)) && (lastGarbageCollectedMmapRef == 0 || c.ref.GreaterThan(lastGarbageCollectedMmapRef)) {
			ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(i)))
			addChunk(c.minTime, c.maxTime, ref, nil)
		}
	}

	if includeInOrder {
		getSeriesChunks(s, mint, maxt, &tmpChks)
	}

	// There is nothing to do if we did not collect any chunk.
	if len(tmpChks) == 0 {
		return nil
	}

	// Next we want to sort all the collected chunks by min time so we can find
	// those that overlap.
	slices.SortFunc(tmpChks, lessByMinTimeAndMinRef)

	// Next we want to iterate the sorted collected chunks and only return the
	// chunks Meta the first chunk that overlaps with others.
	// Example chunks of a series: 5:(100, 200) 6:(500, 600) 7:(150, 250) 8:(550, 650)
	// In the example 5 overlaps with 7 and 6 overlaps with 8 so we only want to
	// return chunk Metas for chunk 5 and chunk 6.
	*chks = append(*chks, tmpChks[0])
	maxTime := tmpChks[0].MaxTime // Tracks the maxTime of the previous "to be merged chunk".
	for _, c := range tmpChks[1:] {
		switch {
		case c.MinTime > maxTime:
			// No overlap with the previous output chunk: start a new one.
			*chks = append(*chks, c)
			maxTime = c.MaxTime
		case c.MaxTime > maxTime:
			// Overlaps and extends the previous output chunk: widen it, then
			// fall through to also propagate the chunk pointer below.
			maxTime = c.MaxTime
			(*chks)[len(*chks)-1].MaxTime = c.MaxTime
			fallthrough
		default:
			// If the head OOO chunk is part of an output chunk, copy the chunk pointer.
			if c.Chunk != nil {
				(*chks)[len(*chks)-1].Chunk = c.Chunk
			}
		}
	}

	return nil
}
|
|
|
|
|
|
|
|
// LabelValues needs to be overridden from the headIndexReader implementation due
// to the check that happens at the beginning where we make sure that the query
// interval overlaps with the head's min OOO time and max OOO time.
func (oh *OOOHeadIndexReader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
	// No OOO data overlaps the query range: nothing to return.
	if oh.maxt < oh.head.MinOOOTime() || oh.mint > oh.head.MaxOOOTime() {
		return []string{}, nil
	}

	if len(matchers) == 0 {
		return oh.head.postings.LabelValues(ctx, name), nil
	}

	return labelValuesWithMatchers(ctx, oh, name, matchers...)
}
|
|
|
|
|
|
|
|
// chunkMetaAndChunkDiskMapperRef pairs a chunk meta with the chunk-disk-mapper
// ref it was read from.
type chunkMetaAndChunkDiskMapperRef struct {
	meta chunks.Meta
	ref  chunks.ChunkDiskMapperRef
}
|
|
|
|
|
2023-09-21 13:53:51 -07:00
|
|
|
func refLessByMinTimeAndMinRef(a, b chunkMetaAndChunkDiskMapperRef) int {
|
2023-10-16 07:23:26 -07:00
|
|
|
switch {
|
|
|
|
case a.meta.MinTime < b.meta.MinTime:
|
|
|
|
return -1
|
|
|
|
case a.meta.MinTime > b.meta.MinTime:
|
|
|
|
return 1
|
2023-10-18 22:17:46 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
switch {
|
|
|
|
case a.meta.Ref < b.meta.Ref:
|
|
|
|
return -1
|
|
|
|
case a.meta.Ref > b.meta.Ref:
|
|
|
|
return 1
|
2023-10-16 07:23:26 -07:00
|
|
|
default:
|
|
|
|
return 0
|
2022-09-20 10:05:50 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-09-21 13:53:51 -07:00
|
|
|
func lessByMinTimeAndMinRef(a, b chunks.Meta) int {
|
2023-10-16 07:23:26 -07:00
|
|
|
switch {
|
|
|
|
case a.MinTime < b.MinTime:
|
|
|
|
return -1
|
|
|
|
case a.MinTime > b.MinTime:
|
|
|
|
return 1
|
2023-10-18 22:17:46 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
switch {
|
|
|
|
case a.Ref < b.Ref:
|
|
|
|
return -1
|
|
|
|
case a.Ref > b.Ref:
|
|
|
|
return 1
|
2023-10-16 07:23:26 -07:00
|
|
|
default:
|
|
|
|
return 0
|
2022-09-20 10:05:50 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-09-13 08:45:06 -07:00
|
|
|
// Postings returns the postings for the given label name and values, merging
// the postings lists when multiple values are given.
func (oh *OOOHeadIndexReader) Postings(ctx context.Context, name string, values ...string) (index.Postings, error) {
	switch len(values) {
	case 0:
		return index.EmptyPostings(), nil
	case 1:
		return oh.head.postings.Get(name, values[0]), nil // TODO(ganesh) Also call GetOOOPostings
	default:
		// TODO(ganesh) We want to only return postings for out of order series.
		res := make([]index.Postings, 0, len(values))
		for _, value := range values {
			res = append(res, oh.head.postings.Get(name, value)) // TODO(ganesh) Also call GetOOOPostings
		}
		return index.Merge(ctx, res...), nil
	}
}
|
|
|
|
|
|
|
|
// OOOHeadChunkReader implements ChunkReader over the out-of-order chunks of
// the head, restricted to the time range [mint, maxt].
type OOOHeadChunkReader struct {
	head       *Head
	mint, maxt int64
	isoState   *oooIsolationState        // May be nil; closed by Close when set.
	maxMmapRef chunks.ChunkDiskMapperRef // If non-0, upper bound on m-map chunks considered when merging.
}
|
|
|
|
|
2024-08-03 15:24:39 -07:00
|
|
|
func NewOOOHeadChunkReader(head *Head, mint, maxt int64, isoState *oooIsolationState, maxMmapRef chunks.ChunkDiskMapperRef) *OOOHeadChunkReader {
|
2022-09-20 10:05:50 -07:00
|
|
|
return &OOOHeadChunkReader{
|
2024-08-03 15:24:39 -07:00
|
|
|
head: head,
|
|
|
|
mint: mint,
|
|
|
|
maxt: maxt,
|
|
|
|
isoState: isoState,
|
|
|
|
maxMmapRef: maxMmapRef,
|
2022-09-20 10:05:50 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-11-28 02:14:29 -08:00
|
|
|
// ChunkOrIterable returns an iterable over the out-of-order data referenced by
// meta, merged with any overlapping OOO chunks of the same series within the
// reader's [mint, maxt] range. The returned chunk is always nil: the data is
// served through the iterable.
func (cr OOOHeadChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) {
	sid, _ := chunks.HeadChunkRef(meta.Ref).Unpack()

	s := cr.head.series.getByID(sid)
	// This means that the series has been garbage collected.
	if s == nil {
		return nil, nil, storage.ErrNotFound
	}

	s.Lock()
	if s.ooo == nil {
		// There is no OOO data for this series.
		s.Unlock()
		return nil, nil, storage.ErrNotFound
	}
	mc, err := s.oooMergedChunks(meta, cr.head.chunkDiskMapper, nil, cr.mint, cr.maxt, cr.maxMmapRef)
	s.Unlock()
	if err != nil {
		return nil, nil, err
	}

	// This means that the query range did not overlap with the requested chunk.
	if len(mc.chunkIterables) == 0 {
		return nil, nil, storage.ErrNotFound
	}

	return nil, mc, nil
}
|
|
|
|
|
|
|
|
// Close releases the isolation state held by the reader, if any.
// It never returns an error.
func (cr OOOHeadChunkReader) Close() error {
	if cr.isoState != nil {
		cr.isoState.Close()
	}
	return nil
}
|
|
|
|
|
|
|
|
// OOOCompactionHead is a BlockReader over the out-of-order data of the head,
// built once right before compaction via NewOOOCompactionHead.
type OOOCompactionHead struct {
	oooIR       *OOOHeadIndexReader
	lastMmapRef chunks.ChunkDiskMapperRef // Highest m-map chunk ref seen at build time; readers do not look past it.
	lastWBLFile int                       // WBL segment cut at build time.
	postings    []storage.SeriesRef       // Series that have OOO m-mapped chunks.
	chunkRange  int64
	mint, maxt  int64 // Among all the compactable chunks.
}
|
|
|
|
|
|
|
|
// NewOOOCompactionHead does the following:
// 1. M-maps all the in-memory ooo chunks.
// 2. Compute the expected block ranges while iterating through all ooo series and store it.
// 3. Store the list of postings having ooo series.
// 4. Cuts a new WBL file for the OOO WBL.
// All the above together have a bit of CPU and memory overhead, and can have a bit of impact
// on the sample append latency. So call NewOOOCompactionHead only right before compaction.
func NewOOOCompactionHead(ctx context.Context, head *Head) (*OOOCompactionHead, error) {
	ch := &OOOCompactionHead{
		chunkRange: head.chunkRange.Load(),
		mint:       math.MaxInt64,
		maxt:       math.MinInt64,
	}

	// Cut a new WBL segment and remember it, so callers know which segments
	// belong to data older than this compaction head.
	if head.wbl != nil {
		lastWBLFile, err := head.wbl.NextSegmentSync()
		if err != nil {
			return nil, err
		}
		ch.lastWBLFile = lastWBLFile
	}

	// Full time range, no garbage-collection cutoff: consider all OOO data.
	ch.oooIR = NewOOOHeadIndexReader(head, math.MinInt64, math.MaxInt64, 0)
	n, v := index.AllPostingsKey()

	// TODO: verify this gets only ooo samples.
	p, err := ch.oooIR.Postings(ctx, n, v)
	if err != nil {
		return nil, err
	}
	p = ch.oooIR.SortedPostings(p)

	var lastSeq, lastOff int
	for p.Next() {
		seriesRef := p.At()
		ms := head.series.getByID(chunks.HeadSeriesRef(seriesRef))
		if ms == nil {
			continue
		}

		// M-map the in-memory chunk and keep track of the last one.
		// Also build the block ranges -> series map.
		// TODO: consider having a lock specifically for ooo data.
		ms.Lock()

		if ms.ooo == nil {
			ms.Unlock()
			continue
		}

		var lastMmapRef chunks.ChunkDiskMapperRef
		mmapRefs := ms.mmapCurrentOOOHeadChunk(head.chunkDiskMapper)
		if len(mmapRefs) == 0 && len(ms.ooo.oooMmappedChunks) > 0 {
			// Nothing was m-mapped. So take the mmapRef from the existing slice if it exists.
			mmapRefs = []chunks.ChunkDiskMapperRef{ms.ooo.oooMmappedChunks[len(ms.ooo.oooMmappedChunks)-1].ref}
		}
		if len(mmapRefs) == 0 {
			lastMmapRef = 0
		} else {
			lastMmapRef = mmapRefs[len(mmapRefs)-1]
		}
		// Track the globally highest (segment, offset) m-map ref seen so far.
		seq, off := lastMmapRef.Unpack()
		if seq > lastSeq || (seq == lastSeq && off > lastOff) {
			ch.lastMmapRef, lastSeq, lastOff = lastMmapRef, seq, off
		}
		if len(ms.ooo.oooMmappedChunks) > 0 {
			ch.postings = append(ch.postings, seriesRef)
			// Widen the compactable time range to cover this series' chunks.
			for _, c := range ms.ooo.oooMmappedChunks {
				if c.minTime < ch.mint {
					ch.mint = c.minTime
				}
				if c.maxTime > ch.maxt {
					ch.maxt = c.maxTime
				}
			}
		}
		ms.Unlock()
	}

	return ch, nil
}
|
|
|
|
|
|
|
|
// Index returns an IndexReader over the OOO compaction head.
func (ch *OOOCompactionHead) Index() (IndexReader, error) {
	return NewOOOCompactionHeadIndexReader(ch), nil
}

// Chunks returns a ChunkReader capped at lastMmapRef, so compaction does not
// read chunks created after this head was built.
func (ch *OOOCompactionHead) Chunks() (ChunkReader, error) {
	return NewOOOHeadChunkReader(ch.oooIR.head, ch.oooIR.mint, ch.oooIR.maxt, nil, ch.lastMmapRef), nil
}

// Tombstones returns an empty tombstone reader; the OOO compaction head
// carries no tombstones.
func (ch *OOOCompactionHead) Tombstones() (tombstones.Reader, error) {
	return tombstones.NewMemTombstones(), nil
}
|
|
|
|
|
2023-11-17 03:29:36 -08:00
|
|
|
// oooCompactionHeadULID is the fixed ULID reported by Meta for the OOO
// compaction head pseudo-block.
var oooCompactionHeadULID = ulid.MustParse("0000000000XX000COMPACTHEAD")

// Meta returns a synthetic BlockMeta describing the compactable OOO data.
func (ch *OOOCompactionHead) Meta() BlockMeta {
	return BlockMeta{
		MinTime: ch.mint,
		MaxTime: ch.maxt,
		ULID:    oooCompactionHeadULID,
		Stats: BlockStats{
			NumSeries: uint64(len(ch.postings)),
		},
	}
}
|
|
|
|
|
|
|
|
// CloneForTimeRange clones the OOOCompactionHead such that the IndexReader and ChunkReader
// obtained from this only looks at the m-map chunks within the given time ranges while not looking
// beyond the ch.lastMmapRef.
// Only the method of BlockReader interface are valid for the cloned OOOCompactionHead.
func (ch *OOOCompactionHead) CloneForTimeRange(mint, maxt int64) *OOOCompactionHead {
	return &OOOCompactionHead{
		oooIR:       NewOOOHeadIndexReader(ch.oooIR.head, mint, maxt, 0),
		lastMmapRef: ch.lastMmapRef,
		postings:    ch.postings,
		chunkRange:  ch.chunkRange,
		// NOTE(review): mint/maxt deliberately keep the original head-wide
		// values (not the clone's narrower range) — presumably for meta
		// reporting; confirm against callers before changing.
		mint: ch.mint,
		maxt: ch.maxt,
	}
}
|
|
|
|
|
|
|
|
// Size returns 0; the OOO compaction head has no on-disk size of its own.
func (ch *OOOCompactionHead) Size() int64 { return 0 }

// MinTime returns the minimum timestamp among all compactable OOO chunks.
func (ch *OOOCompactionHead) MinTime() int64 { return ch.mint }

// MaxTime returns the maximum timestamp among all compactable OOO chunks.
func (ch *OOOCompactionHead) MaxTime() int64 { return ch.maxt }

// ChunkRange returns the head's chunk range captured at construction time.
func (ch *OOOCompactionHead) ChunkRange() int64 { return ch.chunkRange }

// LastMmapRef returns the highest m-map chunk ref considered by this head.
func (ch *OOOCompactionHead) LastMmapRef() chunks.ChunkDiskMapperRef { return ch.lastMmapRef }

// LastWBLFile returns the WBL segment cut when this head was built.
func (ch *OOOCompactionHead) LastWBLFile() int { return ch.lastWBLFile }
|
|
|
|
|
|
|
|
// OOOCompactionHeadIndexReader is an IndexReader over an OOOCompactionHead.
// It delegates to the head's OOOHeadIndexReader where possible and rejects
// queries compaction does not need.
type OOOCompactionHeadIndexReader struct {
	ch *OOOCompactionHead
}

// NewOOOCompactionHeadIndexReader returns an IndexReader for ch.
func NewOOOCompactionHeadIndexReader(ch *OOOCompactionHead) IndexReader {
	return &OOOCompactionHeadIndexReader{ch: ch}
}

// Symbols delegates to the underlying OOO head index reader.
func (ir *OOOCompactionHeadIndexReader) Symbols() index.StringIter {
	return ir.ch.oooIR.Symbols()
}
|
|
|
|
|
2023-09-13 08:45:06 -07:00
|
|
|
func (ir *OOOCompactionHeadIndexReader) Postings(_ context.Context, name string, values ...string) (index.Postings, error) {
|
2022-09-20 10:05:50 -07:00
|
|
|
n, v := index.AllPostingsKey()
|
|
|
|
if name != n || len(values) != 1 || values[0] != v {
|
|
|
|
return nil, errors.New("only AllPostingsKey is supported")
|
|
|
|
}
|
|
|
|
return index.NewListPostings(ir.ch.postings), nil
|
|
|
|
}
|
|
|
|
|
2024-05-09 02:55:30 -07:00
|
|
|
// PostingsForLabelMatching is not supported; compaction only issues the
// AllPostingsKey query via Postings.
func (ir *OOOCompactionHeadIndexReader) PostingsForLabelMatching(context.Context, string, func(string) bool) index.Postings {
	return index.ErrPostings(errors.New("not supported"))
}

// SortedPostings returns p unchanged.
func (ir *OOOCompactionHeadIndexReader) SortedPostings(p index.Postings) index.Postings {
	// This will already be sorted from the Postings() call above.
	return p
}

// ShardedPostings delegates to the underlying OOO head index reader.
func (ir *OOOCompactionHeadIndexReader) ShardedPostings(p index.Postings, shardIndex, shardCount uint64) index.Postings {
	return ir.ch.oooIR.ShardedPostings(p, shardIndex, shardCount)
}

// Series fills builder and chks for ref, with the m-map chunks capped at the
// head's lastMmapRef so compaction never reads past the build snapshot.
func (ir *OOOCompactionHeadIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
	return ir.ch.oooIR.series(ref, builder, chks, 0, ir.ch.lastMmapRef)
}
|
|
|
|
|
2023-09-14 07:02:04 -07:00
|
|
|
// SortedLabelValues is not needed for compaction and is not implemented.
func (ir *OOOCompactionHeadIndexReader) SortedLabelValues(_ context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
	return nil, errors.New("not implemented")
}

// LabelValues is not needed for compaction and is not implemented.
func (ir *OOOCompactionHeadIndexReader) LabelValues(_ context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
	return nil, errors.New("not implemented")
}

// PostingsForMatchers is not needed for compaction and is not implemented.
func (ir *OOOCompactionHeadIndexReader) PostingsForMatchers(_ context.Context, concurrent bool, ms ...*labels.Matcher) (index.Postings, error) {
	return nil, errors.New("not implemented")
}

// LabelNames is not needed for compaction and is not implemented.
func (ir *OOOCompactionHeadIndexReader) LabelNames(context.Context, ...*labels.Matcher) ([]string, error) {
	return nil, errors.New("not implemented")
}

// LabelValueFor is not needed for compaction and is not implemented.
func (ir *OOOCompactionHeadIndexReader) LabelValueFor(context.Context, storage.SeriesRef, string) (string, error) {
	return "", errors.New("not implemented")
}

// LabelNamesFor is not needed for compaction and is not implemented.
func (ir *OOOCompactionHeadIndexReader) LabelNamesFor(ctx context.Context, postings index.Postings) ([]string, error) {
	return nil, errors.New("not implemented")
}
|
|
|
|
|
|
|
|
// Close closes the underlying OOO head index reader.
func (ir *OOOCompactionHeadIndexReader) Close() error {
	return ir.ch.oooIR.Close()
}
|
2024-06-24 05:41:44 -07:00
|
|
|
|
|
|
|
// HeadAndOOOQuerier queries both the head and the out-of-order head.
type HeadAndOOOQuerier struct {
	mint, maxt int64
	head       *Head
	index      IndexReader
	chunkr     ChunkReader
	querier    storage.Querier // Delegate for LabelValues/LabelNames/Close; Select uses index and chunkr.
}
|
|
|
|
|
|
|
|
func NewHeadAndOOOQuerier(mint, maxt int64, head *Head, oooIsoState *oooIsolationState, querier storage.Querier) storage.Querier {
|
2024-07-15 12:07:12 -07:00
|
|
|
cr := &headChunkReader{
|
|
|
|
head: head,
|
|
|
|
mint: mint,
|
|
|
|
maxt: maxt,
|
|
|
|
isoState: head.iso.State(mint, maxt),
|
|
|
|
}
|
2024-06-24 05:41:44 -07:00
|
|
|
return &HeadAndOOOQuerier{
|
|
|
|
mint: mint,
|
|
|
|
maxt: maxt,
|
|
|
|
head: head,
|
|
|
|
index: NewHeadAndOOOIndexReader(head, mint, maxt, oooIsoState.minRef),
|
2024-07-15 12:07:12 -07:00
|
|
|
chunkr: NewHeadAndOOOChunkReader(head, mint, maxt, cr, oooIsoState, 0),
|
2024-06-24 05:41:44 -07:00
|
|
|
querier: querier,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// LabelValues delegates to the wrapped querier.
func (q *HeadAndOOOQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
	return q.querier.LabelValues(ctx, name, hints, matchers...)
}

// LabelNames delegates to the wrapped querier.
func (q *HeadAndOOOQuerier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
	return q.querier.LabelNames(ctx, hints, matchers...)
}

// Close releases the chunk reader and the wrapped querier.
func (q *HeadAndOOOQuerier) Close() error {
	// NOTE(review): the chunk reader's Close error is dropped — confirm intentional.
	q.chunkr.Close()
	return q.querier.Close()
}

// Select returns a series set over both in-order and out-of-order head data.
func (q *HeadAndOOOQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
	return selectSeriesSet(ctx, sortSeries, hints, matchers, q.index, q.chunkr, q.head.tombstones, q.mint, q.maxt)
}
|
|
|
|
|
|
|
|
// HeadAndOOOChunkQuerier queries both the head and the out-of-order head.
type HeadAndOOOChunkQuerier struct {
	mint, maxt int64
	head       *Head
	index      IndexReader
	chunkr     ChunkReader
	querier    storage.ChunkQuerier // Delegate for LabelValues/LabelNames/Close; Select uses index and chunkr.
}
|
|
|
|
|
|
|
|
func NewHeadAndOOOChunkQuerier(mint, maxt int64, head *Head, oooIsoState *oooIsolationState, querier storage.ChunkQuerier) storage.ChunkQuerier {
|
2024-07-15 12:07:12 -07:00
|
|
|
cr := &headChunkReader{
|
|
|
|
head: head,
|
|
|
|
mint: mint,
|
|
|
|
maxt: maxt,
|
|
|
|
isoState: head.iso.State(mint, maxt),
|
|
|
|
}
|
2024-06-24 05:41:44 -07:00
|
|
|
return &HeadAndOOOChunkQuerier{
|
|
|
|
mint: mint,
|
|
|
|
maxt: maxt,
|
|
|
|
head: head,
|
|
|
|
index: NewHeadAndOOOIndexReader(head, mint, maxt, oooIsoState.minRef),
|
2024-07-15 12:07:12 -07:00
|
|
|
chunkr: NewHeadAndOOOChunkReader(head, mint, maxt, cr, oooIsoState, 0),
|
2024-06-24 05:41:44 -07:00
|
|
|
querier: querier,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (q *HeadAndOOOChunkQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
|
|
|
|
return q.querier.LabelValues(ctx, name, hints, matchers...)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (q *HeadAndOOOChunkQuerier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
|
|
|
|
return q.querier.LabelNames(ctx, hints, matchers...)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (q *HeadAndOOOChunkQuerier) Close() error {
|
|
|
|
q.chunkr.Close()
|
|
|
|
return q.querier.Close()
|
|
|
|
}
|
|
|
|
|
|
|
|
func (q *HeadAndOOOChunkQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.ChunkSeriesSet {
|
|
|
|
return selectChunkSeriesSet(ctx, sortSeries, hints, matchers, rangeHeadULID, q.index, q.chunkr, q.head.tombstones, q.mint, q.maxt)
|
|
|
|
}
|
|
|
|
|
|
|
|
// HeadAndOOOIndexReader is an IndexReader that exposes both in-order and
// out-of-order chunks of head series (see Series below).
type HeadAndOOOIndexReader struct {
	*headIndexReader // A reference to the headIndexReader so we can reuse as many interface implementation as possible.
	// NOTE(review): presumably the highest m-mapped OOO chunk ref already
	// garbage collected, used to skip stale refs in getOOOSeriesChunks —
	// confirm against that helper.
	lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef
}
|
|
|
|
|
|
|
|
func NewHeadAndOOOIndexReader(head *Head, mint, maxt int64, lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef) *HeadAndOOOIndexReader {
|
|
|
|
hr := &headIndexReader{
|
|
|
|
head: head,
|
|
|
|
mint: mint,
|
|
|
|
maxt: maxt,
|
|
|
|
}
|
|
|
|
return &HeadAndOOOIndexReader{hr, lastGarbageCollectedMmapRef}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (oh *HeadAndOOOIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
|
|
|
|
s := oh.head.series.getByID(chunks.HeadSeriesRef(ref))
|
|
|
|
if s == nil {
|
|
|
|
oh.head.metrics.seriesNotFound.Inc()
|
|
|
|
return storage.ErrNotFound
|
|
|
|
}
|
|
|
|
builder.Assign(s.lset)
|
|
|
|
|
|
|
|
if chks == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
s.Lock()
|
|
|
|
defer s.Unlock()
|
|
|
|
*chks = (*chks)[:0]
|
|
|
|
|
|
|
|
if s.ooo != nil {
|
|
|
|
return getOOOSeriesChunks(s, oh.mint, oh.maxt, oh.lastGarbageCollectedMmapRef, 0, true, chks)
|
|
|
|
}
|
|
|
|
getSeriesChunks(s, oh.mint, oh.maxt, chks)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// HeadAndOOOChunkReader is a ChunkReader serving chunks from both the
// in-order head (via cr) and the out-of-order head.
type HeadAndOOOChunkReader struct {
	head       *Head // Head block the chunks belong to.
	mint, maxt int64 // Time range passed to oooMergedChunks.
	cr         *headChunkReader // If nil, only read OOO chunks.
	// NOTE(review): passed as the upper bound to oooMergedChunks; 0 is used
	// by the querier constructors above, presumably meaning "no bound" —
	// confirm in oooMergedChunks.
	maxMmapRef chunks.ChunkDiskMapperRef
	// Isolation state for out-of-order reads; released in Close when non-nil.
	oooIsoState *oooIsolationState
}
|
|
|
|
|
2024-07-15 12:07:12 -07:00
|
|
|
func NewHeadAndOOOChunkReader(head *Head, mint, maxt int64, cr *headChunkReader, oooIsoState *oooIsolationState, maxMmapRef chunks.ChunkDiskMapperRef) *HeadAndOOOChunkReader {
|
2024-06-24 05:41:44 -07:00
|
|
|
return &HeadAndOOOChunkReader{
|
2024-07-15 12:07:12 -07:00
|
|
|
head: head,
|
|
|
|
mint: mint,
|
|
|
|
maxt: maxt,
|
|
|
|
cr: cr,
|
2024-06-24 05:41:44 -07:00
|
|
|
maxMmapRef: maxMmapRef,
|
|
|
|
oooIsoState: oooIsoState,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (cr *HeadAndOOOChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) {
|
|
|
|
sid, _, isOOO := unpackHeadChunkRef(meta.Ref)
|
|
|
|
if !isOOO {
|
|
|
|
return cr.cr.ChunkOrIterable(meta)
|
|
|
|
}
|
|
|
|
|
2024-07-15 12:07:12 -07:00
|
|
|
s := cr.head.series.getByID(sid)
|
2024-06-24 05:41:44 -07:00
|
|
|
// This means that the series has been garbage collected.
|
|
|
|
if s == nil {
|
|
|
|
return nil, nil, storage.ErrNotFound
|
|
|
|
}
|
|
|
|
|
|
|
|
s.Lock()
|
2024-07-15 12:07:12 -07:00
|
|
|
mc, err := s.oooMergedChunks(meta, cr.head.chunkDiskMapper, cr.cr, cr.mint, cr.maxt, cr.maxMmapRef)
|
2024-06-24 05:41:44 -07:00
|
|
|
s.Unlock()
|
|
|
|
|
|
|
|
return nil, mc, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Pass through special behaviour for current head chunk.
|
|
|
|
func (cr *HeadAndOOOChunkReader) ChunkOrIterableWithCopy(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, int64, error) {
|
|
|
|
_, _, isOOO := unpackHeadChunkRef(meta.Ref)
|
|
|
|
if !isOOO {
|
|
|
|
return cr.cr.ChunkOrIterableWithCopy(meta)
|
|
|
|
}
|
|
|
|
chk, iter, err := cr.ChunkOrIterable(meta)
|
|
|
|
return chk, iter, 0, err
|
|
|
|
}
|
|
|
|
|
|
|
|
func (cr *HeadAndOOOChunkReader) Close() error {
|
2024-07-15 12:07:12 -07:00
|
|
|
if cr.cr != nil && cr.cr.isoState != nil {
|
2024-06-24 05:41:44 -07:00
|
|
|
cr.cr.isoState.Close()
|
|
|
|
}
|
|
|
|
if cr.oooIsoState != nil {
|
|
|
|
cr.oooIsoState.Close()
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|