Simplify IndexReader.Series interface
Instead of passing in a `ScratchBuilder` and `Labels`, just pass the builder and the caller can extract labels from it. In many cases the caller didn't use the Labels value anyway.

Now in `Labels.ScratchBuilder` we need a slightly different API: one to assign what will be the result, instead of overwriting some other `Labels`. This is safer and easier to reason about.

Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
This commit is contained in:
parent 19f300e6f0
commit 10b27dfb84
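As a quick illustration of the calling convention this commit moves to, here is a minimal caller-side sketch (not part of the diff; the package name and the helper `labelsFor` are hypothetical, and the imports are the usual Prometheus module paths):

package tsdbexample

import (
    "github.com/prometheus/prometheus/model/labels"
    "github.com/prometheus/prometheus/storage"
    "github.com/prometheus/prometheus/tsdb"
    "github.com/prometheus/prometheus/tsdb/chunks"
)

// labelsFor shows the simplified call: pass only the ScratchBuilder, then ask
// it for the Labels once Series has filled it in. Previously the caller also
// had to supply a *labels.Labels out-parameter.
func labelsFor(ir tsdb.IndexReader, ref storage.SeriesRef) (labels.Labels, []chunks.Meta, error) {
    var builder labels.ScratchBuilder
    var chks []chunks.Meta
    if err := ir.Series(ref, &builder, &chks); err != nil {
        return labels.EmptyLabels(), nil, err
    }
    // builder.Labels() returns a copy, so the result stays valid if the
    // builder is reused for the next series.
    return builder.Labels(), chks, nil
}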
@@ -469,16 +469,15 @@ func analyzeBlock(path, blockID string, limit int, runExtended bool) error {
 	if err != nil {
 		return err
 	}
-	lbls := labels.Labels{}
 	chks := []chunks.Meta{}
 	builder := labels.ScratchBuilder{}
 	for p.Next() {
-		if err = ir.Series(p.At(), &builder, &lbls, &chks); err != nil {
+		if err = ir.Series(p.At(), &builder, &chks); err != nil {
 			return err
 		}
 		// Amount of the block time range not covered by this series.
 		uncovered := uint64(meta.MaxTime-meta.MinTime) - uint64(chks[len(chks)-1].MaxTime-chks[0].MinTime)
-		lbls.Range(func(lbl labels.Label) {
+		builder.Labels().Range(func(lbl labels.Label) {
 			key := lbl.Name + "=" + lbl.Value
 			labelsUncovered[lbl.Name] += uncovered
 			labelpairsUncovered[key] += uncovered
@@ -591,9 +590,8 @@ func analyzeCompaction(block tsdb.BlockReader, indexr tsdb.IndexReader) (err err
 	totalChunks := 0
 	var builder labels.ScratchBuilder
 	for postingsr.Next() {
-		lbsl := labels.Labels{}
 		var chks []chunks.Meta
-		if err := indexr.Series(postingsr.At(), &builder, &lbsl, &chks); err != nil {
+		if err := indexr.Series(postingsr.At(), &builder, &chks); err != nil {
 			return err
 		}
@@ -592,15 +592,14 @@ func (b *ScratchBuilder) Sort() {
 	sort.Sort(b.add)
 }
 
+// Assign is for when you already have a Labels which you want this ScratchBuilder to return.
+func (b *ScratchBuilder) Assign(ls Labels) {
+	b.add = append(b.add[:0], ls...) // Copy on top of our slice, so we don't retain the input slice.
+}
+
 // Return the name/value pairs added so far as a Labels object.
 // Note: if you want them sorted, call Sort() first.
 func (b *ScratchBuilder) Labels() Labels {
 	// Copy the slice, so the next use of ScratchBuilder doesn't overwrite.
 	return append([]Label{}, b.add...)
 }
-
-// Write the newly-built Labels out to ls, reusing its buffer if long enough.
-// Callers must ensure that there are no other references to ls.
-func (b *ScratchBuilder) Overwrite(ls *Labels) {
-	(*ls) = append((*ls)[:0], b.add...)
-}
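A short usage sketch of the two ScratchBuilder paths after this hunk: building a label set pair by pair versus handing the builder a ready-made Labels via the new Assign. This is illustrative only; the example function and the label names are not taken from the commit.

package labels_test

import (
    "fmt"

    "github.com/prometheus/prometheus/model/labels"
)

func ExampleScratchBuilder_Assign() {
    var b labels.ScratchBuilder

    // Path 1: accumulate name/value pairs, sort, then take a copy.
    b.Add("job", "api")
    b.Add("env", "prod")
    b.Sort()
    built := b.Labels() // a copy; later use of b does not change it

    // Path 2: make the builder return an existing Labels as-is.
    b.Assign(labels.FromStrings("env", "prod", "job", "api"))

    fmt.Println(built.Get("job"), b.Labels().Get("env"))
    // Output: api prod
}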
@@ -622,7 +622,6 @@ func TestScratchBuilder(t *testing.T) {
 			want: FromStrings("ddd", "444"),
 		},
 	} {
-		overwriteTarget := EmptyLabels()
 		t.Run(fmt.Sprint(i), func(t *testing.T) {
 			b := ScratchBuilder{}
 			for _, lbl := range tcase.add {
@@ -630,8 +629,8 @@ func TestScratchBuilder(t *testing.T) {
 			}
 			b.Sort()
 			require.Equal(t, tcase.want, b.Labels())
-			b.Overwrite(&overwriteTarget)
-			require.Equal(t, tcase.want, overwriteTarget)
+			b.Assign(tcase.want)
+			require.Equal(t, tcase.want, b.Labels())
 		})
 	}
 }
@@ -79,10 +79,10 @@ type IndexReader interface {
 	// by the label set of the underlying series.
 	SortedPostings(index.Postings) index.Postings
 
-	// Series populates the given labels and chunk metas for the series identified
+	// Series populates the given builder and chunk metas for the series identified
 	// by the reference.
 	// Returns storage.ErrNotFound if the ref does not resolve to a known series.
-	Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, lset *labels.Labels, chks *[]chunks.Meta) error
+	Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error
 
 	// LabelNames returns all the unique label names present in the index in sorted order.
 	LabelNames(matchers ...*labels.Matcher) ([]string, error)
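For implementers, the new contract is that Series fills the caller's builder instead of a Labels out-parameter; as the head and mock readers in this commit do, that usually reduces to a single builder.Assign call. A hypothetical in-memory reader, sketched only to show the shape (memIndex and memSeries are not Prometheus types):

package tsdbexample

import (
    "github.com/prometheus/prometheus/model/labels"
    "github.com/prometheus/prometheus/storage"
    "github.com/prometheus/prometheus/tsdb/chunks"
)

// memSeries pairs a stored label set with its chunk metadata.
type memSeries struct {
    lset labels.Labels
    chks []chunks.Meta
}

// memIndex is a toy lookup table keyed by series reference; it provides only
// the Series method of the IndexReader interface shown above.
type memIndex map[storage.SeriesRef]memSeries

// Series copies the stored labels into the caller's builder and the chunk
// metas into chks, or reports that the ref does not resolve to a series.
func (m memIndex) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
    s, ok := m[ref]
    if !ok {
        return storage.ErrNotFound
    }
    builder.Assign(s.lset)
    *chks = append((*chks)[:0], s.chks...)
    return nil
}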
@@ -499,8 +499,8 @@ func (r blockIndexReader) SortedPostings(p index.Postings) index.Postings {
 	return r.ir.SortedPostings(p)
 }
 
-func (r blockIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, lset *labels.Labels, chks *[]chunks.Meta) error {
-	if err := r.ir.Series(ref, builder, lset, chks); err != nil {
+func (r blockIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
+	if err := r.ir.Series(ref, builder, chks); err != nil {
 		return errors.Wrapf(err, "block: %s", r.b.Meta().ULID)
 	}
 	return nil
@@ -561,13 +561,12 @@ func (pb *Block) Delete(mint, maxt int64, ms ...*labels.Matcher) error {
 	// Choose only valid postings which have chunks in the time-range.
 	stones := tombstones.NewMemTombstones()
 
-	var lset labels.Labels
 	var chks []chunks.Meta
 	var builder labels.ScratchBuilder
 
 Outer:
 	for p.Next() {
-		err := ir.Series(p.At(), &builder, &lset, &chks)
+		err := ir.Series(p.At(), &builder, &chks)
 		if err != nil {
 			return err
 		}
@@ -1843,15 +1843,12 @@ func TestChunkAtBlockBoundary(t *testing.T) {
 		p, err := r.Postings(k, v)
 		require.NoError(t, err)
 
-		var (
-			lset labels.Labels
-			chks []chunks.Meta
-		)
+		var chks []chunks.Meta
 
 		chunkCount := 0
 
 		for p.Next() {
-			err = r.Series(p.At(), &builder, &lset, &chks)
+			err = r.Series(p.At(), &builder, &chks)
 			require.NoError(t, err)
 			for _, c := range chks {
 				require.True(t, meta.MinTime <= c.MinTime && c.MaxTime <= meta.MaxTime,
@@ -148,14 +148,14 @@ func (h *headIndexReader) SortedPostings(p index.Postings) index.Postings {
 }
 
 // Series returns the series for the given reference.
-func (h *headIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, lbls *labels.Labels, chks *[]chunks.Meta) error {
+func (h *headIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
 	s := h.head.series.getByID(chunks.HeadSeriesRef(ref))
 
 	if s == nil {
 		h.head.metrics.seriesNotFound.Inc()
 		return storage.ErrNotFound
 	}
-	lbls.CopyFrom(s.lset)
+	builder.Assign(s.lset)
 
 	s.Lock()
 	defer s.Unlock()
@@ -1452,13 +1452,12 @@ func TestGCChunkAccess(t *testing.T) {
 
 	idx := h.indexRange(0, 1500)
 	var (
-		lset    labels.Labels
 		chunks  []chunks.Meta
 		builder labels.ScratchBuilder
 	)
-	require.NoError(t, idx.Series(1, &builder, &lset, &chunks))
+	require.NoError(t, idx.Series(1, &builder, &chunks))
 
-	require.Equal(t, labels.FromStrings("a", "1"), lset)
+	require.Equal(t, labels.FromStrings("a", "1"), builder.Labels())
 	require.Equal(t, 2, len(chunks))
 
 	cr, err := h.chunksRange(0, 1500, nil)
@@ -1506,13 +1505,12 @@ func TestGCSeriesAccess(t *testing.T) {
 
 	idx := h.indexRange(0, 2000)
 	var (
-		lset    labels.Labels
 		chunks  []chunks.Meta
 		builder labels.ScratchBuilder
 	)
-	require.NoError(t, idx.Series(1, &builder, &lset, &chunks))
+	require.NoError(t, idx.Series(1, &builder, &chunks))
 
-	require.Equal(t, labels.FromStrings("a", "1"), lset)
+	require.Equal(t, labels.FromStrings("a", "1"), builder.Labels())
 	require.Equal(t, 2, len(chunks))
 
 	cr, err := h.chunksRange(0, 2000, nil)
@@ -1596,8 +1596,8 @@ func (r *Reader) LabelValueFor(id storage.SeriesRef, label string) (string, erro
 	return value, nil
 }
 
-// Series reads the series with the given ID and writes its labels and chunks into lbls and chks.
-func (r *Reader) Series(id storage.SeriesRef, builder *labels.ScratchBuilder, lbls *labels.Labels, chks *[]chunks.Meta) error {
+// Series reads the series with the given ID and writes its labels and chunks into builder and chks.
+func (r *Reader) Series(id storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
 	offset := id
 	// In version 2 series IDs are no longer exact references but series are 16-byte padded
 	// and the ID is the multiple of 16 of the actual position.
@@ -1608,7 +1608,7 @@ func (r *Reader) Series(id storage.SeriesRef, builder *labels.ScratchBuilder, lb
 	if d.Err() != nil {
 		return d.Err()
 	}
-	return errors.Wrap(r.dec.Series(d.Get(), builder, lbls, chks), "read series")
+	return errors.Wrap(r.dec.Series(d.Get(), builder, chks), "read series")
 }
 
 func (r *Reader) Postings(name string, values ...string) (Postings, error) {
@@ -1835,9 +1835,9 @@ func (dec *Decoder) LabelValueFor(b []byte, label string) (string, error) {
 	return "", d.Err()
 }
 
-// Series decodes a series entry from the given byte slice into lset and chks.
+// Series decodes a series entry from the given byte slice into builder and chks.
 // Previous contents of lbls can be overwritten - make sure you copy before retaining.
-func (dec *Decoder) Series(b []byte, builder *labels.ScratchBuilder, lbls *labels.Labels, chks *[]chunks.Meta) error {
+func (dec *Decoder) Series(b []byte, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
 	builder.Reset()
 	*chks = (*chks)[:0]
 
@@ -1864,7 +1864,6 @@ func (dec *Decoder) Series(b []byte, builder *labels.ScratchBuilder, lbls *label
 
 		builder.Add(ln, lv)
 	}
-	builder.Overwrite(lbls)
 
 	// Read the chunks meta data.
 	k = d.Uvarint()
@@ -124,12 +124,12 @@ func (m mockIndex) SortedPostings(p Postings) Postings {
 	return NewListPostings(ep)
 }
 
-func (m mockIndex) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, lset *labels.Labels, chks *[]chunks.Meta) error {
+func (m mockIndex) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
 	s, ok := m.series[ref]
 	if !ok {
 		return errors.New("not found")
 	}
-	lset.CopyFrom(s.l)
+	builder.Assign(s.l)
 	*chks = append((*chks)[:0], s.chunks...)
 
 	return nil
@@ -197,16 +197,15 @@ func TestIndexRW_Postings(t *testing.T) {
 	p, err := ir.Postings("a", "1")
 	require.NoError(t, err)
 
-	var l labels.Labels
 	var c []chunks.Meta
 	var builder labels.ScratchBuilder
 
 	for i := 0; p.Next(); i++ {
-		err := ir.Series(p.At(), &builder, &l, &c)
+		err := ir.Series(p.At(), &builder, &c)
 
 		require.NoError(t, err)
 		require.Equal(t, 0, len(c))
-		require.Equal(t, series[i], l)
+		require.Equal(t, series[i], builder.Labels())
 	}
 	require.NoError(t, p.Err())
 
@@ -318,11 +317,10 @@ func TestPostingsMany(t *testing.T) {
 	require.NoError(t, err)
 
 	got := []string{}
-	var lbls labels.Labels
 	var metas []chunks.Meta
 	for it.Next() {
-		require.NoError(t, ir.Series(it.At(), &builder, &lbls, &metas))
-		got = append(got, lbls.Copy().Get("i"))
+		require.NoError(t, ir.Series(it.At(), &builder, &metas))
+		got = append(got, builder.Labels().Get("i"))
 	}
 	require.NoError(t, it.Err())
 	exp := []string{}
@@ -421,21 +419,20 @@ func TestPersistence_index_e2e(t *testing.T) {
 		expp, err := mi.Postings(p.Name, p.Value)
 		require.NoError(t, err)
 
-		var lset, explset labels.Labels
 		var chks, expchks []chunks.Meta
-		var builder labels.ScratchBuilder
+		var builder, eBuilder labels.ScratchBuilder
 
 		for gotp.Next() {
 			require.True(t, expp.Next())
 
 			ref := gotp.At()
 
-			err := ir.Series(ref, &builder, &lset, &chks)
+			err := ir.Series(ref, &builder, &chks)
 			require.NoError(t, err)
 
-			err = mi.Series(expp.At(), &builder, &explset, &expchks)
+			err = mi.Series(expp.At(), &eBuilder, &expchks)
 			require.NoError(t, err)
-			require.Equal(t, explset, lset)
+			require.Equal(t, eBuilder.Labels(), builder.Labels())
 			require.Equal(t, expchks, chks)
 		}
 		require.False(t, expp.Next(), "Expected no more postings for %q=%q", p.Name, p.Value)
@@ -47,21 +47,21 @@ func NewOOOHeadIndexReader(head *Head, mint, maxt int64) *OOOHeadIndexReader {
 	return &OOOHeadIndexReader{hr}
 }
 
-func (oh *OOOHeadIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, lbls *labels.Labels, chks *[]chunks.Meta) error {
-	return oh.series(ref, lbls, chks, 0)
+func (oh *OOOHeadIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
+	return oh.series(ref, builder, chks, 0)
 }
 
 // The passed lastMmapRef tells upto what max m-map chunk that we can consider.
 // If it is 0, it means all chunks need to be considered.
 // If it is non-0, then the oooHeadChunk must not be considered.
-func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, lbls *labels.Labels, chks *[]chunks.Meta, lastMmapRef chunks.ChunkDiskMapperRef) error {
+func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta, lastMmapRef chunks.ChunkDiskMapperRef) error {
 	s := oh.head.series.getByID(chunks.HeadSeriesRef(ref))
 
 	if s == nil {
 		oh.head.metrics.seriesNotFound.Inc()
 		return storage.ErrNotFound
 	}
-	lbls.CopyFrom(s.lset)
+	builder.Assign(s.lset)
 
 	if chks == nil {
 		return nil
@@ -400,8 +400,8 @@ func (ir *OOOCompactionHeadIndexReader) SortedPostings(p index.Postings) index.P
 	return p
 }
 
-func (ir *OOOCompactionHeadIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, lset *labels.Labels, chks *[]chunks.Meta) error {
-	return ir.ch.oooIR.series(ref, lset, chks, ir.ch.lastMmapRef)
+func (ir *OOOCompactionHeadIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
+	return ir.ch.oooIR.series(ref, builder, chks, ir.ch.lastMmapRef)
 }
 
 func (ir *OOOCompactionHeadIndexReader) SortedLabelValues(name string, matchers ...*labels.Matcher) ([]string, error) {
@@ -357,14 +357,13 @@ func TestOOOHeadIndexReader_Series(t *testing.T) {
 			ir := NewOOOHeadIndexReader(h, tc.queryMinT, tc.queryMaxT)
 
 			var chks []chunks.Meta
-			var respLset labels.Labels
 			var b labels.ScratchBuilder
-			err := ir.Series(storage.SeriesRef(s1ID), &b, &respLset, &chks)
+			err := ir.Series(storage.SeriesRef(s1ID), &b, &chks)
 			require.NoError(t, err)
-			require.Equal(t, s1Lset, respLset)
+			require.Equal(t, s1Lset, b.Labels())
 			require.Equal(t, expChunks, chks)
 
-			err = ir.Series(storage.SeriesRef(s1ID+1), &b, &respLset, &chks)
+			err = ir.Series(storage.SeriesRef(s1ID+1), &b, &chks)
 			require.Equal(t, storage.ErrNotFound, err)
 		})
 	}
@@ -841,9 +840,8 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {
 			// markers like OOOLastRef. These are then used by the ChunkReader.
 			ir := NewOOOHeadIndexReader(db.head, tc.queryMinT, tc.queryMaxT)
 			var chks []chunks.Meta
-			var respLset labels.Labels
 			var b labels.ScratchBuilder
-			err := ir.Series(s1Ref, &b, &respLset, &chks)
+			err := ir.Series(s1Ref, &b, &chks)
 			require.NoError(t, err)
 			require.Equal(t, len(tc.expChunksSamples), len(chks))
 
@@ -1005,9 +1003,8 @@ func TestOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding(
 			// markers like OOOLastRef. These are then used by the ChunkReader.
 			ir := NewOOOHeadIndexReader(db.head, tc.queryMinT, tc.queryMaxT)
 			var chks []chunks.Meta
-			var respLset labels.Labels
 			var b labels.ScratchBuilder
-			err := ir.Series(s1Ref, &b, &respLset, &chks)
+			err := ir.Series(s1Ref, &b, &chks)
 			require.NoError(t, err)
 			require.Equal(t, len(tc.expChunksSamples), len(chks))
 
@@ -451,14 +451,13 @@ type blockBaseSeriesSet struct {
 	curr seriesData
 
 	bufChks []chunks.Meta
-	bufLbls labels.Labels
 	builder labels.ScratchBuilder
 	err     error
 }
 
 func (b *blockBaseSeriesSet) Next() bool {
 	for b.p.Next() {
-		if err := b.index.Series(b.p.At(), &b.builder, &b.bufLbls, &b.bufChks); err != nil {
+		if err := b.index.Series(b.p.At(), &b.builder, &b.bufChks); err != nil {
 			// Postings may be stale. Skip if no underlying series exists.
 			if errors.Cause(err) == storage.ErrNotFound {
 				continue
@@ -529,7 +528,7 @@ func (b *blockBaseSeriesSet) Next() bool {
 			intervals = intervals.Add(tombstones.Interval{Mint: b.maxt + 1, Maxt: math.MaxInt64})
 		}
 
-		b.curr.labels = b.bufLbls.Copy()
+		b.curr.labels = b.builder.Labels()
 		b.curr.chks = chks
 		b.curr.intervals = intervals
 
@@ -1270,12 +1270,12 @@ func (m mockIndex) SortedPostings(p index.Postings) index.Postings {
 	return index.NewListPostings(ep)
 }
 
-func (m mockIndex) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, lset *labels.Labels, chks *[]chunks.Meta) error {
+func (m mockIndex) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
 	s, ok := m.series[ref]
 	if !ok {
 		return storage.ErrNotFound
 	}
-	lset.CopyFrom(s.l)
+	builder.Assign(s.l)
 	*chks = append((*chks)[:0], s.chunks...)
 
 	return nil
@@ -1886,8 +1886,8 @@ func TestPostingsForMatchers(t *testing.T) {
 
 	var builder labels.ScratchBuilder
 	for p.Next() {
-		lbls := labels.Labels{}
-		require.NoError(t, ir.Series(p.At(), &builder, &lbls, &[]chunks.Meta{}))
+		require.NoError(t, ir.Series(p.At(), &builder, &[]chunks.Meta{}))
+		lbls := builder.Labels()
 		if _, ok := exp[lbls.String()]; !ok {
 			t.Errorf("Evaluating %v, unexpected result %s", c.matchers, lbls.String())
 		} else {
@@ -2101,7 +2101,7 @@ func (m mockMatcherIndex) SortedPostings(p index.Postings) index.Postings {
 	return index.EmptyPostings()
 }
 
-func (m mockMatcherIndex) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, lset *labels.Labels, chks *[]chunks.Meta) error {
+func (m mockMatcherIndex) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
 	return nil
 }
 
@@ -84,8 +84,7 @@ func TestRepairBadIndexVersion(t *testing.T) {
 	for p.Next() {
 		t.Logf("next ID %d", p.At())
 
-		var lset labels.Labels
-		require.Error(t, r.Series(p.At(), &builder, &lset, nil))
+		require.Error(t, r.Series(p.At(), &builder, nil))
 	}
 	require.NoError(t, p.Err())
 	require.NoError(t, r.Close())
@@ -105,10 +104,9 @@ func TestRepairBadIndexVersion(t *testing.T) {
 	for p.Next() {
 		t.Logf("next ID %d", p.At())
 
-		var lset labels.Labels
 		var chks []chunks.Meta
-		require.NoError(t, r.Series(p.At(), &builder, &lset, &chks))
-		res = append(res, lset)
+		require.NoError(t, r.Series(p.At(), &builder, &chks))
+		res = append(res, builder.Labels())
 	}
 
 	require.NoError(t, p.Err())