Fixes after merge

Signed-off-by: György Krajcsovits <gyorgy.krajcsovits@grafana.com>
György Krajcsovits 2023-01-08 16:09:09 +01:00
parent 103c4fd289
commit d524ab48b1
5 changed files with 10 additions and 10 deletions

@@ -1009,7 +1009,7 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, minT, maxT int64,
}
all = indexr.SortedPostings(all)
// Blocks meta is half open: [min, max), so subtract 1 to ensure we don't hold samples with exact meta.MaxTime timestamp.
- sets = append(sets, newBlockChunkSeriesSet(indexr, chunkr, tombsr, all, minT, maxT-1, false))
+ sets = append(sets, newBlockChunkSeriesSet(b.Meta().ULID, indexr, chunkr, tombsr, all, minT, maxT-1, false))
if len(outBlocks) > 1 {
// To iterate series when populating symbols, we cannot reuse postings we just got, but need to get a new copy.
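The only functional change in this hunk is that newBlockChunkSeriesSet now also receives the source block's ULID. A minimal sketch of the idea, assuming the ID is kept on the series set so chunk-read errors can name the block they came from (blockChunkSeries and chunkErr below are illustrative stand-ins, not the real tsdb types):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/oklog/ulid"
)

// blockChunkSeries is a stand-in for a series set that remembers which
// block it reads from, so errors can be attributed to that block.
type blockChunkSeries struct {
	blockID ulid.ULID
}

// chunkErr wraps a low-level chunk error with the originating block's ID.
func (s *blockChunkSeries) chunkErr(err error) error {
	return fmt.Errorf("block %s: %w", s.blockID, err)
}

func main() {
	var id ulid.ULID // zero ULID is enough for the sketch
	s := &blockChunkSeries{blockID: id}
	fmt.Println(s.chunkErr(errors.New("failed to read chunk")))
}
```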
@@ -1057,8 +1057,6 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, minT, maxT int64,
}
var (
ref = storage.SeriesRef(0)
chks []chunks.Meta
chksIter chunks.Iterator
)

@@ -587,15 +587,15 @@ func TestCompaction_CompactWithSplitting(t *testing.T) {
p, err := idxr.Postings(k, v)
require.NoError(t, err)
- var lbls labels.Labels
+ var lbls labels.ScratchBuilder
for p.Next() {
ref := p.At()
require.NoError(t, idxr.Series(ref, &lbls, nil))
- require.Equal(t, uint64(shardIndex), lbls.Hash()%shardCount)
+ require.Equal(t, uint64(shardIndex), lbls.Labels().Hash()%shardCount)
// Collect all symbols used by series.
- for _, l := range lbls {
+ for _, l := range lbls.Labels() {
seriesSymbols[l.Name] = struct{}{}
seriesSymbols[l.Value] = struct{}{}
}
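The test now fills a labels.ScratchBuilder instead of a labels.Labels value and calls Labels() to materialize the set before hashing or iterating. A minimal sketch of that pattern in isolation (label names and values here are made up for illustration):

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	// Build a label set incrementally, the way index readers now fill a
	// ScratchBuilder rather than appending to a labels.Labels slice.
	b := labels.NewScratchBuilder(2)
	b.Add("__name__", "http_requests_total")
	b.Add("job", "api")
	b.Sort()

	// Labels() materializes labels.Labels from the builder; that is the
	// value that exposes Hash() and iteration.
	lset := b.Labels()
	const shardCount = 4
	fmt.Println("shard:", lset.Hash()%shardCount)

	// Reset lets the same builder be reused for the next series.
	b.Reset()
}
```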

@@ -1214,7 +1214,7 @@ func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID ui
// appendFloatHistogram adds the float histogram.
// It is unsafe to call this concurrently with s.iterator(...) without holding the series lock.
- func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, appendID uint64, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64) (sampleInOrder, chunkCreated bool) {
+ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, appendID uint64, chunkDiskMapper chunkDiskMapper, chunkRange int64) (sampleInOrder, chunkCreated bool) {
// Head controls the execution of recoding, so that we own the proper
// chunk reference afterwards. We check for Appendable before
// appendPreprocessor because in case it ends up creating a new chunk,
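The signature change above swaps the concrete *chunks.ChunkDiskMapper for an unexported chunkDiskMapper interface, the usual Go move when a dependency needs to be replaceable in tests. A generic sketch of that pattern under assumed names (the diskMapper interface and its single method below are hypothetical, not the real method set):

```go
package main

import "fmt"

// diskMapper is a hypothetical narrow interface standing in for whatever
// methods the appending code actually needs from the chunk-disk mapper.
type diskMapper interface {
	CutNewChunk() error
}

// realMapper would be the production implementation.
type realMapper struct{}

func (realMapper) CutNewChunk() error { return nil }

// fakeMapper is a test double that just counts calls.
type fakeMapper struct{ cuts int }

func (f *fakeMapper) CutNewChunk() error {
	f.cuts++
	return nil
}

// appendSample depends on the interface, so tests can inject fakeMapper.
func appendSample(m diskMapper) error {
	return m.CutNewChunk()
}

func main() {
	f := &fakeMapper{}
	_ = appendSample(f)            // test path with a fake
	_ = appendSample(realMapper{}) // production path
	fmt.Println("chunks cut in fake:", f.cuts)
}
```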

@@ -2479,10 +2479,10 @@ func TestHeadShardedPostings(t *testing.T) {
// We expect the series in each shard are the expected ones.
for shardIndex, ids := range actualShards {
for _, id := range ids {
- var lbls labels.Labels
+ var lbls labels.ScratchBuilder
require.NoError(t, ir.Series(id, &lbls, nil))
- require.Equal(t, shardIndex, lbls.Hash()%shardCount)
+ require.Equal(t, shardIndex, lbls.Labels().Hash()%shardCount)
}
}
}

@@ -1909,7 +1909,9 @@ func (dec *Decoder) LabelValueFor(b []byte, label string) (string, error) {
// Previous contents of lbls can be overwritten - make sure you copy before retaining.
func (dec *Decoder) Series(b []byte, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
builder.Reset()
- *chks = (*chks)[:0]
+ if chks != nil {
+     *chks = (*chks)[:0]
+ }
d := encoding.Decbuf{B: b}
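The added guard lets callers that only care about the decoded labels pass a nil chunk slice. A small sketch of the same defensive pattern with simplified types (decodeSeries below is a stand-in, not the real record.Decoder method):

```go
package main

import "fmt"

// decodeSeries mimics the guard above: chks may be nil when the caller
// only wants the labels decoded, so the reslice must be conditional.
func decodeSeries(chks *[]int) {
	if chks != nil {
		*chks = (*chks)[:0] // truncate in place, reusing the backing array
	}
	// ... decode chunk metas into *chks only when it is non-nil ...
}

func main() {
	buf := make([]int, 3, 8)
	decodeSeries(&buf) // truncates but keeps capacity for reuse
	decodeSeries(nil)  // labels-only caller: no nil-pointer panic
	fmt.Println(len(buf), cap(buf)) // 0 8
}
```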