Mirror of https://github.com/prometheus/prometheus.git
Fixes after merge
Signed-off-by: György Krajcsovits <gyorgy.krajcsovits@grafana.com>
parent 103c4fd289
commit d524ab48b1
@@ -1009,7 +1009,7 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, minT, maxT int64,
         }
         all = indexr.SortedPostings(all)
         // Blocks meta is half open: [min, max), so subtract 1 to ensure we don't hold samples with exact meta.MaxTime timestamp.
-        sets = append(sets, newBlockChunkSeriesSet(indexr, chunkr, tombsr, all, minT, maxT-1, false))
+        sets = append(sets, newBlockChunkSeriesSet(b.Meta().ULID, indexr, chunkr, tombsr, all, minT, maxT-1, false))
 
         if len(outBlocks) > 1 {
             // To iterate series when populating symbols, we cannot reuse postings we just got, but need to get a new copy.
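The hunk above threads the source block's identity into newBlockChunkSeriesSet: populateBlock now passes b.Meta().ULID taken from the BlockReader. As a rough illustration only, the sketch below reads that same ULID from an on-disk block through the public tsdb package; the block directory is a placeholder, and it assumes the three-argument OpenBlock(logger, dir, pool) helper of this era, which falls back to defaults when logger and pool are nil.

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/tsdb"
    )

    func main() {
        // Placeholder path: real blocks live under the TSDB data directory,
        // each in a directory named after its ULID.
        b, err := tsdb.OpenBlock(nil, "data/01BKGV7JBM69T2G1BGBGM6KB12", nil)
        if err != nil {
            panic(err)
        }
        defer b.Close()

        // Meta().ULID is the value populateBlock now hands to newBlockChunkSeriesSet.
        fmt.Println("block ULID:", b.Meta().ULID)
    }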
@@ -1057,8 +1057,6 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, minT, maxT int64,
     }
 
     var (
-        ref      = storage.SeriesRef(0)
-        chks     []chunks.Meta
         chksIter chunks.Iterator
     )
 
@@ -587,15 +587,15 @@ func TestCompaction_CompactWithSplitting(t *testing.T) {
             p, err := idxr.Postings(k, v)
             require.NoError(t, err)
 
-            var lbls labels.Labels
+            var lbls labels.ScratchBuilder
             for p.Next() {
                 ref := p.At()
                 require.NoError(t, idxr.Series(ref, &lbls, nil))
 
-                require.Equal(t, uint64(shardIndex), lbls.Hash()%shardCount)
+                require.Equal(t, uint64(shardIndex), lbls.Labels().Hash()%shardCount)
 
                 // Collect all symbols used by series.
-                for _, l := range lbls {
+                for _, l := range lbls.Labels() {
                     seriesSymbols[l.Name] = struct{}{}
                     seriesSymbols[l.Value] = struct{}{}
                 }
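Both test hunks that switch var lbls from labels.Labels to labels.ScratchBuilder (this one and the TestHeadShardedPostings hunk further down) follow the same pattern: the index reader's Series call fills a ScratchBuilder, and the caller materializes the label set with Labels() before hashing or ranging over it. A minimal sketch of that access pattern using only the labels package; the builder is filled by hand here where the tests would call idxr.Series(ref, &lbls, nil).

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/model/labels"
    )

    func main() {
        // ScratchBuilder is reused across series lookups to avoid per-series allocations.
        var builder labels.ScratchBuilder

        // In the tests, idxr.Series(ref, &builder, nil) populates the builder;
        // it is filled by hand here to keep the sketch self-contained.
        builder.Reset()
        builder.Add("__name__", "http_requests_total")
        builder.Add("job", "api")
        builder.Sort()

        // Materialize labels.Labels once, then hash and iterate, as the tests now do.
        lbls := builder.Labels()
        const shardCount = 16
        fmt.Println("shard:", lbls.Hash()%shardCount)

        for _, l := range lbls {
            fmt.Println(l.Name, "=", l.Value)
        }
    }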
@@ -1214,7 +1214,7 @@ func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID ui
 
 // appendFloatHistogram adds the float histogram.
 // It is unsafe to call this concurrently with s.iterator(...) without holding the series lock.
-func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, appendID uint64, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64) (sampleInOrder, chunkCreated bool) {
+func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, appendID uint64, chunkDiskMapper chunkDiskMapper, chunkRange int64) (sampleInOrder, chunkCreated bool) {
     // Head controls the execution of recoding, so that we own the proper
     // chunk reference afterwards. We check for Appendable before
     // appendPreprocessor because in case it ends up creating a new chunk,
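In the hunk above, appendFloatHistogram now takes a package-local chunkDiskMapper interface instead of the concrete *chunks.ChunkDiskMapper, so the on-disk mapper can be swapped for a fake in tests. The diff does not show the interface's method set, so the sketch below is a generic, hypothetical illustration of the same technique with invented names.

    package main

    import "fmt"

    // diskMapper stands in for a concrete dependency such as chunks.ChunkDiskMapper.
    type diskMapper struct{}

    func (diskMapper) WriteChunk(seriesRef uint64) {
        fmt.Println("wrote chunk for series", seriesRef)
    }

    // chunkWriter is a hypothetical narrow interface covering only what the caller needs.
    type chunkWriter interface {
        WriteChunk(seriesRef uint64)
    }

    // appendSample accepts the interface, so tests can pass a lightweight fake
    // instead of constructing the real mapper.
    func appendSample(w chunkWriter, seriesRef uint64) {
        w.WriteChunk(seriesRef)
    }

    func main() {
        appendSample(diskMapper{}, 42)
    }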
@@ -2479,10 +2479,10 @@ func TestHeadShardedPostings(t *testing.T) {
     // We expect the series in each shard are the expected ones.
     for shardIndex, ids := range actualShards {
         for _, id := range ids {
-            var lbls labels.Labels
+            var lbls labels.ScratchBuilder
 
             require.NoError(t, ir.Series(id, &lbls, nil))
-            require.Equal(t, shardIndex, lbls.Hash()%shardCount)
+            require.Equal(t, shardIndex, lbls.Labels().Hash()%shardCount)
         }
     }
 }
@@ -1909,7 +1909,9 @@ func (dec *Decoder) LabelValueFor(b []byte, label string) (string, error) {
 // Previous contents of lbls can be overwritten - make sure you copy before retaining.
 func (dec *Decoder) Series(b []byte, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
     builder.Reset()
-    *chks = (*chks)[:0]
+    if chks != nil {
+        *chks = (*chks)[:0]
+    }
 
     d := encoding.Decbuf{B: b}
 
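The final hunk lets Decoder.Series tolerate a nil chks pointer by resetting the caller's slice only when one was supplied, presumably so callers that only need the labels can pass nil. A tiny standalone sketch of that optional-output-slice guard, with illustrative names rather than the decoder's own:

    package main

    import "fmt"

    // decodeInto resets and fills the optional output slice only when the caller
    // passed one, mirroring the nil guard added around *chks = (*chks)[:0].
    func decodeInto(out *[]int) {
        if out != nil {
            *out = (*out)[:0] // reuse the caller's backing array
            *out = append(*out, 1, 2, 3)
        }
        // ...decoding that does not need the slice would continue here...
    }

    func main() {
        var chks []int
        decodeInto(&chks) // caller wants the decoded chunk metas
        fmt.Println(chks)

        decodeInto(nil) // caller only needs the labels, skips chunk metas
    }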