2017-04-10 11:59:45 -07:00
|
|
|
// Copyright 2017 The Prometheus Authors
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
2016-12-14 06:47:05 -08:00
|
|
|
package tsdb
|
|
|
|
|
|
|
|
import (
|
2020-02-06 07:58:38 -08:00
|
|
|
"context"
|
2023-11-16 10:54:41 -08:00
|
|
|
"errors"
|
2017-07-05 07:19:28 -07:00
|
|
|
"fmt"
|
2017-04-13 12:06:14 -07:00
|
|
|
"math"
|
2017-04-13 07:27:31 -07:00
|
|
|
"math/rand"
|
2018-10-25 02:32:57 -07:00
|
|
|
"path/filepath"
|
2016-12-19 02:44:11 -08:00
|
|
|
"sort"
|
2019-06-07 06:41:44 -07:00
|
|
|
"strconv"
|
2023-08-29 02:03:27 -07:00
|
|
|
"sync"
|
2016-12-14 06:47:05 -08:00
|
|
|
"testing"
|
2020-07-31 08:03:02 -07:00
|
|
|
"time"
|
2016-12-14 06:47:05 -08:00
|
|
|
|
2022-11-28 00:12:54 -08:00
|
|
|
"github.com/oklog/ulid"
|
2020-10-29 02:43:23 -07:00
|
|
|
"github.com/stretchr/testify/require"
|
2020-10-22 02:00:08 -07:00
|
|
|
|
2023-05-19 01:24:06 -07:00
|
|
|
"github.com/prometheus/prometheus/model/histogram"
|
2021-11-08 06:23:17 -08:00
|
|
|
"github.com/prometheus/prometheus/model/labels"
|
2020-02-06 07:58:38 -08:00
|
|
|
"github.com/prometheus/prometheus/storage"
|
2019-08-13 01:34:14 -07:00
|
|
|
"github.com/prometheus/prometheus/tsdb/chunkenc"
|
|
|
|
"github.com/prometheus/prometheus/tsdb/chunks"
|
|
|
|
"github.com/prometheus/prometheus/tsdb/index"
|
2019-09-19 02:15:41 -07:00
|
|
|
"github.com/prometheus/prometheus/tsdb/tombstones"
|
2019-08-13 01:34:14 -07:00
|
|
|
"github.com/prometheus/prometheus/tsdb/tsdbutil"
|
2023-09-14 09:57:31 -07:00
|
|
|
"github.com/prometheus/prometheus/util/annotations"
|
2016-12-14 06:47:05 -08:00
|
|
|
)
|
|
|
|
|
2020-07-31 08:03:02 -07:00
|
|
|
// TODO(bwplotka): Replace those mocks with remote.concreteSeriesSet.

// mockSeriesSet implements storage.SeriesSet by delegating every call to a
// caller-supplied closure, so tests can stub each behavior independently.
type mockSeriesSet struct {
	next   func() bool                    // advances to the next series
	series func() storage.Series          // series at the current position
	ws     func() annotations.Annotations // warnings gathered during iteration
	err    func() error                   // terminal iteration error, if any
}
|
|
|
|
|
2023-09-14 09:57:31 -07:00
|
|
|
// The methods below satisfy storage.SeriesSet by forwarding to the stubbed
// closures configured on the mock.
func (m *mockSeriesSet) Next() bool                        { return m.next() }
func (m *mockSeriesSet) At() storage.Series                { return m.series() }
func (m *mockSeriesSet) Err() error                        { return m.err() }
func (m *mockSeriesSet) Warnings() annotations.Annotations { return m.ws() }
|
2018-09-21 01:07:35 -07:00
|
|
|
|
2020-02-06 07:58:38 -08:00
|
|
|
func newMockSeriesSet(list []storage.Series) *mockSeriesSet {
|
2018-09-21 01:07:35 -07:00
|
|
|
i := -1
|
|
|
|
return &mockSeriesSet{
|
|
|
|
next: func() bool {
|
|
|
|
i++
|
|
|
|
return i < len(list)
|
|
|
|
},
|
2020-02-06 07:58:38 -08:00
|
|
|
series: func() storage.Series {
|
2018-09-21 01:07:35 -07:00
|
|
|
return list[i]
|
|
|
|
},
|
|
|
|
err: func() error { return nil },
|
2023-09-14 09:57:31 -07:00
|
|
|
ws: func() annotations.Annotations { return nil },
|
2018-09-21 01:07:35 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-07-31 08:03:02 -07:00
|
|
|
// mockChunkSeriesSet implements storage.ChunkSeriesSet via caller-supplied
// closures, mirroring mockSeriesSet but yielding chunk series.
type mockChunkSeriesSet struct {
	next   func() bool                    // advances to the next chunk series
	series func() storage.ChunkSeries     // chunk series at the current position
	ws     func() annotations.Annotations // warnings gathered during iteration
	err    func() error                   // terminal iteration error, if any
}
|
|
|
|
|
2023-09-14 09:57:31 -07:00
|
|
|
// The methods below satisfy storage.ChunkSeriesSet by forwarding to the
// stubbed closures configured on the mock.
func (m *mockChunkSeriesSet) Next() bool                        { return m.next() }
func (m *mockChunkSeriesSet) At() storage.ChunkSeries           { return m.series() }
func (m *mockChunkSeriesSet) Err() error                        { return m.err() }
func (m *mockChunkSeriesSet) Warnings() annotations.Annotations { return m.ws() }
|
2016-12-19 02:44:11 -08:00
|
|
|
|
2020-07-31 08:03:02 -07:00
|
|
|
func newMockChunkSeriesSet(list []storage.ChunkSeries) *mockChunkSeriesSet {
|
|
|
|
i := -1
|
|
|
|
return &mockChunkSeriesSet{
|
|
|
|
next: func() bool {
|
|
|
|
i++
|
|
|
|
return i < len(list)
|
|
|
|
},
|
|
|
|
series: func() storage.ChunkSeries {
|
|
|
|
return list[i]
|
|
|
|
},
|
|
|
|
err: func() error { return nil },
|
2023-09-14 09:57:31 -07:00
|
|
|
ws: func() annotations.Annotations { return nil },
|
2020-07-31 08:03:02 -07:00
|
|
|
}
|
2016-12-19 02:44:11 -08:00
|
|
|
}
|
2017-04-09 07:00:25 -07:00
|
|
|
|
2018-10-12 02:45:19 -07:00
|
|
|
// seriesSamples describes one series of a test fixture: its label set and its
// samples pre-grouped into the chunks they should be stored as.
type seriesSamples struct {
	lset   map[string]string // label name -> label value
	chunks [][]sample        // each inner slice becomes one chunk
}
|
|
|
|
|
2023-10-03 13:09:25 -07:00
|
|
|
// Index: labels -> postings -> chunkMetas -> chunkRef.
|
|
|
|
// ChunkReader: ref -> vals.
|
2019-04-25 03:07:04 -07:00
|
|
|
func createIdxChkReaders(t *testing.T, tc []seriesSamples) (IndexReader, ChunkReader, int64, int64) {
|
2017-04-13 07:27:31 -07:00
|
|
|
sort.Slice(tc, func(i, j int) bool {
|
|
|
|
return labels.Compare(labels.FromMap(tc[i].lset), labels.FromMap(tc[i].lset)) < 0
|
|
|
|
})
|
|
|
|
|
2017-11-30 06:34:49 -08:00
|
|
|
postings := index.NewMemPostings()
|
2021-11-06 03:10:04 -07:00
|
|
|
chkReader := mockChunkReader(make(map[chunks.ChunkRef]chunkenc.Chunk))
|
2020-09-10 08:05:47 -07:00
|
|
|
lblIdx := make(map[string]map[string]struct{})
|
2017-04-13 07:27:31 -07:00
|
|
|
mi := newMockIndex()
|
2019-02-14 05:29:41 -08:00
|
|
|
blockMint := int64(math.MaxInt64)
|
|
|
|
blockMaxt := int64(math.MinInt64)
|
2017-04-13 07:27:31 -07:00
|
|
|
|
2021-11-06 03:10:04 -07:00
|
|
|
var chunkRef chunks.ChunkRef
|
2017-04-13 07:27:31 -07:00
|
|
|
for i, s := range tc {
|
2023-04-09 00:08:40 -07:00
|
|
|
i++ // 0 is not a valid posting.
|
2017-11-30 06:34:49 -08:00
|
|
|
metas := make([]chunks.Meta, 0, len(s.chunks))
|
2017-04-13 07:27:31 -07:00
|
|
|
for _, chk := range s.chunks {
|
2019-02-14 05:29:41 -08:00
|
|
|
if chk[0].t < blockMint {
|
|
|
|
blockMint = chk[0].t
|
|
|
|
}
|
|
|
|
if chk[len(chk)-1].t > blockMaxt {
|
|
|
|
blockMaxt = chk[len(chk)-1].t
|
|
|
|
}
|
|
|
|
|
2017-11-30 06:34:49 -08:00
|
|
|
metas = append(metas, chunks.Meta{
|
2017-04-13 07:27:31 -07:00
|
|
|
MinTime: chk[0].t,
|
|
|
|
MaxTime: chk[len(chk)-1].t,
|
2019-04-18 06:11:39 -07:00
|
|
|
Ref: chunkRef,
|
2017-04-13 07:27:31 -07:00
|
|
|
})
|
|
|
|
|
2023-11-01 07:52:04 -07:00
|
|
|
switch {
|
|
|
|
case chk[0].fh != nil:
|
|
|
|
chunk := chunkenc.NewFloatHistogramChunk()
|
|
|
|
app, _ := chunk.Appender()
|
|
|
|
for _, smpl := range chk {
|
|
|
|
require.NotNil(t, smpl.fh, "chunk can only contain one type of sample")
|
|
|
|
_, _, _, err := app.AppendFloatHistogram(nil, smpl.t, smpl.fh, true)
|
|
|
|
require.NoError(t, err, "chunk should be appendable")
|
|
|
|
}
|
|
|
|
chkReader[chunkRef] = chunk
|
|
|
|
case chk[0].h != nil:
|
|
|
|
chunk := chunkenc.NewHistogramChunk()
|
|
|
|
app, _ := chunk.Appender()
|
|
|
|
for _, smpl := range chk {
|
|
|
|
require.NotNil(t, smpl.h, "chunk can only contain one type of sample")
|
|
|
|
_, _, _, err := app.AppendHistogram(nil, smpl.t, smpl.h, true)
|
|
|
|
require.NoError(t, err, "chunk should be appendable")
|
|
|
|
}
|
|
|
|
chkReader[chunkRef] = chunk
|
|
|
|
default:
|
|
|
|
chunk := chunkenc.NewXORChunk()
|
|
|
|
app, _ := chunk.Appender()
|
|
|
|
for _, smpl := range chk {
|
|
|
|
require.Nil(t, smpl.h, "chunk can only contain one type of sample")
|
|
|
|
require.Nil(t, smpl.fh, "chunk can only contain one type of sample")
|
|
|
|
app.Append(smpl.t, smpl.f)
|
|
|
|
}
|
|
|
|
chkReader[chunkRef] = chunk
|
2017-04-13 07:27:31 -07:00
|
|
|
}
|
2020-03-23 07:47:11 -07:00
|
|
|
chunkRef++
|
2017-04-13 07:27:31 -07:00
|
|
|
}
|
2017-07-21 01:37:52 -07:00
|
|
|
ls := labels.FromMap(s.lset)
|
2021-11-06 03:10:04 -07:00
|
|
|
require.NoError(t, mi.AddSeries(storage.SeriesRef(i), ls, metas...))
|
2017-04-13 07:27:31 -07:00
|
|
|
|
2021-11-06 03:10:04 -07:00
|
|
|
postings.Add(storage.SeriesRef(i), ls)
|
2017-07-21 01:37:52 -07:00
|
|
|
|
2022-03-09 14:17:29 -08:00
|
|
|
ls.Range(func(l labels.Label) {
|
2017-07-21 01:37:52 -07:00
|
|
|
vs, present := lblIdx[l.Name]
|
|
|
|
if !present {
|
2020-09-10 08:05:47 -07:00
|
|
|
vs = map[string]struct{}{}
|
2017-07-21 01:37:52 -07:00
|
|
|
lblIdx[l.Name] = vs
|
|
|
|
}
|
2020-09-10 08:05:47 -07:00
|
|
|
vs[l.Value] = struct{}{}
|
2022-03-09 14:17:29 -08:00
|
|
|
})
|
2017-04-13 07:27:31 -07:00
|
|
|
}
|
|
|
|
|
2020-10-29 02:43:23 -07:00
|
|
|
require.NoError(t, postings.Iter(func(l labels.Label, p index.Postings) error {
|
2017-11-30 06:34:49 -08:00
|
|
|
return mi.WritePostings(l.Name, l.Value, p)
|
2019-04-25 03:07:04 -07:00
|
|
|
}))
|
2019-02-14 05:29:41 -08:00
|
|
|
return mi, chkReader, blockMint, blockMaxt
|
2017-04-13 07:27:31 -07:00
|
|
|
}
|
|
|
|
|
2020-07-31 08:03:02 -07:00
|
|
|
// blockQuerierTestCase describes one Select query against a block and the
// series/chunk sets it is expected to return.
type blockQuerierTestCase struct {
	mint, maxt int64                  // queried time range
	ms         []*labels.Matcher      // label matchers passed to Select
	hints      *storage.SelectHints   // optional hints (e.g. DisableTrimming); may be nil
	exp        storage.SeriesSet      // expected result of the sample querier
	expChks    storage.ChunkSeriesSet // expected result of the chunk querier
}
|
2017-04-21 13:08:26 -07:00
|
|
|
|
2020-07-31 08:03:02 -07:00
|
|
|
// testBlockQuerier runs the test case against both the sample-level
// blockQuerier and the chunk-level blockChunkQuerier built from the same
// index, chunk reader, and tombstones, asserting that each produces exactly
// the expected series/chunk sets.
func testBlockQuerier(t *testing.T, c blockQuerierTestCase, ir IndexReader, cr ChunkReader, stones *tombstones.MemTombstones) {
	t.Run("sample", func(t *testing.T) {
		q := blockQuerier{
			blockBaseQuerier: &blockBaseQuerier{
				index:      ir,
				chunks:     cr,
				tombstones: stones,

				mint: c.mint,
				maxt: c.maxt,
			},
		}

		res := q.Select(context.Background(), false, c.hints, c.ms...)
		defer func() { require.NoError(t, q.Close()) }()

		// Walk expected and actual sets in lockstep.
		for {
			eok, rok := c.exp.Next(), res.Next()
			require.Equal(t, eok, rok)

			if !eok {
				require.Empty(t, res.Warnings())
				break
			}
			sexp := c.exp.At()
			sres := res.At()
			require.Equal(t, sexp.Labels(), sres.Labels())

			smplExp, errExp := storage.ExpandSamples(sexp.Iterator(nil), nil)
			smplRes, errRes := storage.ExpandSamples(sres.Iterator(nil), nil)

			require.Equal(t, errExp, errRes)
			require.Equal(t, smplExp, smplRes)
		}
		require.NoError(t, res.Err())
	})

	t.Run("chunk", func(t *testing.T) {
		q := blockChunkQuerier{
			blockBaseQuerier: &blockBaseQuerier{
				index:      ir,
				chunks:     cr,
				tombstones: stones,

				mint: c.mint,
				maxt: c.maxt,
			},
		}
		res := q.Select(context.Background(), false, c.hints, c.ms...)
		defer func() { require.NoError(t, q.Close()) }()

		// Walk expected and actual chunk sets in lockstep.
		for {
			eok, rok := c.expChks.Next(), res.Next()
			require.Equal(t, eok, rok)

			if !eok {
				require.Empty(t, res.Warnings())
				break
			}
			sexpChks := c.expChks.At()
			sres := res.At()

			require.Equal(t, sexpChks.Labels(), sres.Labels())

			// Chunk refs differ between fixture and querier output, so strip
			// them before comparing the chunk lists.
			chksExp, errExp := storage.ExpandChunks(sexpChks.Iterator(nil))
			rmChunkRefs(chksExp)
			chksRes, errRes := storage.ExpandChunks(sres.Iterator(nil))
			rmChunkRefs(chksRes)
			require.Equal(t, errExp, errRes)

			// Compare the decoded samples chunk by chunk.
			require.Equal(t, len(chksExp), len(chksRes))
			var exp, act [][]chunks.Sample
			for i := range chksExp {
				samples, err := storage.ExpandSamples(chksExp[i].Chunk.Iterator(nil), nil)
				require.NoError(t, err)
				exp = append(exp, samples)
				samples, err = storage.ExpandSamples(chksRes[i].Chunk.Iterator(nil), nil)
				require.NoError(t, err)
				act = append(act, samples)
			}

			require.Equal(t, exp, act)
		}
		require.NoError(t, res.Err())
	})
}
|
|
|
|
|
|
|
|
// TestBlockQuerier exercises block Select over the shared testData fixture:
// empty/inverted time ranges, non-matching matchers, full-range selects, and
// trimmed vs. untrimmed (DisableTrimming hint) range queries.
func TestBlockQuerier(t *testing.T) {
	for _, c := range []blockQuerierTestCase{
		{
			mint:    0,
			maxt:    0,
			ms:      []*labels.Matcher{},
			exp:     newMockSeriesSet([]storage.Series{}),
			expChks: newMockChunkSeriesSet([]storage.ChunkSeries{}),
		},
		{
			mint:    0,
			maxt:    0,
			ms:      []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
			exp:     newMockSeriesSet([]storage.Series{}),
			expChks: newMockChunkSeriesSet([]storage.ChunkSeries{}),
		},
		{
			// Inverted range (mint > maxt) selects nothing.
			mint:    1,
			maxt:    0,
			ms:      []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
			exp:     newMockSeriesSet([]storage.Series{}),
			expChks: newMockChunkSeriesSet([]storage.ChunkSeries{}),
		},
		{
			// Matcher that matches no series.
			mint:    math.MinInt64,
			maxt:    math.MaxInt64,
			ms:      []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "x")},
			exp:     newMockSeriesSet([]storage.Series{}),
			expChks: newMockChunkSeriesSet([]storage.ChunkSeries{}),
		},
		{
			// Full range, regexp matching every series.
			mint: math.MinInt64,
			maxt: math.MaxInt64,
			ms:   []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "a", ".*")},
			exp: newMockSeriesSet([]storage.Series{
				storage.NewListSeries(labels.FromStrings("a", "a"),
					[]chunks.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
				),
				storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
					[]chunks.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
				),
				storage.NewListSeries(labels.FromStrings("b", "b"),
					[]chunks.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}, sample{5, 1, nil, nil}, sample{6, 7, nil, nil}, sample{7, 2, nil, nil}},
				),
			}),
			expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
				storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
					[]chunks.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}}, []chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
				),
				storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
					[]chunks.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}, []chunks.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
				),
				storage.NewListChunkSeriesFromSamples(labels.FromStrings("b", "b"),
					[]chunks.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}}, []chunks.Sample{sample{5, 1, nil, nil}, sample{6, 7, nil, nil}, sample{7, 2, nil, nil}},
				),
			}),
		},
		{
			// Sub-range query with trimming: samples outside [2, 6] are cut.
			mint: 2,
			maxt: 6,
			ms:   []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
			exp: newMockSeriesSet([]storage.Series{
				storage.NewListSeries(labels.FromStrings("a", "a"),
					[]chunks.Sample{sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
				),
				storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
					[]chunks.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
				),
			}),
			expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
				storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
					[]chunks.Sample{sample{2, 3, nil, nil}, sample{3, 4, nil, nil}}, []chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
				),
				storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
					[]chunks.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}, []chunks.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
				),
			}),
		},
		{
			// This test runs a query disabling trimming. All chunks containing at least 1 sample within the queried
			// time range will be returned.
			mint:  2,
			maxt:  6,
			hints: &storage.SelectHints{Start: 2, End: 6, DisableTrimming: true},
			ms:    []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
			exp: newMockSeriesSet([]storage.Series{
				storage.NewListSeries(labels.FromStrings("a", "a"),
					[]chunks.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
				),
				storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
					[]chunks.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
				),
			}),
			expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
				storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
					[]chunks.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}},
					[]chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
				),
				storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
					[]chunks.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}},
					[]chunks.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
				),
			}),
		},
		{
			// This test runs a query disabling trimming. All chunks containing at least 1 sample within the queried
			// time range will be returned.
			mint:  5,
			maxt:  6,
			hints: &storage.SelectHints{Start: 5, End: 6, DisableTrimming: true},
			ms:    []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
			exp: newMockSeriesSet([]storage.Series{
				storage.NewListSeries(labels.FromStrings("a", "a"),
					[]chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
				),
				storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
					[]chunks.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
				),
			}),
			expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
				storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
					[]chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
				),
				storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
					[]chunks.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
				),
			}),
		},
	} {
		t.Run("", func(t *testing.T) {
			ir, cr, _, _ := createIdxChkReaders(t, testData)
			testBlockQuerier(t, c, ir, cr, tombstones.NewMemTombstones())
		})
	}
}
|
|
|
|
|
2020-07-31 08:03:02 -07:00
|
|
|
// TestBlockQuerier_AgainstHeadWithOpenChunks runs the same querier test cases
// against a live Head (whose chunks are still open), appending testData into a
// fresh Head per case. With open head chunks each series comes back as a
// single chunk, hence the single-chunk expChks below.
func TestBlockQuerier_AgainstHeadWithOpenChunks(t *testing.T) {
	for _, c := range []blockQuerierTestCase{
		{
			mint:    0,
			maxt:    0,
			ms:      []*labels.Matcher{},
			exp:     newMockSeriesSet([]storage.Series{}),
			expChks: newMockChunkSeriesSet([]storage.ChunkSeries{}),
		},
		{
			mint:    0,
			maxt:    0,
			ms:      []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
			exp:     newMockSeriesSet([]storage.Series{}),
			expChks: newMockChunkSeriesSet([]storage.ChunkSeries{}),
		},
		{
			// Inverted range (mint > maxt) selects nothing.
			mint:    1,
			maxt:    0,
			ms:      []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
			exp:     newMockSeriesSet([]storage.Series{}),
			expChks: newMockChunkSeriesSet([]storage.ChunkSeries{}),
		},
		{
			// Matcher that matches no series.
			mint:    math.MinInt64,
			maxt:    math.MaxInt64,
			ms:      []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "x")},
			exp:     newMockSeriesSet([]storage.Series{}),
			expChks: newMockChunkSeriesSet([]storage.ChunkSeries{}),
		},
		{
			// Full range, regexp matching every series.
			mint: math.MinInt64,
			maxt: math.MaxInt64,
			ms:   []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "a", ".*")},
			exp: newMockSeriesSet([]storage.Series{
				storage.NewListSeries(labels.FromStrings("a", "a"),
					[]chunks.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
				),
				storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
					[]chunks.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
				),
				storage.NewListSeries(labels.FromStrings("b", "b"),
					[]chunks.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}, sample{5, 1, nil, nil}, sample{6, 7, nil, nil}, sample{7, 2, nil, nil}},
				),
			}),
			expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
				storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
					[]chunks.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
				),
				storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
					[]chunks.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
				),
				storage.NewListChunkSeriesFromSamples(labels.FromStrings("b", "b"),
					[]chunks.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}, sample{5, 1, nil, nil}, sample{6, 7, nil, nil}, sample{7, 2, nil, nil}},
				),
			}),
		},
		{
			// Sub-range query with trimming.
			mint: 2,
			maxt: 6,
			ms:   []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
			exp: newMockSeriesSet([]storage.Series{
				storage.NewListSeries(labels.FromStrings("a", "a"),
					[]chunks.Sample{sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
				),
				storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
					[]chunks.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
				),
			}),
			expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
				storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
					[]chunks.Sample{sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
				),
				storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
					[]chunks.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
				),
			}),
		},
	} {
		t.Run("", func(t *testing.T) {
			opts := DefaultHeadOptions()
			opts.ChunkRange = 2 * time.Hour.Milliseconds()
			h, err := NewHead(nil, nil, nil, nil, opts, nil)
			require.NoError(t, err)
			defer h.Close()

			// Feed the fixture into the head sample by sample.
			app := h.Appender(context.Background())
			for _, s := range testData {
				for _, chk := range s.chunks {
					for _, sample := range chk {
						_, err = app.Append(0, labels.FromMap(s.lset), sample.t, sample.f)
						require.NoError(t, err)
					}
				}
			}
			require.NoError(t, app.Commit())

			// Query through a RangeHead restricted to the case's time range.
			hr := NewRangeHead(h, c.mint, c.maxt)
			ir, err := hr.Index()
			require.NoError(t, err)
			defer ir.Close()

			cr, err := hr.Chunks()
			require.NoError(t, err)
			defer cr.Close()

			testBlockQuerier(t, c, ir, cr, tombstones.NewMemTombstones())
		})
	}
}
|
2017-04-09 07:00:25 -07:00
|
|
|
|
2023-05-17 06:15:12 -07:00
|
|
|
// TestBlockQuerier_TrimmingDoesNotModifyOriginalTombstoneIntervals verifies
// that querying with mint/maxt trimming operates on a copy of the tombstone
// intervals: after the query, the stored intervals must be unchanged.
func TestBlockQuerier_TrimmingDoesNotModifyOriginalTombstoneIntervals(t *testing.T) {
	ctx := context.Background()
	c := blockQuerierTestCase{
		mint: 2,
		maxt: 6,
		ms:   []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "a", "a")},
		exp: newMockSeriesSet([]storage.Series{
			storage.NewListSeries(labels.FromStrings("a", "a"),
				[]chunks.Sample{sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
			),
			storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
				[]chunks.Sample{sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
			),
		}),
		expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
			storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
				[]chunks.Sample{sample{3, 4, nil, nil}}, []chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
			),
			storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
				[]chunks.Sample{sample{3, 3, nil, nil}}, []chunks.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
			),
		}),
	}
	ir, cr, _, _ := createIdxChkReaders(t, testData)
	stones := tombstones.NewMemTombstones()
	// Delete [1, 2] on every series matching {a="a"}.
	p, err := ir.Postings(ctx, "a", "a")
	require.NoError(t, err)
	refs, err := index.ExpandPostings(p)
	require.NoError(t, err)
	for _, ref := range refs {
		stones.AddInterval(ref, tombstones.Interval{Mint: 1, Maxt: 2})
	}
	testBlockQuerier(t, c, ir, cr, stones)
	for _, ref := range refs {
		intervals, err := stones.Get(ref)
		require.NoError(t, err)
		// Without copy, the intervals could be [math.MinInt64, 2].
		require.Equal(t, tombstones.Intervals{{Mint: 1, Maxt: 2}}, intervals)
	}
}
|
|
|
|
|
2020-07-31 08:03:02 -07:00
|
|
|
// testData is the shared fixture used by the block querier tests: three
// series, each stored as two chunks of float samples (fields: t, f, h, fh —
// histogram pointers are nil for plain float data).
var testData = []seriesSamples{
	{
		lset: map[string]string{"a": "a"},
		chunks: [][]sample{
			{{1, 2, nil, nil}, {2, 3, nil, nil}, {3, 4, nil, nil}},
			{{5, 2, nil, nil}, {6, 3, nil, nil}, {7, 4, nil, nil}},
		},
	},
	{
		lset: map[string]string{"a": "a", "b": "b"},
		chunks: [][]sample{
			{{1, 1, nil, nil}, {2, 2, nil, nil}, {3, 3, nil, nil}},
			{{5, 3, nil, nil}, {6, 6, nil, nil}},
		},
	},
	{
		lset: map[string]string{"b": "b"},
		chunks: [][]sample{
			{{1, 3, nil, nil}, {2, 2, nil, nil}, {3, 6, nil, nil}},
			{{5, 1, nil, nil}, {6, 7, nil, nil}, {7, 2, nil, nil}},
		},
	},
}
|
2017-04-09 07:00:25 -07:00
|
|
|
|
2020-07-31 08:03:02 -07:00
|
|
|
// TestBlockQuerierDelete verifies that the block querier honors tombstones:
// samples covered by a series' deletion intervals must not appear in either
// the series or the chunk-series results.
func TestBlockQuerierDelete(t *testing.T) {
	// One Intervals entry per series in testData (by series ref order):
	// {a="a"} loses t in [1,3]; {a="a",b="b"} loses [1,3] and [6,10];
	// {b="b"} loses [6,10].
	stones := tombstones.NewTestMemTombstones([]tombstones.Intervals{
		{{Mint: 1, Maxt: 3}},
		{{Mint: 1, Maxt: 3}, {Mint: 6, Maxt: 10}},
		{{Mint: 6, Maxt: 10}},
	})

	for _, c := range []blockQuerierTestCase{
		// Empty time range and no matchers: nothing selected.
		{
			mint:    0,
			maxt:    0,
			ms:      []*labels.Matcher{},
			exp:     newMockSeriesSet([]storage.Series{}),
			expChks: newMockChunkSeriesSet([]storage.ChunkSeries{}),
		},
		// Empty time range with a matching matcher: still nothing selected.
		{
			mint:    0,
			maxt:    0,
			ms:      []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
			exp:     newMockSeriesSet([]storage.Series{}),
			expChks: newMockChunkSeriesSet([]storage.ChunkSeries{}),
		},
		// Inverted time range (mint > maxt): nothing selected.
		{
			mint:    1,
			maxt:    0,
			ms:      []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
			exp:     newMockSeriesSet([]storage.Series{}),
			expChks: newMockChunkSeriesSet([]storage.ChunkSeries{}),
		},
		// Full time range but the matcher matches no series.
		{
			mint:    math.MinInt64,
			maxt:    math.MaxInt64,
			ms:      []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "x")},
			exp:     newMockSeriesSet([]storage.Series{}),
			expChks: newMockChunkSeriesSet([]storage.ChunkSeries{}),
		},
		// Full time range, all series matched: only samples outside the
		// per-series tombstone intervals survive.
		{
			mint: math.MinInt64,
			maxt: math.MaxInt64,
			ms:   []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "a", ".*")},
			exp: newMockSeriesSet([]storage.Series{
				storage.NewListSeries(labels.FromStrings("a", "a"),
					[]chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
				),
				storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
					[]chunks.Sample{sample{5, 3, nil, nil}},
				),
				storage.NewListSeries(labels.FromStrings("b", "b"),
					[]chunks.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}, sample{5, 1, nil, nil}},
				),
			}),
			expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
				storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
					[]chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
				),
				storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
					[]chunks.Sample{sample{5, 3, nil, nil}},
				),
				// {b="b"} keeps its original two-chunk layout after deletion.
				storage.NewListChunkSeriesFromSamples(labels.FromStrings("b", "b"),
					[]chunks.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}}, []chunks.Sample{sample{5, 1, nil, nil}},
				),
			}),
		},
		// Partial time range [2,6] combined with tombstones.
		{
			mint: 2,
			maxt: 6,
			ms:   []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
			exp: newMockSeriesSet([]storage.Series{
				storage.NewListSeries(labels.FromStrings("a", "a"),
					[]chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
				),
				storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
					[]chunks.Sample{sample{5, 3, nil, nil}},
				),
			}),
			expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
				storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
					[]chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
				),
				storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
					[]chunks.Sample{sample{5, 3, nil, nil}},
				),
			}),
		},
	} {
		t.Run("", func(t *testing.T) {
			ir, cr, _, _ := createIdxChkReaders(t, testData)
			testBlockQuerier(t, c, ir, cr, stones)
		})
	}
}
|
2017-04-09 07:00:25 -07:00
|
|
|
|
2020-07-31 08:03:02 -07:00
|
|
|
// fakeChunksReader is a ChunkReader test double that serves chunks or
// iterables straight from in-memory maps keyed by chunk reference. A given
// ref is expected to live in at most one of the two maps.
type fakeChunksReader struct {
	ChunkReader
	// chks maps a chunk ref to a fully materialized chunk.
	chks map[chunks.ChunkRef]chunkenc.Chunk
	// iterables maps a chunk ref to an iterable (no materialized chunk).
	iterables map[chunks.ChunkRef]chunkenc.Iterable
}
|
2017-04-09 07:00:25 -07:00
|
|
|
|
2023-08-24 06:21:17 -07:00
|
|
|
func createFakeReaderAndNotPopulatedChunks(s ...[]chunks.Sample) (*fakeChunksReader, []chunks.Meta) {
|
2020-07-31 08:03:02 -07:00
|
|
|
f := &fakeChunksReader{
|
2023-11-28 02:14:29 -08:00
|
|
|
chks: map[chunks.ChunkRef]chunkenc.Chunk{},
|
|
|
|
iterables: map[chunks.ChunkRef]chunkenc.Iterable{},
|
2020-07-31 08:03:02 -07:00
|
|
|
}
|
|
|
|
chks := make([]chunks.Meta, 0, len(s))
|
2017-04-09 07:00:25 -07:00
|
|
|
|
2020-07-31 08:03:02 -07:00
|
|
|
for ref, samples := range s {
|
2023-08-24 06:21:17 -07:00
|
|
|
chk, _ := chunks.ChunkFromSamples(samples)
|
2021-11-06 03:10:04 -07:00
|
|
|
f.chks[chunks.ChunkRef(ref)] = chk.Chunk
|
2017-04-09 07:00:25 -07:00
|
|
|
|
2020-07-31 08:03:02 -07:00
|
|
|
chks = append(chks, chunks.Meta{
|
2021-11-06 03:10:04 -07:00
|
|
|
Ref: chunks.ChunkRef(ref),
|
2020-07-31 08:03:02 -07:00
|
|
|
MinTime: chk.MinTime,
|
|
|
|
MaxTime: chk.MaxTime,
|
|
|
|
})
|
2017-04-09 07:00:25 -07:00
|
|
|
}
|
2020-07-31 08:03:02 -07:00
|
|
|
return f, chks
|
2017-04-09 07:00:25 -07:00
|
|
|
}
|
|
|
|
|
2023-11-28 02:14:29 -08:00
|
|
|
// Samples in each slice are assumed to be sorted.
|
|
|
|
func createFakeReaderAndIterables(s ...[]chunks.Sample) (*fakeChunksReader, []chunks.Meta) {
|
|
|
|
f := &fakeChunksReader{
|
|
|
|
chks: map[chunks.ChunkRef]chunkenc.Chunk{},
|
|
|
|
iterables: map[chunks.ChunkRef]chunkenc.Iterable{},
|
|
|
|
}
|
|
|
|
chks := make([]chunks.Meta, 0, len(s))
|
|
|
|
|
|
|
|
for ref, samples := range s {
|
|
|
|
f.iterables[chunks.ChunkRef(ref)] = &mockIterable{s: samples}
|
|
|
|
|
|
|
|
var minTime, maxTime int64
|
|
|
|
if len(samples) > 0 {
|
|
|
|
minTime = samples[0].T()
|
|
|
|
maxTime = samples[len(samples)-1].T()
|
|
|
|
}
|
|
|
|
chks = append(chks, chunks.Meta{
|
|
|
|
Ref: chunks.ChunkRef(ref),
|
|
|
|
MinTime: minTime,
|
|
|
|
MaxTime: maxTime,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
return f, chks
|
|
|
|
}
|
|
|
|
|
|
|
|
func (r *fakeChunksReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) {
|
|
|
|
if chk, ok := r.chks[meta.Ref]; ok {
|
|
|
|
return chk, nil, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
if it, ok := r.iterables[meta.Ref]; ok {
|
|
|
|
return nil, it, nil
|
|
|
|
}
|
|
|
|
return nil, nil, fmt.Errorf("chunk or iterable not found at ref %v", meta.Ref)
|
|
|
|
}
|
|
|
|
|
|
|
|
// mockIterable is a chunkenc.Iterable backed by a static sample slice.
type mockIterable struct {
	s []chunks.Sample
}
|
|
|
|
|
|
|
|
func (it *mockIterable) Iterator(chunkenc.Iterator) chunkenc.Iterator {
|
|
|
|
return &mockSampleIterator{
|
|
|
|
s: it.s,
|
|
|
|
idx: -1,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// mockSampleIterator iterates over a static sample slice.
type mockSampleIterator struct {
	s []chunks.Sample
	// idx is the position of the current sample; -1 means iteration has not
	// started yet.
	idx int
}
|
|
|
|
|
|
|
|
func (it *mockSampleIterator) Seek(t int64) chunkenc.ValueType {
|
|
|
|
for ; it.idx < len(it.s); it.idx++ {
|
|
|
|
if it.idx != -1 && it.s[it.idx].T() >= t {
|
|
|
|
return it.s[it.idx].Type()
|
|
|
|
}
|
2020-07-31 08:03:02 -07:00
|
|
|
}
|
2023-11-28 02:14:29 -08:00
|
|
|
|
|
|
|
return chunkenc.ValNone
|
|
|
|
}
|
|
|
|
|
|
|
|
func (it *mockSampleIterator) At() (int64, float64) {
|
|
|
|
return it.s[it.idx].T(), it.s[it.idx].F()
|
|
|
|
}
|
|
|
|
|
2024-01-23 08:02:14 -08:00
|
|
|
func (it *mockSampleIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) {
|
2023-11-28 02:14:29 -08:00
|
|
|
return it.s[it.idx].T(), it.s[it.idx].H()
|
|
|
|
}
|
|
|
|
|
2024-01-23 08:02:14 -08:00
|
|
|
func (it *mockSampleIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
|
2023-11-28 02:14:29 -08:00
|
|
|
return it.s[it.idx].T(), it.s[it.idx].FH()
|
2017-04-09 07:00:25 -07:00
|
|
|
}
|
|
|
|
|
2023-11-28 02:14:29 -08:00
|
|
|
func (it *mockSampleIterator) AtT() int64 {
|
|
|
|
return it.s[it.idx].T()
|
|
|
|
}
|
|
|
|
|
|
|
|
func (it *mockSampleIterator) Next() chunkenc.ValueType {
|
|
|
|
if it.idx < len(it.s)-1 {
|
|
|
|
it.idx++
|
|
|
|
return it.s[it.idx].Type()
|
|
|
|
}
|
|
|
|
|
|
|
|
return chunkenc.ValNone
|
|
|
|
}
|
|
|
|
|
|
|
|
// Err always returns nil; the mock iterator cannot fail.
func (it *mockSampleIterator) Err() error { return nil }
|
|
|
|
|
2020-07-31 08:03:02 -07:00
|
|
|
func TestPopulateWithTombSeriesIterators(t *testing.T) {
|
2023-11-01 04:53:41 -07:00
|
|
|
type minMaxTimes struct {
|
|
|
|
minTime, maxTime int64
|
|
|
|
}
|
2020-07-31 08:03:02 -07:00
|
|
|
cases := []struct {
|
2023-11-28 02:14:29 -08:00
|
|
|
name string
|
|
|
|
samples [][]chunks.Sample
|
2017-04-09 07:00:25 -07:00
|
|
|
|
2023-11-01 04:53:41 -07:00
|
|
|
expected []chunks.Sample
|
|
|
|
expectedChks []chunks.Meta
|
|
|
|
expectedMinMaxTimes []minMaxTimes
|
2017-04-13 12:07:21 -07:00
|
|
|
|
2020-07-31 08:03:02 -07:00
|
|
|
intervals tombstones.Intervals
|
|
|
|
|
|
|
|
// Seek being zero means do not test seek.
|
|
|
|
seek int64
|
|
|
|
seekSuccess bool
|
2023-11-28 02:14:29 -08:00
|
|
|
|
|
|
|
// Set this to true if a sample slice will form multiple chunks.
|
|
|
|
skipChunkTest bool
|
|
|
|
|
|
|
|
skipIterableTest bool
|
2017-04-09 07:00:25 -07:00
|
|
|
}{
|
|
|
|
{
|
2023-11-28 02:14:29 -08:00
|
|
|
name: "no chunk",
|
|
|
|
samples: [][]chunks.Sample{},
|
2017-04-09 07:00:25 -07:00
|
|
|
},
|
|
|
|
{
|
2023-11-28 02:14:29 -08:00
|
|
|
name: "one empty chunk", // This should never happen.
|
|
|
|
samples: [][]chunks.Sample{{}},
|
2020-07-31 08:03:02 -07:00
|
|
|
|
|
|
|
expectedChks: []chunks.Meta{
|
2023-08-24 06:21:17 -07:00
|
|
|
assureChunkFromSamples(t, []chunks.Sample{}),
|
2017-04-09 07:00:25 -07:00
|
|
|
},
|
2023-11-01 04:53:41 -07:00
|
|
|
expectedMinMaxTimes: []minMaxTimes{{0, 0}},
|
2023-11-28 02:14:29 -08:00
|
|
|
// iterables with no samples will return no chunks instead of empty chunks
|
|
|
|
skipIterableTest: true,
|
2020-07-31 08:03:02 -07:00
|
|
|
},
|
|
|
|
{
|
2023-11-28 02:14:29 -08:00
|
|
|
name: "one empty iterable",
|
|
|
|
samples: [][]chunks.Sample{{}},
|
|
|
|
|
|
|
|
// iterables with no samples will return no chunks
|
|
|
|
expectedChks: nil,
|
|
|
|
skipChunkTest: true,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "three empty chunks", // This should never happen.
|
|
|
|
samples: [][]chunks.Sample{{}, {}, {}},
|
2017-04-09 07:00:25 -07:00
|
|
|
|
2020-07-31 08:03:02 -07:00
|
|
|
expectedChks: []chunks.Meta{
|
2023-08-24 06:21:17 -07:00
|
|
|
assureChunkFromSamples(t, []chunks.Sample{}),
|
|
|
|
assureChunkFromSamples(t, []chunks.Sample{}),
|
|
|
|
assureChunkFromSamples(t, []chunks.Sample{}),
|
2017-04-09 07:00:25 -07:00
|
|
|
},
|
2023-11-01 04:53:41 -07:00
|
|
|
expectedMinMaxTimes: []minMaxTimes{{0, 0}, {0, 0}, {0, 0}},
|
2023-11-28 02:14:29 -08:00
|
|
|
// iterables with no samples will return no chunks instead of empty chunks
|
|
|
|
skipIterableTest: true,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "three empty iterables",
|
|
|
|
samples: [][]chunks.Sample{{}, {}, {}},
|
|
|
|
|
|
|
|
// iterables with no samples will return no chunks
|
|
|
|
expectedChks: nil,
|
|
|
|
skipChunkTest: true,
|
2017-04-09 07:00:25 -07:00
|
|
|
},
|
|
|
|
{
|
2020-07-31 08:03:02 -07:00
|
|
|
name: "one chunk",
|
2023-11-28 02:14:29 -08:00
|
|
|
samples: [][]chunks.Sample{
|
2021-11-28 23:54:23 -08:00
|
|
|
{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
|
2020-07-31 08:03:02 -07:00
|
|
|
},
|
|
|
|
|
2023-08-24 06:21:17 -07:00
|
|
|
expected: []chunks.Sample{
|
2021-11-28 23:54:23 -08:00
|
|
|
sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil},
|
2017-04-09 07:00:25 -07:00
|
|
|
},
|
2020-07-31 08:03:02 -07:00
|
|
|
expectedChks: []chunks.Meta{
|
2023-08-24 06:21:17 -07:00
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
2021-11-28 23:54:23 -08:00
|
|
|
sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil},
|
2020-07-31 08:03:02 -07:00
|
|
|
}),
|
|
|
|
},
|
2023-11-01 04:53:41 -07:00
|
|
|
expectedMinMaxTimes: []minMaxTimes{{1, 6}},
|
2020-07-31 08:03:02 -07:00
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "two full chunks",
|
2023-11-28 02:14:29 -08:00
|
|
|
samples: [][]chunks.Sample{
|
2021-11-28 23:54:23 -08:00
|
|
|
{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
|
|
|
|
{sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
|
2017-04-09 07:00:25 -07:00
|
|
|
},
|
|
|
|
|
2023-08-24 06:21:17 -07:00
|
|
|
expected: []chunks.Sample{
|
2021-11-28 23:54:23 -08:00
|
|
|
sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil}, sample{9, 8, nil, nil},
|
2017-04-09 07:00:25 -07:00
|
|
|
},
|
2020-07-31 08:03:02 -07:00
|
|
|
expectedChks: []chunks.Meta{
|
2023-08-24 06:21:17 -07:00
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
2021-11-28 23:54:23 -08:00
|
|
|
sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil},
|
2020-07-31 08:03:02 -07:00
|
|
|
}),
|
2023-08-24 06:21:17 -07:00
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
2021-11-28 23:54:23 -08:00
|
|
|
sample{7, 89, nil, nil}, sample{9, 8, nil, nil},
|
2020-07-31 08:03:02 -07:00
|
|
|
}),
|
|
|
|
},
|
2023-11-01 04:53:41 -07:00
|
|
|
expectedMinMaxTimes: []minMaxTimes{{1, 6}, {7, 9}},
|
2017-04-09 07:00:25 -07:00
|
|
|
},
|
|
|
|
{
|
2020-07-31 08:03:02 -07:00
|
|
|
name: "three full chunks",
|
2023-11-28 02:14:29 -08:00
|
|
|
samples: [][]chunks.Sample{
|
2021-11-28 23:54:23 -08:00
|
|
|
{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
|
|
|
|
{sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
|
|
|
|
{sample{10, 22, nil, nil}, sample{203, 3493, nil, nil}},
|
2017-04-09 07:00:25 -07:00
|
|
|
},
|
|
|
|
|
2023-08-24 06:21:17 -07:00
|
|
|
expected: []chunks.Sample{
|
2021-11-28 23:54:23 -08:00
|
|
|
sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil}, sample{9, 8, nil, nil}, sample{10, 22, nil, nil}, sample{203, 3493, nil, nil},
|
2017-04-09 07:00:25 -07:00
|
|
|
},
|
2020-07-31 08:03:02 -07:00
|
|
|
expectedChks: []chunks.Meta{
|
2023-08-24 06:21:17 -07:00
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
2021-11-28 23:54:23 -08:00
|
|
|
sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil},
|
2020-07-31 08:03:02 -07:00
|
|
|
}),
|
2023-08-24 06:21:17 -07:00
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
2021-11-28 23:54:23 -08:00
|
|
|
sample{7, 89, nil, nil}, sample{9, 8, nil, nil},
|
2020-07-31 08:03:02 -07:00
|
|
|
}),
|
2023-08-24 06:21:17 -07:00
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
2021-11-28 23:54:23 -08:00
|
|
|
sample{10, 22, nil, nil}, sample{203, 3493, nil, nil},
|
2020-07-31 08:03:02 -07:00
|
|
|
}),
|
|
|
|
},
|
2023-11-01 04:53:41 -07:00
|
|
|
expectedMinMaxTimes: []minMaxTimes{{1, 6}, {7, 9}, {10, 203}},
|
2017-04-09 07:00:25 -07:00
|
|
|
},
|
2020-07-31 08:03:02 -07:00
|
|
|
// Seek cases.
|
2017-04-09 07:00:25 -07:00
|
|
|
{
|
2023-11-28 02:14:29 -08:00
|
|
|
name: "three empty chunks and seek", // This should never happen.
|
|
|
|
samples: [][]chunks.Sample{{}, {}, {}},
|
|
|
|
seek: 1,
|
2017-04-09 07:00:25 -07:00
|
|
|
|
2020-07-31 08:03:02 -07:00
|
|
|
seekSuccess: false,
|
2017-04-09 07:00:25 -07:00
|
|
|
},
|
|
|
|
{
|
2020-07-31 08:03:02 -07:00
|
|
|
name: "two chunks and seek beyond chunks",
|
2023-11-28 02:14:29 -08:00
|
|
|
samples: [][]chunks.Sample{
|
2021-11-28 23:54:23 -08:00
|
|
|
{sample{1, 2, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
|
|
|
|
{sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
|
2017-04-09 07:00:25 -07:00
|
|
|
},
|
2020-07-31 08:03:02 -07:00
|
|
|
seek: 10,
|
2017-04-09 07:00:25 -07:00
|
|
|
|
2020-07-31 08:03:02 -07:00
|
|
|
seekSuccess: false,
|
2017-04-09 07:00:25 -07:00
|
|
|
},
|
|
|
|
{
|
2020-07-31 08:03:02 -07:00
|
|
|
name: "two chunks and seek on middle of first chunk",
|
2023-11-28 02:14:29 -08:00
|
|
|
samples: [][]chunks.Sample{
|
2021-11-28 23:54:23 -08:00
|
|
|
{sample{1, 2, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
|
|
|
|
{sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
|
2017-04-09 07:00:25 -07:00
|
|
|
},
|
2020-07-31 08:03:02 -07:00
|
|
|
seek: 2,
|
2017-04-09 07:00:25 -07:00
|
|
|
|
2020-07-31 08:03:02 -07:00
|
|
|
seekSuccess: true,
|
2023-08-24 06:21:17 -07:00
|
|
|
expected: []chunks.Sample{
|
2021-11-28 23:54:23 -08:00
|
|
|
sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil}, sample{9, 8, nil, nil},
|
2017-04-09 07:00:25 -07:00
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
2020-07-31 08:03:02 -07:00
|
|
|
name: "two chunks and seek before first chunk",
|
2023-11-28 02:14:29 -08:00
|
|
|
samples: [][]chunks.Sample{
|
2021-11-28 23:54:23 -08:00
|
|
|
{sample{1, 2, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
|
|
|
|
{sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
|
2017-04-09 07:00:25 -07:00
|
|
|
},
|
2020-07-31 08:03:02 -07:00
|
|
|
seek: -32,
|
|
|
|
|
|
|
|
seekSuccess: true,
|
2023-08-24 06:21:17 -07:00
|
|
|
expected: []chunks.Sample{
|
2021-11-28 23:54:23 -08:00
|
|
|
sample{1, 2, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil}, sample{9, 8, nil, nil},
|
2017-04-09 07:00:25 -07:00
|
|
|
},
|
2020-07-31 08:03:02 -07:00
|
|
|
},
|
|
|
|
// Deletion / Trim cases.
|
|
|
|
{
|
|
|
|
name: "no chunk with deletion interval",
|
2023-11-28 02:14:29 -08:00
|
|
|
samples: [][]chunks.Sample{},
|
2020-07-31 08:03:02 -07:00
|
|
|
intervals: tombstones.Intervals{{Mint: 20, Maxt: 21}},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "two chunks with trimmed first and last samples from edge chunks",
|
2023-11-28 02:14:29 -08:00
|
|
|
samples: [][]chunks.Sample{
|
2021-11-28 23:54:23 -08:00
|
|
|
{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
|
|
|
|
{sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
|
2017-04-09 07:00:25 -07:00
|
|
|
},
|
2020-07-31 08:03:02 -07:00
|
|
|
intervals: tombstones.Intervals{{Mint: math.MinInt64, Maxt: 2}}.Add(tombstones.Interval{Mint: 9, Maxt: math.MaxInt64}),
|
2017-04-09 07:00:25 -07:00
|
|
|
|
2023-08-24 06:21:17 -07:00
|
|
|
expected: []chunks.Sample{
|
2021-11-28 23:54:23 -08:00
|
|
|
sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil},
|
2020-07-31 08:03:02 -07:00
|
|
|
},
|
|
|
|
expectedChks: []chunks.Meta{
|
2023-08-24 06:21:17 -07:00
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
2021-11-28 23:54:23 -08:00
|
|
|
sample{3, 5, nil, nil}, sample{6, 1, nil, nil},
|
2020-07-31 08:03:02 -07:00
|
|
|
}),
|
2023-08-24 06:21:17 -07:00
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
2021-11-28 23:54:23 -08:00
|
|
|
sample{7, 89, nil, nil},
|
2020-07-31 08:03:02 -07:00
|
|
|
}),
|
2017-04-09 07:00:25 -07:00
|
|
|
},
|
2023-11-01 04:53:41 -07:00
|
|
|
expectedMinMaxTimes: []minMaxTimes{{3, 6}, {7, 7}},
|
2017-04-09 07:00:25 -07:00
|
|
|
},
|
|
|
|
{
|
2020-07-31 08:03:02 -07:00
|
|
|
name: "two chunks with trimmed middle sample of first chunk",
|
2023-11-28 02:14:29 -08:00
|
|
|
samples: [][]chunks.Sample{
|
2021-11-28 23:54:23 -08:00
|
|
|
{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
|
|
|
|
{sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
|
2017-04-09 07:00:25 -07:00
|
|
|
},
|
2020-07-31 08:03:02 -07:00
|
|
|
intervals: tombstones.Intervals{{Mint: 2, Maxt: 3}},
|
|
|
|
|
2023-08-24 06:21:17 -07:00
|
|
|
expected: []chunks.Sample{
|
2021-11-28 23:54:23 -08:00
|
|
|
sample{1, 2, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil}, sample{9, 8, nil, nil},
|
2017-04-09 07:00:25 -07:00
|
|
|
},
|
2020-07-31 08:03:02 -07:00
|
|
|
expectedChks: []chunks.Meta{
|
2023-08-24 06:21:17 -07:00
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
2021-11-28 23:54:23 -08:00
|
|
|
sample{1, 2, nil, nil}, sample{6, 1, nil, nil},
|
2020-07-31 08:03:02 -07:00
|
|
|
}),
|
2023-08-24 06:21:17 -07:00
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
2021-11-28 23:54:23 -08:00
|
|
|
sample{7, 89, nil, nil}, sample{9, 8, nil, nil},
|
2020-07-31 08:03:02 -07:00
|
|
|
}),
|
2017-04-09 07:00:25 -07:00
|
|
|
},
|
2023-11-01 04:53:41 -07:00
|
|
|
expectedMinMaxTimes: []minMaxTimes{{1, 6}, {7, 9}},
|
2020-07-31 08:03:02 -07:00
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "two chunks with deletion across two chunks",
|
2023-11-28 02:14:29 -08:00
|
|
|
samples: [][]chunks.Sample{
|
2021-11-28 23:54:23 -08:00
|
|
|
{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
|
|
|
|
{sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
|
2020-07-31 08:03:02 -07:00
|
|
|
},
|
|
|
|
intervals: tombstones.Intervals{{Mint: 6, Maxt: 7}},
|
2017-04-09 07:00:25 -07:00
|
|
|
|
2023-08-24 06:21:17 -07:00
|
|
|
expected: []chunks.Sample{
|
2021-11-28 23:54:23 -08:00
|
|
|
sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{9, 8, nil, nil},
|
2020-07-31 08:03:02 -07:00
|
|
|
},
|
|
|
|
expectedChks: []chunks.Meta{
|
2023-08-24 06:21:17 -07:00
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
2021-11-28 23:54:23 -08:00
|
|
|
sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil},
|
2020-07-31 08:03:02 -07:00
|
|
|
}),
|
2023-08-24 06:21:17 -07:00
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
2021-11-28 23:54:23 -08:00
|
|
|
sample{9, 8, nil, nil},
|
2020-07-31 08:03:02 -07:00
|
|
|
}),
|
2017-04-09 07:00:25 -07:00
|
|
|
},
|
2023-11-01 04:53:41 -07:00
|
|
|
expectedMinMaxTimes: []minMaxTimes{{1, 3}, {9, 9}},
|
2017-04-09 07:00:25 -07:00
|
|
|
},
|
2023-11-29 02:24:04 -08:00
|
|
|
{
|
|
|
|
name: "two chunks with first chunk deleted",
|
|
|
|
samples: [][]chunks.Sample{
|
|
|
|
{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
|
|
|
|
{sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
|
|
|
|
},
|
|
|
|
intervals: tombstones.Intervals{{Mint: 1, Maxt: 6}},
|
|
|
|
|
|
|
|
expected: []chunks.Sample{
|
|
|
|
sample{7, 89, nil, nil}, sample{9, 8, nil, nil},
|
|
|
|
},
|
|
|
|
expectedChks: []chunks.Meta{
|
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
|
|
|
sample{7, 89, nil, nil}, sample{9, 8, nil, nil},
|
|
|
|
}),
|
|
|
|
},
|
|
|
|
expectedMinMaxTimes: []minMaxTimes{{7, 9}},
|
|
|
|
},
|
2020-07-31 08:03:02 -07:00
|
|
|
// Deletion with seek.
|
|
|
|
{
|
|
|
|
name: "two chunks with trimmed first and last samples from edge chunks, seek from middle of first chunk",
|
2023-11-28 02:14:29 -08:00
|
|
|
samples: [][]chunks.Sample{
|
2021-11-28 23:54:23 -08:00
|
|
|
{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
|
|
|
|
{sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
|
2020-07-31 08:03:02 -07:00
|
|
|
},
|
|
|
|
intervals: tombstones.Intervals{{Mint: math.MinInt64, Maxt: 2}}.Add(tombstones.Interval{Mint: 9, Maxt: math.MaxInt64}),
|
2017-04-09 07:00:25 -07:00
|
|
|
|
2020-07-31 08:03:02 -07:00
|
|
|
seek: 3,
|
|
|
|
seekSuccess: true,
|
2023-08-24 06:21:17 -07:00
|
|
|
expected: []chunks.Sample{
|
2021-11-28 23:54:23 -08:00
|
|
|
sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil},
|
2020-07-31 08:03:02 -07:00
|
|
|
},
|
|
|
|
},
|
2023-11-28 02:14:29 -08:00
|
|
|
{
|
|
|
|
name: "one chunk where all samples are trimmed",
|
|
|
|
samples: [][]chunks.Sample{
|
|
|
|
{sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
|
|
|
|
{sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
|
|
|
|
},
|
|
|
|
intervals: tombstones.Intervals{{Mint: math.MinInt64, Maxt: 3}}.Add(tombstones.Interval{Mint: 4, Maxt: math.MaxInt64}),
|
|
|
|
|
|
|
|
expected: nil,
|
|
|
|
expectedChks: nil,
|
|
|
|
},
|
2023-05-19 01:24:06 -07:00
|
|
|
{
|
|
|
|
name: "one histogram chunk",
|
2023-11-28 02:14:29 -08:00
|
|
|
samples: [][]chunks.Sample{
|
2023-05-19 01:24:06 -07:00
|
|
|
{
|
|
|
|
sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
|
|
|
|
sample{2, 0, tsdbutil.GenerateTestHistogram(2), nil},
|
|
|
|
sample{3, 0, tsdbutil.GenerateTestHistogram(3), nil},
|
|
|
|
sample{6, 0, tsdbutil.GenerateTestHistogram(6), nil},
|
|
|
|
},
|
|
|
|
},
|
2023-08-24 06:21:17 -07:00
|
|
|
expected: []chunks.Sample{
|
2023-05-19 01:24:06 -07:00
|
|
|
sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
|
|
|
|
sample{2, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(2)), nil},
|
|
|
|
sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil},
|
|
|
|
sample{6, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(6)), nil},
|
|
|
|
},
|
|
|
|
expectedChks: []chunks.Meta{
|
2023-08-24 06:21:17 -07:00
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
2023-05-19 01:24:06 -07:00
|
|
|
sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
|
|
|
|
sample{2, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(2)), nil},
|
|
|
|
sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil},
|
|
|
|
sample{6, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(6)), nil},
|
|
|
|
}),
|
|
|
|
},
|
2023-11-01 04:53:41 -07:00
|
|
|
expectedMinMaxTimes: []minMaxTimes{{1, 6}},
|
2023-05-19 01:24:06 -07:00
|
|
|
},
|
|
|
|
{
|
2023-11-01 05:04:23 -07:00
|
|
|
name: "one histogram chunk intersect with earlier deletion interval",
|
2023-11-28 02:14:29 -08:00
|
|
|
samples: [][]chunks.Sample{
|
2023-11-01 05:04:23 -07:00
|
|
|
{
|
|
|
|
sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
|
|
|
|
sample{2, 0, tsdbutil.GenerateTestHistogram(2), nil},
|
|
|
|
sample{3, 0, tsdbutil.GenerateTestHistogram(3), nil},
|
|
|
|
sample{6, 0, tsdbutil.GenerateTestHistogram(6), nil},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
intervals: tombstones.Intervals{{Mint: 1, Maxt: 2}},
|
|
|
|
expected: []chunks.Sample{
|
|
|
|
sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil},
|
|
|
|
sample{6, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(6)), nil},
|
|
|
|
},
|
|
|
|
expectedChks: []chunks.Meta{
|
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
|
|
|
sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil},
|
|
|
|
sample{6, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(6)), nil},
|
|
|
|
}),
|
|
|
|
},
|
|
|
|
expectedMinMaxTimes: []minMaxTimes{{3, 6}},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "one histogram chunk intersect with later deletion interval",
|
2023-11-28 02:14:29 -08:00
|
|
|
samples: [][]chunks.Sample{
|
2023-05-19 01:24:06 -07:00
|
|
|
{
|
|
|
|
sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
|
|
|
|
sample{2, 0, tsdbutil.GenerateTestHistogram(2), nil},
|
|
|
|
sample{3, 0, tsdbutil.GenerateTestHistogram(3), nil},
|
|
|
|
sample{6, 0, tsdbutil.GenerateTestHistogram(6), nil},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
intervals: tombstones.Intervals{{Mint: 5, Maxt: 20}},
|
2023-08-24 06:21:17 -07:00
|
|
|
expected: []chunks.Sample{
|
2023-05-19 01:24:06 -07:00
|
|
|
sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
|
|
|
|
sample{2, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(2)), nil},
|
|
|
|
sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil},
|
|
|
|
},
|
|
|
|
expectedChks: []chunks.Meta{
|
2023-08-24 06:21:17 -07:00
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
2023-05-19 01:24:06 -07:00
|
|
|
sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
|
|
|
|
sample{2, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(2)), nil},
|
|
|
|
sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil},
|
|
|
|
}),
|
|
|
|
},
|
2023-11-01 04:53:41 -07:00
|
|
|
expectedMinMaxTimes: []minMaxTimes{{1, 3}},
|
2023-05-19 01:24:06 -07:00
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "one float histogram chunk",
|
2023-11-28 02:14:29 -08:00
|
|
|
samples: [][]chunks.Sample{
|
2023-05-19 01:24:06 -07:00
|
|
|
{
|
|
|
|
sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
|
|
|
|
sample{2, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)},
|
|
|
|
sample{3, 0, nil, tsdbutil.GenerateTestFloatHistogram(3)},
|
|
|
|
sample{6, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
|
|
|
|
},
|
|
|
|
},
|
2023-08-24 06:21:17 -07:00
|
|
|
expected: []chunks.Sample{
|
2023-05-19 01:24:06 -07:00
|
|
|
sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
|
|
|
|
sample{2, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(2))},
|
|
|
|
sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))},
|
|
|
|
sample{6, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(6))},
|
|
|
|
},
|
|
|
|
expectedChks: []chunks.Meta{
|
2023-08-24 06:21:17 -07:00
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
2023-05-19 01:24:06 -07:00
|
|
|
sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
|
|
|
|
sample{2, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(2))},
|
|
|
|
sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))},
|
|
|
|
sample{6, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(6))},
|
|
|
|
}),
|
|
|
|
},
|
2023-11-01 04:53:41 -07:00
|
|
|
expectedMinMaxTimes: []minMaxTimes{{1, 6}},
|
2023-05-19 01:24:06 -07:00
|
|
|
},
|
|
|
|
{
|
2023-11-01 05:04:23 -07:00
|
|
|
name: "one float histogram chunk intersect with earlier deletion interval",
|
2023-11-28 02:14:29 -08:00
|
|
|
samples: [][]chunks.Sample{
|
2023-11-01 05:04:23 -07:00
|
|
|
{
|
|
|
|
sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
|
|
|
|
sample{2, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)},
|
|
|
|
sample{3, 0, nil, tsdbutil.GenerateTestFloatHistogram(3)},
|
|
|
|
sample{6, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
intervals: tombstones.Intervals{{Mint: 1, Maxt: 2}},
|
|
|
|
expected: []chunks.Sample{
|
|
|
|
sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))},
|
|
|
|
sample{6, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(6))},
|
|
|
|
},
|
|
|
|
expectedChks: []chunks.Meta{
|
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
|
|
|
sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))},
|
|
|
|
sample{6, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(6))},
|
|
|
|
}),
|
|
|
|
},
|
|
|
|
expectedMinMaxTimes: []minMaxTimes{{3, 6}},
|
2023-05-19 01:24:06 -07:00
|
|
|
},
|
|
|
|
{
|
2023-11-01 05:04:23 -07:00
|
|
|
name: "one float histogram chunk intersect with later deletion interval",
|
2023-11-28 02:14:29 -08:00
|
|
|
samples: [][]chunks.Sample{
|
2023-05-19 01:24:06 -07:00
|
|
|
{
|
|
|
|
sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
|
|
|
|
sample{2, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)},
|
|
|
|
sample{3, 0, nil, tsdbutil.GenerateTestFloatHistogram(3)},
|
|
|
|
sample{6, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
intervals: tombstones.Intervals{{Mint: 5, Maxt: 20}},
|
2023-08-24 06:21:17 -07:00
|
|
|
expected: []chunks.Sample{
|
2023-05-19 01:24:06 -07:00
|
|
|
sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
|
|
|
|
sample{2, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(2))},
|
|
|
|
sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))},
|
|
|
|
},
|
|
|
|
expectedChks: []chunks.Meta{
|
2023-08-24 06:21:17 -07:00
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
2023-05-19 01:24:06 -07:00
|
|
|
sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
|
|
|
|
sample{2, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(2))},
|
|
|
|
sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))},
|
|
|
|
}),
|
|
|
|
},
|
2023-11-01 04:53:41 -07:00
|
|
|
expectedMinMaxTimes: []minMaxTimes{{1, 3}},
|
2023-05-19 01:24:06 -07:00
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "one gauge histogram chunk",
|
2023-11-28 02:14:29 -08:00
|
|
|
samples: [][]chunks.Sample{
|
2023-05-19 01:24:06 -07:00
|
|
|
{
|
|
|
|
sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
|
|
|
|
sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
|
|
|
|
sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
|
|
|
|
sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil},
|
|
|
|
},
|
|
|
|
},
|
2023-08-24 06:21:17 -07:00
|
|
|
expected: []chunks.Sample{
|
2023-05-19 01:24:06 -07:00
|
|
|
sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
|
|
|
|
sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
|
|
|
|
sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
|
|
|
|
sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil},
|
|
|
|
},
|
|
|
|
expectedChks: []chunks.Meta{
|
2023-08-24 06:21:17 -07:00
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
2023-05-19 01:24:06 -07:00
|
|
|
sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
|
|
|
|
sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
|
|
|
|
sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
|
|
|
|
sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil},
|
|
|
|
}),
|
|
|
|
},
|
2023-11-01 04:53:41 -07:00
|
|
|
expectedMinMaxTimes: []minMaxTimes{{1, 6}},
|
2023-05-19 01:24:06 -07:00
|
|
|
},
|
|
|
|
{
|
2023-11-01 05:04:23 -07:00
|
|
|
name: "one gauge histogram chunk intersect with earlier deletion interval",
|
2023-11-28 02:14:29 -08:00
|
|
|
samples: [][]chunks.Sample{
|
2023-11-01 05:04:23 -07:00
|
|
|
{
|
|
|
|
sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
|
|
|
|
sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
|
|
|
|
sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
|
|
|
|
sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
intervals: tombstones.Intervals{{Mint: 1, Maxt: 2}},
|
|
|
|
expected: []chunks.Sample{
|
|
|
|
sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
|
|
|
|
sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil},
|
|
|
|
},
|
|
|
|
expectedChks: []chunks.Meta{
|
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
|
|
|
sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
|
|
|
|
sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil},
|
|
|
|
}),
|
|
|
|
},
|
|
|
|
expectedMinMaxTimes: []minMaxTimes{{3, 6}},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "one gauge histogram chunk intersect with later deletion interval",
|
2023-11-28 02:14:29 -08:00
|
|
|
samples: [][]chunks.Sample{
|
2023-05-19 01:24:06 -07:00
|
|
|
{
|
|
|
|
sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
|
|
|
|
sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
|
|
|
|
sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
|
|
|
|
sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
intervals: tombstones.Intervals{{Mint: 5, Maxt: 20}},
|
2023-08-24 06:21:17 -07:00
|
|
|
expected: []chunks.Sample{
|
2023-05-19 01:24:06 -07:00
|
|
|
sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
|
|
|
|
sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
|
|
|
|
sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
|
|
|
|
},
|
|
|
|
expectedChks: []chunks.Meta{
|
2023-08-24 06:21:17 -07:00
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
2023-05-19 01:24:06 -07:00
|
|
|
sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
|
|
|
|
sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
|
|
|
|
sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
|
|
|
|
}),
|
|
|
|
},
|
2023-11-01 04:53:41 -07:00
|
|
|
expectedMinMaxTimes: []minMaxTimes{{1, 3}},
|
2023-05-19 01:24:06 -07:00
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "one gauge float histogram",
|
2023-11-28 02:14:29 -08:00
|
|
|
samples: [][]chunks.Sample{
|
2023-05-19 01:24:06 -07:00
|
|
|
{
|
|
|
|
sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
|
|
|
|
sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
|
|
|
|
sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
|
|
|
|
sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)},
|
|
|
|
},
|
|
|
|
},
|
2023-08-24 06:21:17 -07:00
|
|
|
expected: []chunks.Sample{
|
2023-05-19 01:24:06 -07:00
|
|
|
sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
|
|
|
|
sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
|
|
|
|
sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
|
|
|
|
sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)},
|
|
|
|
},
|
|
|
|
expectedChks: []chunks.Meta{
|
2023-08-24 06:21:17 -07:00
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
2023-05-19 01:24:06 -07:00
|
|
|
sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
|
|
|
|
sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
|
|
|
|
sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
|
|
|
|
sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)},
|
|
|
|
}),
|
|
|
|
},
|
2023-11-01 04:53:41 -07:00
|
|
|
expectedMinMaxTimes: []minMaxTimes{{1, 6}},
|
2023-05-19 01:24:06 -07:00
|
|
|
},
|
|
|
|
{
|
2023-11-01 05:04:23 -07:00
|
|
|
name: "one gauge float histogram chunk intersect with earlier deletion interval",
|
2023-11-28 02:14:29 -08:00
|
|
|
samples: [][]chunks.Sample{
|
2023-11-01 05:04:23 -07:00
|
|
|
{
|
|
|
|
sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
|
|
|
|
sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
|
|
|
|
sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
|
|
|
|
sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
intervals: tombstones.Intervals{{Mint: 1, Maxt: 2}},
|
|
|
|
expected: []chunks.Sample{
|
|
|
|
sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
|
|
|
|
sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)},
|
|
|
|
},
|
|
|
|
expectedChks: []chunks.Meta{
|
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
|
|
|
sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
|
|
|
|
sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)},
|
|
|
|
}),
|
|
|
|
},
|
|
|
|
expectedMinMaxTimes: []minMaxTimes{{3, 6}},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "one gauge float histogram chunk intersect with later deletion interval",
|
2023-11-28 02:14:29 -08:00
|
|
|
samples: [][]chunks.Sample{
|
2023-05-19 01:24:06 -07:00
|
|
|
{
|
|
|
|
sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
|
|
|
|
sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
|
|
|
|
sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
|
|
|
|
sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
intervals: tombstones.Intervals{{Mint: 5, Maxt: 20}},
|
2023-08-24 06:21:17 -07:00
|
|
|
expected: []chunks.Sample{
|
2023-05-19 01:24:06 -07:00
|
|
|
sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
|
|
|
|
sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
|
|
|
|
sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
|
|
|
|
},
|
|
|
|
expectedChks: []chunks.Meta{
|
2023-08-24 06:21:17 -07:00
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
2023-05-19 01:24:06 -07:00
|
|
|
sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
|
|
|
|
sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
|
|
|
|
sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
|
|
|
|
}),
|
|
|
|
},
|
2023-11-01 04:53:41 -07:00
|
|
|
expectedMinMaxTimes: []minMaxTimes{{1, 3}},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "three full mixed chunks",
|
2023-11-28 02:14:29 -08:00
|
|
|
samples: [][]chunks.Sample{
|
2023-11-01 04:53:41 -07:00
|
|
|
{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
|
|
|
|
{
|
|
|
|
sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil},
|
|
|
|
sample{9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
sample{10, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)},
|
|
|
|
sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
|
|
|
|
expected: []chunks.Sample{
|
|
|
|
sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil}, sample{9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil}, sample{10, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)}, sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
|
|
|
|
},
|
|
|
|
expectedChks: []chunks.Meta{
|
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
|
|
|
sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil},
|
|
|
|
}),
|
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
|
|
|
sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil},
|
|
|
|
sample{9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil},
|
|
|
|
}),
|
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
|
|
|
sample{10, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)},
|
|
|
|
sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
|
|
|
|
}),
|
|
|
|
},
|
|
|
|
expectedMinMaxTimes: []minMaxTimes{{1, 6}, {7, 9}, {10, 203}},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "three full mixed chunks in different order",
|
2023-11-28 02:14:29 -08:00
|
|
|
samples: [][]chunks.Sample{
|
2023-11-01 04:53:41 -07:00
|
|
|
{
|
|
|
|
sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil},
|
|
|
|
sample{9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil},
|
|
|
|
},
|
|
|
|
{sample{11, 2, nil, nil}, sample{12, 3, nil, nil}, sample{13, 5, nil, nil}, sample{16, 1, nil, nil}},
|
|
|
|
{
|
|
|
|
sample{100, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)},
|
|
|
|
sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
|
|
|
|
expected: []chunks.Sample{
|
|
|
|
sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil}, sample{9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil}, sample{11, 2, nil, nil}, sample{12, 3, nil, nil}, sample{13, 5, nil, nil}, sample{16, 1, nil, nil}, sample{100, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)}, sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
|
|
|
|
},
|
|
|
|
expectedChks: []chunks.Meta{
|
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
|
|
|
sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil},
|
|
|
|
sample{9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil},
|
|
|
|
}),
|
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
|
|
|
sample{11, 2, nil, nil}, sample{12, 3, nil, nil}, sample{13, 5, nil, nil}, sample{16, 1, nil, nil},
|
|
|
|
}),
|
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
|
|
|
sample{100, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)},
|
|
|
|
sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
|
|
|
|
}),
|
|
|
|
},
|
|
|
|
expectedMinMaxTimes: []minMaxTimes{{7, 9}, {11, 16}, {100, 203}},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "three full mixed chunks in different order intersect with deletion interval",
|
2023-11-28 02:14:29 -08:00
|
|
|
samples: [][]chunks.Sample{
|
2023-11-01 04:53:41 -07:00
|
|
|
{
|
|
|
|
sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil},
|
|
|
|
sample{9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil},
|
|
|
|
},
|
|
|
|
{sample{11, 2, nil, nil}, sample{12, 3, nil, nil}, sample{13, 5, nil, nil}, sample{16, 1, nil, nil}},
|
|
|
|
{
|
|
|
|
sample{100, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)},
|
|
|
|
sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
intervals: tombstones.Intervals{{Mint: 8, Maxt: 11}, {Mint: 15, Maxt: 150}},
|
|
|
|
|
|
|
|
expected: []chunks.Sample{
|
|
|
|
sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil}, sample{12, 3, nil, nil}, sample{13, 5, nil, nil}, sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
|
|
|
|
},
|
|
|
|
expectedChks: []chunks.Meta{
|
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
|
|
|
sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil},
|
|
|
|
}),
|
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
|
|
|
sample{12, 3, nil, nil}, sample{13, 5, nil, nil},
|
|
|
|
}),
|
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
|
|
|
sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
|
|
|
|
}),
|
|
|
|
},
|
|
|
|
expectedMinMaxTimes: []minMaxTimes{{7, 7}, {12, 13}, {203, 203}},
|
2023-05-19 01:24:06 -07:00
|
|
|
},
|
2023-11-01 05:04:23 -07:00
|
|
|
{
|
|
|
|
name: "three full mixed chunks overlapping",
|
2023-11-28 02:14:29 -08:00
|
|
|
samples: [][]chunks.Sample{
|
2023-11-01 05:04:23 -07:00
|
|
|
{
|
|
|
|
sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil},
|
|
|
|
sample{12, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil},
|
|
|
|
},
|
|
|
|
{sample{11, 2, nil, nil}, sample{12, 3, nil, nil}, sample{13, 5, nil, nil}, sample{16, 1, nil, nil}},
|
|
|
|
{
|
|
|
|
sample{10, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)},
|
|
|
|
sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
|
|
|
|
expected: []chunks.Sample{
|
|
|
|
sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil}, sample{12, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil}, sample{11, 2, nil, nil}, sample{12, 3, nil, nil}, sample{13, 5, nil, nil}, sample{16, 1, nil, nil}, sample{10, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)}, sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
|
|
|
|
},
|
|
|
|
expectedChks: []chunks.Meta{
|
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
|
|
|
sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil},
|
|
|
|
sample{12, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil},
|
|
|
|
}),
|
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
|
|
|
sample{11, 2, nil, nil}, sample{12, 3, nil, nil}, sample{13, 5, nil, nil}, sample{16, 1, nil, nil},
|
|
|
|
}),
|
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
|
|
|
sample{10, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)},
|
|
|
|
sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
|
|
|
|
}),
|
|
|
|
},
|
|
|
|
expectedMinMaxTimes: []minMaxTimes{{7, 12}, {11, 16}, {10, 203}},
|
2023-05-19 01:24:06 -07:00
|
|
|
},
|
2023-11-28 02:14:29 -08:00
|
|
|
{
|
|
|
|
// This case won't actually happen until OOO native histograms is implemented.
|
|
|
|
// Issue: https://github.com/prometheus/prometheus/issues/11220.
|
|
|
|
name: "int histogram iterables with counter resets",
|
|
|
|
samples: [][]chunks.Sample{
|
|
|
|
{
|
|
|
|
sample{7, 0, tsdbutil.GenerateTestHistogram(8), nil},
|
|
|
|
sample{8, 0, tsdbutil.GenerateTestHistogram(9), nil},
|
|
|
|
// Counter reset should be detected when chunks are created from the iterable.
|
|
|
|
sample{12, 0, tsdbutil.GenerateTestHistogram(5), nil},
|
|
|
|
sample{15, 0, tsdbutil.GenerateTestHistogram(6), nil},
|
|
|
|
sample{16, 0, tsdbutil.GenerateTestHistogram(7), nil},
|
|
|
|
// Counter reset should be detected when chunks are created from the iterable.
|
|
|
|
sample{17, 0, tsdbutil.GenerateTestHistogram(5), nil},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
sample{18, 0, tsdbutil.GenerateTestHistogram(6), nil},
|
|
|
|
sample{19, 0, tsdbutil.GenerateTestHistogram(7), nil},
|
|
|
|
// Counter reset should be detected when chunks are created from the iterable.
|
|
|
|
sample{20, 0, tsdbutil.GenerateTestHistogram(5), nil},
|
|
|
|
sample{21, 0, tsdbutil.GenerateTestHistogram(6), nil},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
|
|
|
|
expected: []chunks.Sample{
|
|
|
|
sample{7, 0, tsdbutil.GenerateTestHistogram(8), nil},
|
|
|
|
sample{8, 0, tsdbutil.GenerateTestHistogram(9), nil},
|
|
|
|
sample{12, 0, tsdbutil.GenerateTestHistogram(5), nil},
|
|
|
|
sample{15, 0, tsdbutil.GenerateTestHistogram(6), nil},
|
|
|
|
sample{16, 0, tsdbutil.GenerateTestHistogram(7), nil},
|
|
|
|
sample{17, 0, tsdbutil.GenerateTestHistogram(5), nil},
|
|
|
|
sample{18, 0, tsdbutil.GenerateTestHistogram(6), nil},
|
|
|
|
sample{19, 0, tsdbutil.GenerateTestHistogram(7), nil},
|
|
|
|
sample{20, 0, tsdbutil.GenerateTestHistogram(5), nil},
|
|
|
|
sample{21, 0, tsdbutil.GenerateTestHistogram(6), nil},
|
|
|
|
},
|
|
|
|
expectedChks: []chunks.Meta{
|
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
|
|
|
sample{7, 0, tsdbutil.GenerateTestHistogram(8), nil},
|
|
|
|
sample{8, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(9)), nil},
|
|
|
|
}),
|
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
|
|
|
sample{12, 0, tsdbutil.SetHistogramCounterReset(tsdbutil.GenerateTestHistogram(5)), nil},
|
|
|
|
sample{15, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(6)), nil},
|
|
|
|
sample{16, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(7)), nil},
|
|
|
|
}),
|
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
|
|
|
sample{17, 0, tsdbutil.SetHistogramCounterReset(tsdbutil.GenerateTestHistogram(5)), nil},
|
|
|
|
}),
|
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
|
|
|
sample{18, 0, tsdbutil.GenerateTestHistogram(6), nil},
|
|
|
|
sample{19, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(7)), nil},
|
|
|
|
}),
|
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
|
|
|
sample{20, 0, tsdbutil.SetHistogramCounterReset(tsdbutil.GenerateTestHistogram(5)), nil},
|
|
|
|
sample{21, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(6)), nil},
|
|
|
|
}),
|
|
|
|
},
|
|
|
|
expectedMinMaxTimes: []minMaxTimes{
|
|
|
|
{7, 8},
|
|
|
|
{12, 16},
|
|
|
|
{17, 17},
|
|
|
|
{18, 19},
|
|
|
|
{20, 21},
|
|
|
|
},
|
|
|
|
|
|
|
|
// Skipping chunk test - can't create a single chunk for each
|
|
|
|
// sample slice since there are counter resets in the middle of
|
|
|
|
// the slices.
|
|
|
|
skipChunkTest: true,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
// This case won't actually happen until OOO native histograms is implemented.
|
|
|
|
// Issue: https://github.com/prometheus/prometheus/issues/11220.
|
|
|
|
name: "float histogram iterables with counter resets",
|
|
|
|
samples: [][]chunks.Sample{
|
|
|
|
{
|
|
|
|
sample{7, 0, nil, tsdbutil.GenerateTestFloatHistogram(8)},
|
|
|
|
sample{8, 0, nil, tsdbutil.GenerateTestFloatHistogram(9)},
|
|
|
|
// Counter reset should be detected when chunks are created from the iterable.
|
|
|
|
sample{12, 0, nil, tsdbutil.GenerateTestFloatHistogram(5)},
|
|
|
|
sample{15, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
|
|
|
|
sample{16, 0, nil, tsdbutil.GenerateTestFloatHistogram(7)},
|
|
|
|
// Counter reset should be detected when chunks are created from the iterable.
|
|
|
|
sample{17, 0, nil, tsdbutil.GenerateTestFloatHistogram(5)},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
sample{18, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
|
|
|
|
sample{19, 0, nil, tsdbutil.GenerateTestFloatHistogram(7)},
|
|
|
|
// Counter reset should be detected when chunks are created from the iterable.
|
|
|
|
sample{20, 0, nil, tsdbutil.GenerateTestFloatHistogram(5)},
|
|
|
|
sample{21, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
|
|
|
|
expected: []chunks.Sample{
|
|
|
|
sample{7, 0, nil, tsdbutil.GenerateTestFloatHistogram(8)},
|
|
|
|
sample{8, 0, nil, tsdbutil.GenerateTestFloatHistogram(9)},
|
|
|
|
sample{12, 0, nil, tsdbutil.GenerateTestFloatHistogram(5)},
|
|
|
|
sample{15, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
|
|
|
|
sample{16, 0, nil, tsdbutil.GenerateTestFloatHistogram(7)},
|
|
|
|
sample{17, 0, nil, tsdbutil.GenerateTestFloatHistogram(5)},
|
|
|
|
sample{18, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
|
|
|
|
sample{19, 0, nil, tsdbutil.GenerateTestFloatHistogram(7)},
|
|
|
|
sample{20, 0, nil, tsdbutil.GenerateTestFloatHistogram(5)},
|
|
|
|
sample{21, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
|
|
|
|
},
|
|
|
|
expectedChks: []chunks.Meta{
|
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
|
|
|
sample{7, 0, nil, tsdbutil.GenerateTestFloatHistogram(8)},
|
|
|
|
sample{8, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(9))},
|
|
|
|
}),
|
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
|
|
|
sample{12, 0, nil, tsdbutil.SetFloatHistogramCounterReset(tsdbutil.GenerateTestFloatHistogram(5))},
|
|
|
|
sample{15, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(6))},
|
|
|
|
sample{16, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(7))},
|
|
|
|
}),
|
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
|
|
|
sample{17, 0, nil, tsdbutil.SetFloatHistogramCounterReset(tsdbutil.GenerateTestFloatHistogram(5))},
|
|
|
|
}),
|
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
|
|
|
sample{18, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
|
|
|
|
sample{19, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(7))},
|
|
|
|
}),
|
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
|
|
|
sample{20, 0, nil, tsdbutil.SetFloatHistogramCounterReset(tsdbutil.GenerateTestFloatHistogram(5))},
|
|
|
|
sample{21, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(6))},
|
|
|
|
}),
|
|
|
|
},
|
|
|
|
expectedMinMaxTimes: []minMaxTimes{
|
|
|
|
{7, 8},
|
|
|
|
{12, 16},
|
|
|
|
{17, 17},
|
|
|
|
{18, 19},
|
|
|
|
{20, 21},
|
|
|
|
},
|
|
|
|
|
|
|
|
// Skipping chunk test - can't create a single chunk for each
|
|
|
|
// sample slice since there are counter resets in the middle of
|
|
|
|
// the slices.
|
|
|
|
skipChunkTest: true,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
// This case won't actually happen until OOO native histograms is implemented.
|
|
|
|
// Issue: https://github.com/prometheus/prometheus/issues/11220.
|
|
|
|
name: "iterables with mixed encodings and counter resets",
|
|
|
|
samples: [][]chunks.Sample{
|
|
|
|
{
|
|
|
|
sample{7, 0, tsdbutil.GenerateTestHistogram(8), nil},
|
|
|
|
sample{8, 0, tsdbutil.GenerateTestHistogram(9), nil},
|
|
|
|
sample{9, 0, nil, tsdbutil.GenerateTestFloatHistogram(10)},
|
|
|
|
sample{10, 0, nil, tsdbutil.GenerateTestFloatHistogram(11)},
|
|
|
|
sample{11, 0, nil, tsdbutil.GenerateTestFloatHistogram(12)},
|
|
|
|
sample{12, 13, nil, nil},
|
|
|
|
sample{13, 14, nil, nil},
|
|
|
|
sample{14, 0, tsdbutil.GenerateTestHistogram(8), nil},
|
|
|
|
// Counter reset should be detected when chunks are created from the iterable.
|
|
|
|
sample{15, 0, tsdbutil.GenerateTestHistogram(7), nil},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
sample{18, 0, tsdbutil.GenerateTestHistogram(6), nil},
|
|
|
|
sample{19, 45, nil, nil},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
|
|
|
|
expected: []chunks.Sample{
|
|
|
|
sample{7, 0, tsdbutil.GenerateTestHistogram(8), nil},
|
|
|
|
sample{8, 0, tsdbutil.GenerateTestHistogram(9), nil},
|
|
|
|
sample{9, 0, nil, tsdbutil.GenerateTestFloatHistogram(10)},
|
|
|
|
sample{10, 0, nil, tsdbutil.GenerateTestFloatHistogram(11)},
|
|
|
|
sample{11, 0, nil, tsdbutil.GenerateTestFloatHistogram(12)},
|
|
|
|
sample{12, 13, nil, nil},
|
|
|
|
sample{13, 14, nil, nil},
|
|
|
|
sample{14, 0, tsdbutil.GenerateTestHistogram(8), nil},
|
|
|
|
sample{15, 0, tsdbutil.GenerateTestHistogram(7), nil},
|
|
|
|
sample{18, 0, tsdbutil.GenerateTestHistogram(6), nil},
|
|
|
|
sample{19, 45, nil, nil},
|
|
|
|
},
|
|
|
|
expectedChks: []chunks.Meta{
|
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
|
|
|
sample{7, 0, tsdbutil.GenerateTestHistogram(8), nil},
|
|
|
|
sample{8, 0, tsdbutil.GenerateTestHistogram(9), nil},
|
|
|
|
}),
|
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
|
|
|
sample{9, 0, nil, tsdbutil.GenerateTestFloatHistogram(10)},
|
|
|
|
sample{10, 0, nil, tsdbutil.GenerateTestFloatHistogram(11)},
|
|
|
|
sample{11, 0, nil, tsdbutil.GenerateTestFloatHistogram(12)},
|
|
|
|
}),
|
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
|
|
|
sample{12, 13, nil, nil},
|
|
|
|
sample{13, 14, nil, nil},
|
|
|
|
}),
|
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
|
|
|
sample{14, 0, tsdbutil.GenerateTestHistogram(8), nil},
|
|
|
|
}),
|
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
|
|
|
sample{15, 0, tsdbutil.SetHistogramCounterReset(tsdbutil.GenerateTestHistogram(7)), nil},
|
|
|
|
}),
|
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
|
|
|
sample{18, 0, tsdbutil.GenerateTestHistogram(6), nil},
|
|
|
|
}),
|
|
|
|
assureChunkFromSamples(t, []chunks.Sample{
|
|
|
|
sample{19, 45, nil, nil},
|
|
|
|
}),
|
|
|
|
},
|
|
|
|
expectedMinMaxTimes: []minMaxTimes{
|
|
|
|
{7, 8},
|
|
|
|
{9, 11},
|
|
|
|
{12, 13},
|
|
|
|
{14, 14},
|
|
|
|
{15, 15},
|
|
|
|
{18, 18},
|
|
|
|
{19, 19},
|
|
|
|
},
|
|
|
|
|
|
|
|
skipChunkTest: true,
|
|
|
|
},
|
2020-07-31 08:03:02 -07:00
|
|
|
}
|
|
|
|
for _, tc := range cases {
|
|
|
|
t.Run(tc.name, func(t *testing.T) {
|
|
|
|
t.Run("sample", func(t *testing.T) {
|
2023-11-28 02:14:29 -08:00
|
|
|
var f *fakeChunksReader
|
|
|
|
var chkMetas []chunks.Meta
|
|
|
|
// If the test case wants to skip the chunks test, it probably
|
|
|
|
// means you can't create valid chunks from sample slices,
|
|
|
|
// therefore create iterables over the samples instead.
|
|
|
|
if tc.skipChunkTest {
|
|
|
|
f, chkMetas = createFakeReaderAndIterables(tc.samples...)
|
|
|
|
} else {
|
|
|
|
f, chkMetas = createFakeReaderAndNotPopulatedChunks(tc.samples...)
|
|
|
|
}
|
2022-09-20 11:27:44 -07:00
|
|
|
it := &populateWithDelSeriesIterator{}
|
|
|
|
it.reset(ulid.ULID{}, f, chkMetas, tc.intervals)
|
2020-07-31 08:03:02 -07:00
|
|
|
|
2023-08-24 06:21:17 -07:00
|
|
|
var r []chunks.Sample
|
2020-07-31 08:03:02 -07:00
|
|
|
if tc.seek != 0 {
|
2021-11-28 23:54:23 -08:00
|
|
|
require.Equal(t, tc.seekSuccess, it.Seek(tc.seek) == chunkenc.ValFloat)
|
|
|
|
require.Equal(t, tc.seekSuccess, it.Seek(tc.seek) == chunkenc.ValFloat) // Next one should be noop.
|
2020-07-31 08:03:02 -07:00
|
|
|
|
|
|
|
if tc.seekSuccess {
|
|
|
|
// After successful seek iterator is ready. Grab the value.
|
|
|
|
t, v := it.At()
|
2023-03-30 10:50:13 -07:00
|
|
|
r = append(r, sample{t: t, f: v})
|
2017-04-09 07:00:25 -07:00
|
|
|
}
|
|
|
|
}
|
2020-07-31 08:03:02 -07:00
|
|
|
expandedResult, err := storage.ExpandSamples(it, newSample)
|
2020-10-29 02:43:23 -07:00
|
|
|
require.NoError(t, err)
|
2020-07-31 08:03:02 -07:00
|
|
|
r = append(r, expandedResult...)
|
2020-10-29 02:43:23 -07:00
|
|
|
require.Equal(t, tc.expected, r)
|
2020-07-31 08:03:02 -07:00
|
|
|
})
|
|
|
|
t.Run("chunk", func(t *testing.T) {
|
2023-11-28 02:14:29 -08:00
|
|
|
if tc.skipChunkTest {
|
|
|
|
t.Skip()
|
|
|
|
}
|
|
|
|
f, chkMetas := createFakeReaderAndNotPopulatedChunks(tc.samples...)
|
|
|
|
it := &populateWithDelChunkSeriesIterator{}
|
|
|
|
it.reset(ulid.ULID{}, f, chkMetas, tc.intervals)
|
|
|
|
|
|
|
|
if tc.seek != 0 {
|
|
|
|
// Chunk iterator does not have Seek method.
|
|
|
|
return
|
|
|
|
}
|
|
|
|
expandedResult, err := storage.ExpandChunks(it)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
// We don't care about ref IDs for comparison, only chunk's samples matters.
|
|
|
|
rmChunkRefs(expandedResult)
|
|
|
|
rmChunkRefs(tc.expectedChks)
|
|
|
|
require.Equal(t, tc.expectedChks, expandedResult)
|
|
|
|
|
|
|
|
for i, meta := range expandedResult {
|
|
|
|
require.Equal(t, tc.expectedMinMaxTimes[i].minTime, meta.MinTime)
|
|
|
|
require.Equal(t, tc.expectedMinMaxTimes[i].maxTime, meta.MaxTime)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
t.Run("iterables", func(t *testing.T) {
|
|
|
|
if tc.skipIterableTest {
|
|
|
|
t.Skip()
|
|
|
|
}
|
|
|
|
f, chkMetas := createFakeReaderAndIterables(tc.samples...)
|
2022-09-20 11:27:44 -07:00
|
|
|
it := &populateWithDelChunkSeriesIterator{}
|
|
|
|
it.reset(ulid.ULID{}, f, chkMetas, tc.intervals)
|
2019-02-14 05:29:41 -08:00
|
|
|
|
2020-07-31 08:03:02 -07:00
|
|
|
if tc.seek != 0 {
|
|
|
|
// Chunk iterator does not have Seek method.
|
|
|
|
return
|
2019-02-14 05:29:41 -08:00
|
|
|
}
|
2020-07-31 08:03:02 -07:00
|
|
|
expandedResult, err := storage.ExpandChunks(it)
|
2020-10-29 02:43:23 -07:00
|
|
|
require.NoError(t, err)
|
2017-04-09 07:00:25 -07:00
|
|
|
|
2020-08-07 00:57:25 -07:00
|
|
|
// We don't care about ref IDs for comparison, only chunk's samples matters.
|
2020-07-31 08:03:02 -07:00
|
|
|
rmChunkRefs(expandedResult)
|
|
|
|
rmChunkRefs(tc.expectedChks)
|
2020-10-29 02:43:23 -07:00
|
|
|
require.Equal(t, tc.expectedChks, expandedResult)
|
2023-11-01 04:53:41 -07:00
|
|
|
|
|
|
|
for i, meta := range expandedResult {
|
|
|
|
require.Equal(t, tc.expectedMinMaxTimes[i].minTime, meta.MinTime)
|
|
|
|
require.Equal(t, tc.expectedMinMaxTimes[i].maxTime, meta.MaxTime)
|
|
|
|
}
|
2020-07-31 08:03:02 -07:00
|
|
|
})
|
2017-04-09 07:00:25 -07:00
|
|
|
})
|
2020-07-31 08:03:02 -07:00
|
|
|
}
|
2017-04-09 07:00:25 -07:00
|
|
|
}
|
2017-05-01 02:03:56 -07:00
|
|
|
|
2020-07-31 08:03:02 -07:00
|
|
|
func rmChunkRefs(chks []chunks.Meta) {
|
|
|
|
for i := range chks {
|
|
|
|
chks[i].Ref = 0
|
2017-06-13 00:51:22 -07:00
|
|
|
}
|
2020-07-31 08:03:02 -07:00
|
|
|
}
|
2017-06-13 00:51:22 -07:00
|
|
|
|
2023-11-01 04:53:41 -07:00
|
|
|
func checkCurrVal(t *testing.T, valType chunkenc.ValueType, it *populateWithDelSeriesIterator, expectedTs, expectedValue int) {
|
|
|
|
switch valType {
|
|
|
|
case chunkenc.ValFloat:
|
2023-11-01 04:53:41 -07:00
|
|
|
ts, v := it.At()
|
2023-11-01 04:53:41 -07:00
|
|
|
require.Equal(t, int64(expectedTs), ts)
|
|
|
|
require.Equal(t, float64(expectedValue), v)
|
|
|
|
case chunkenc.ValHistogram:
|
2024-01-23 08:02:14 -08:00
|
|
|
ts, h := it.AtHistogram(nil)
|
2023-11-01 04:53:41 -07:00
|
|
|
require.Equal(t, int64(expectedTs), ts)
|
2023-11-01 04:53:41 -07:00
|
|
|
h.CounterResetHint = histogram.UnknownCounterReset
|
2023-11-01 04:53:41 -07:00
|
|
|
require.Equal(t, tsdbutil.GenerateTestHistogram(expectedValue), h)
|
|
|
|
case chunkenc.ValFloatHistogram:
|
2024-01-23 08:02:14 -08:00
|
|
|
ts, h := it.AtFloatHistogram(nil)
|
2023-11-01 04:53:41 -07:00
|
|
|
require.Equal(t, int64(expectedTs), ts)
|
2023-11-01 04:53:41 -07:00
|
|
|
h.CounterResetHint = histogram.UnknownCounterReset
|
2023-11-01 04:53:41 -07:00
|
|
|
require.Equal(t, tsdbutil.GenerateTestFloatHistogram(expectedValue), h)
|
|
|
|
default:
|
|
|
|
panic("unexpected value type")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-07-31 08:03:02 -07:00
|
|
|
// Regression for: https://github.com/prometheus/tsdb/pull/97
// Seeking twice to the same timestamp must leave the iterator positioned on
// that sample rather than advancing past it. Exercised for floats,
// histograms and float histograms.
func TestPopulateWithDelSeriesIterator_DoubleSeek(t *testing.T) {
	cases := []struct {
		name    string
		valType chunkenc.ValueType
		chks    [][]chunks.Sample
	}{
		{
			name:    "float",
			valType: chunkenc.ValFloat,
			chks: [][]chunks.Sample{
				{},
				{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}},
				{sample{4, 4, nil, nil}, sample{5, 5, nil, nil}},
			},
		},
		{
			name:    "histogram",
			valType: chunkenc.ValHistogram,
			chks: [][]chunks.Sample{
				{},
				{sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil}, sample{2, 0, tsdbutil.GenerateTestHistogram(2), nil}, sample{3, 0, tsdbutil.GenerateTestHistogram(3), nil}},
				{sample{4, 0, tsdbutil.GenerateTestHistogram(4), nil}, sample{5, 0, tsdbutil.GenerateTestHistogram(5), nil}},
			},
		},
		{
			name:    "float histogram",
			valType: chunkenc.ValFloatHistogram,
			chks: [][]chunks.Sample{
				{},
				{sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)}, sample{2, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)}, sample{3, 0, nil, tsdbutil.GenerateTestFloatHistogram(3)}},
				{sample{4, 0, nil, tsdbutil.GenerateTestFloatHistogram(4)}, sample{5, 0, nil, tsdbutil.GenerateTestFloatHistogram(5)}},
			},
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			f, chkMetas := createFakeReaderAndNotPopulatedChunks(tc.chks...)
			it := &populateWithDelSeriesIterator{}
			it.reset(ulid.ULID{}, f, chkMetas, nil)
			// Seek forward, then seek the same timestamp twice: the iterator
			// must still report the sample at t=2.
			require.Equal(t, tc.valType, it.Seek(1))
			require.Equal(t, tc.valType, it.Seek(2))
			require.Equal(t, tc.valType, it.Seek(2))
			checkCurrVal(t, tc.valType, it, 2, 2)
			// Also verify the MinTime recorded on each chunk meta.
			require.Equal(t, int64(0), chkMetas[0].MinTime)
			require.Equal(t, int64(1), chkMetas[1].MinTime)
			require.Equal(t, int64(4), chkMetas[2].MinTime)
		})
	}
}
|
|
|
|
|
2017-06-30 06:06:27 -07:00
|
|
|
// Regression when seeked chunks were still found via binary search and we always
// skipped to the end when seeking a value in the current chunk.
// After Next() has positioned the iterator inside a chunk, Seek() within that
// same chunk must land on the first sample at or after the target timestamp.
func TestPopulateWithDelSeriesIterator_SeekInCurrentChunk(t *testing.T) {
	cases := []struct {
		name    string
		valType chunkenc.ValueType
		chks    [][]chunks.Sample
	}{
		{
			name:    "float",
			valType: chunkenc.ValFloat,
			chks: [][]chunks.Sample{
				{},
				{sample{1, 2, nil, nil}, sample{3, 4, nil, nil}, sample{5, 6, nil, nil}, sample{7, 8, nil, nil}},
				{},
			},
		},
		{
			name:    "histogram",
			valType: chunkenc.ValHistogram,
			chks: [][]chunks.Sample{
				{},
				{sample{1, 0, tsdbutil.GenerateTestHistogram(2), nil}, sample{3, 0, tsdbutil.GenerateTestHistogram(4), nil}, sample{5, 0, tsdbutil.GenerateTestHistogram(6), nil}, sample{7, 0, tsdbutil.GenerateTestHistogram(8), nil}},
				{},
			},
		},
		{
			name:    "float histogram",
			valType: chunkenc.ValFloatHistogram,
			chks: [][]chunks.Sample{
				{},
				{sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)}, sample{3, 0, nil, tsdbutil.GenerateTestFloatHistogram(4)}, sample{5, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, sample{7, 0, nil, tsdbutil.GenerateTestFloatHistogram(8)}},
				{},
			},
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			f, chkMetas := createFakeReaderAndNotPopulatedChunks(tc.chks...)
			it := &populateWithDelSeriesIterator{}
			it.reset(ulid.ULID{}, f, chkMetas, nil)
			// Position on the first sample (t=1), then seek to t=4 within the
			// same chunk: the next sample at or after 4 is (5, 6).
			require.Equal(t, tc.valType, it.Next())
			checkCurrVal(t, tc.valType, it, 1, 2)
			require.Equal(t, tc.valType, it.Seek(4))
			checkCurrVal(t, tc.valType, it, 5, 6)
			// Also verify the MinTime recorded on each chunk meta.
			require.Equal(t, int64(0), chkMetas[0].MinTime)
			require.Equal(t, int64(1), chkMetas[1].MinTime)
			require.Equal(t, int64(0), chkMetas[2].MinTime)
		})
	}
}
|
|
|
|
|
2020-07-31 08:03:02 -07:00
|
|
|
// TestPopulateWithDelSeriesIterator_SeekWithMinTime checks Seek() behavior
// around the chunk's time bounds: seeking past the last sample yields
// ValNone, while a subsequent Seek to an in-range timestamp succeeds.
func TestPopulateWithDelSeriesIterator_SeekWithMinTime(t *testing.T) {
	cases := []struct {
		name    string
		valType chunkenc.ValueType
		chks    [][]chunks.Sample
	}{
		{
			name:    "float",
			valType: chunkenc.ValFloat,
			chks: [][]chunks.Sample{
				{sample{1, 6, nil, nil}, sample{5, 6, nil, nil}, sample{6, 8, nil, nil}},
			},
		},
		{
			name:    "histogram",
			valType: chunkenc.ValHistogram,
			chks: [][]chunks.Sample{
				{sample{1, 0, tsdbutil.GenerateTestHistogram(6), nil}, sample{5, 0, tsdbutil.GenerateTestHistogram(6), nil}, sample{6, 0, tsdbutil.GenerateTestHistogram(8), nil}},
			},
		},
		{
			name:    "float histogram",
			valType: chunkenc.ValFloatHistogram,
			chks: [][]chunks.Sample{
				{sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, sample{5, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, sample{6, 0, nil, tsdbutil.GenerateTestFloatHistogram(8)}},
			},
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			f, chkMetas := createFakeReaderAndNotPopulatedChunks(tc.chks...)
			it := &populateWithDelSeriesIterator{}
			it.reset(ulid.ULID{}, f, chkMetas, nil)
			// Seeking beyond the last sample (t=6) must return ValNone;
			// seeking back inside the range must succeed again.
			require.Equal(t, chunkenc.ValNone, it.Seek(7))
			require.Equal(t, tc.valType, it.Seek(3))
			require.Equal(t, int64(1), chkMetas[0].MinTime)
		})
	}
}
|
|
|
|
|
2020-07-31 08:03:02 -07:00
|
|
|
// Regression when calling Next() with a time bounded to fit within two samples.
// Seek gets called and advances beyond the max time, which was just accepted as a valid sample.
// With tombstones deleting everything except the open range (2, 4) — which
// contains no sample — Next() must return ValNone.
func TestPopulateWithDelSeriesIterator_NextWithMinTime(t *testing.T) {
	cases := []struct {
		name    string
		valType chunkenc.ValueType
		chks    [][]chunks.Sample
	}{
		{
			name:    "float",
			valType: chunkenc.ValFloat,
			chks: [][]chunks.Sample{
				{sample{1, 6, nil, nil}, sample{5, 6, nil, nil}, sample{7, 8, nil, nil}},
			},
		},
		{
			name:    "histogram",
			valType: chunkenc.ValHistogram,
			chks: [][]chunks.Sample{
				{sample{1, 0, tsdbutil.GenerateTestHistogram(6), nil}, sample{5, 0, tsdbutil.GenerateTestHistogram(6), nil}, sample{7, 0, tsdbutil.GenerateTestHistogram(8), nil}},
			},
		},
		{
			name:    "float histogram",
			valType: chunkenc.ValFloatHistogram,
			chks: [][]chunks.Sample{
				{sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, sample{5, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, sample{7, 0, nil, tsdbutil.GenerateTestFloatHistogram(8)}},
			},
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			f, chkMetas := createFakeReaderAndNotPopulatedChunks(tc.chks...)
			it := &populateWithDelSeriesIterator{}
			// Delete (-inf, 2] and [4, +inf): the surviving window (2, 4)
			// holds no sample, so iteration must immediately end.
			it.reset(ulid.ULID{}, f, chkMetas, tombstones.Intervals{{Mint: math.MinInt64, Maxt: 2}}.Add(tombstones.Interval{Mint: 4, Maxt: math.MaxInt64}))
			require.Equal(t, chunkenc.ValNone, it.Next())
			require.Equal(t, int64(1), chkMetas[0].MinTime)
		})
	}
}
|
2017-07-05 07:19:28 -07:00
|
|
|
|
|
|
|
// Test the cost of merging series sets for different number of merged sets and their size.
// The subset are all equivalent so this does not capture merging of partial or non-overlapping sets well.
// TODO(bwplotka): Merge with storage merged series set benchmark.
func BenchmarkMergedSeriesSet(b *testing.B) {
	// sel wraps the sets under benchmark in a chained merge series set.
	sel := func(sets []storage.SeriesSet) storage.SeriesSet {
		return storage.NewMergeSeriesSet(sets, storage.ChainedSeriesMerge)
	}

	// k = number of series per set, j = number of sets to merge.
	for _, k := range []int{
		100,
		1000,
		10000,
		20000,
	} {
		for _, j := range []int{1, 2, 4, 8, 16, 32} {
			b.Run(fmt.Sprintf("series=%d,blocks=%d", k, j), func(b *testing.B) {
				lbls, err := labels.ReadLabels(filepath.Join("testdata", "20kseries.json"), k)
				require.NoError(b, err)

				sort.Sort(labels.Slice(lbls))

				// Build j identical inputs, one empty series per label set each.
				in := make([][]storage.Series, j)

				for _, l := range lbls {
					l2 := l
					for j := range in {
						in[j] = append(in[j], storage.NewListSeries(l2, nil))
					}
				}

				b.ResetTimer()

				for i := 0; i < b.N; i++ {
					var sets []storage.SeriesSet
					for _, s := range in {
						sets = append(sets, newMockSeriesSet(s))
					}
					ms := sel(sets)

					// Drain the merged set; since all inputs are identical,
					// the merged result has exactly len(lbls) series.
					i := 0
					for ms.Next() {
						i++
					}
					require.NoError(b, ms.Err())
					require.Len(b, lbls, i)
				}
			})
		}
	}
}
|
2017-11-30 06:34:49 -08:00
|
|
|
|
2021-11-06 03:10:04 -07:00
|
|
|
type mockChunkReader map[chunks.ChunkRef]chunkenc.Chunk
|
2017-11-30 06:34:49 -08:00
|
|
|
|
2023-11-28 02:14:29 -08:00
|
|
|
func (cr mockChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) {
|
2022-09-20 10:05:50 -07:00
|
|
|
chk, ok := cr[meta.Ref]
|
2017-11-30 06:34:49 -08:00
|
|
|
if ok {
|
2023-11-28 02:14:29 -08:00
|
|
|
return chk, nil, nil
|
2017-11-30 06:34:49 -08:00
|
|
|
}
|
|
|
|
|
2023-11-28 02:14:29 -08:00
|
|
|
return nil, nil, errors.New("Chunk with ref not found")
|
2017-11-30 06:34:49 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
func (cr mockChunkReader) Close() error {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// TestDeletedIterator verifies that DeletedIterator skips exactly the
// samples covered by the given tombstone intervals. A chunk with samples at
// t=0..999 is iterated, and the expected index i is advanced past each
// deleted interval in lockstep with the iterator.
func TestDeletedIterator(t *testing.T) {
	chk := chunkenc.NewXORChunk()
	app, err := chk.Appender()
	require.NoError(t, err)
	// Insert random stuff from (0, 1000).
	act := make([]sample, 1000)
	for i := 0; i < 1000; i++ {
		act[i].t = int64(i)
		act[i].f = rand.Float64()
		app.Append(act[i].t, act[i].f)
	}

	cases := []struct {
		r tombstones.Intervals
	}{
		{r: tombstones.Intervals{{Mint: 1, Maxt: 20}}},
		{r: tombstones.Intervals{{Mint: 1, Maxt: 10}, {Mint: 12, Maxt: 20}, {Mint: 21, Maxt: 23}, {Mint: 25, Maxt: 30}}},
		{r: tombstones.Intervals{{Mint: 1, Maxt: 10}, {Mint: 12, Maxt: 20}, {Mint: 20, Maxt: 30}}},
		{r: tombstones.Intervals{{Mint: 1, Maxt: 10}, {Mint: 12, Maxt: 23}, {Mint: 25, Maxt: 30}}},
		{r: tombstones.Intervals{{Mint: 1, Maxt: 23}, {Mint: 12, Maxt: 20}, {Mint: 25, Maxt: 30}}},
		{r: tombstones.Intervals{{Mint: 1, Maxt: 23}, {Mint: 12, Maxt: 20}, {Mint: 25, Maxt: 3000}}},
		{r: tombstones.Intervals{{Mint: 0, Maxt: 2000}}},
		{r: tombstones.Intervals{{Mint: 500, Maxt: 2000}}},
		{r: tombstones.Intervals{{Mint: 0, Maxt: 200}}},
		{r: tombstones.Intervals{{Mint: 1000, Maxt: 20000}}},
	}

	for _, c := range cases {
		i := int64(-1)
		it := &DeletedIterator{Iter: chk.Iterator(nil), Intervals: c.r[:]}
		ranges := c.r[:]
		for it.Next() == chunkenc.ValFloat {
			i++
			// Advance the expected index past any deleted interval that
			// contains it, consuming intervals as they are passed.
			for _, tr := range ranges {
				if tr.InBounds(i) {
					i = tr.Maxt + 1
					ranges = ranges[1:]
				}
			}

			require.Less(t, i, int64(1000))

			ts, v := it.At()
			require.Equal(t, act[i].t, ts)
			require.Equal(t, act[i].f, v)
		}
		// There has been an extra call to Next().
		i++
		for _, tr := range ranges {
			if tr.InBounds(i) {
				i = tr.Maxt + 1
				ranges = ranges[1:]
			}
		}

		// The iterator must have consumed all non-deleted samples.
		require.GreaterOrEqual(t, i, int64(1000))
		require.NoError(t, it.Err())
	}
}
|
|
|
|
|
|
|
|
// TestDeletedIterator_WithSeek verifies Seek() on DeletedIterator: seeking
// into a deleted interval lands on the first surviving sample after it
// (seekedTs), and seeking past all surviving samples reports failure (ok=false).
func TestDeletedIterator_WithSeek(t *testing.T) {
	chk := chunkenc.NewXORChunk()
	app, err := chk.Appender()
	require.NoError(t, err)
	// Insert random stuff from (0, 1000).
	act := make([]sample, 1000)
	for i := 0; i < 1000; i++ {
		act[i].t = int64(i)
		act[i].f = float64(i)
		app.Append(act[i].t, act[i].f)
	}

	cases := []struct {
		r        tombstones.Intervals
		seek     int64
		ok       bool
		seekedTs int64
	}{
		{r: tombstones.Intervals{{Mint: 1, Maxt: 20}}, seek: 1, ok: true, seekedTs: 21},
		{r: tombstones.Intervals{{Mint: 1, Maxt: 20}}, seek: 20, ok: true, seekedTs: 21},
		{r: tombstones.Intervals{{Mint: 1, Maxt: 20}}, seek: 10, ok: true, seekedTs: 21},
		{r: tombstones.Intervals{{Mint: 1, Maxt: 20}}, seek: 999, ok: true, seekedTs: 999},
		{r: tombstones.Intervals{{Mint: 1, Maxt: 20}}, seek: 1000, ok: false},
		{r: tombstones.Intervals{{Mint: 1, Maxt: 23}, {Mint: 24, Maxt: 40}, {Mint: 45, Maxt: 3000}}, seek: 1, ok: true, seekedTs: 41},
		{r: tombstones.Intervals{{Mint: 5, Maxt: 23}, {Mint: 24, Maxt: 40}, {Mint: 41, Maxt: 3000}}, seek: 5, ok: false},
		{r: tombstones.Intervals{{Mint: 0, Maxt: 2000}}, seek: 10, ok: false},
		{r: tombstones.Intervals{{Mint: 500, Maxt: 2000}}, seek: 10, ok: true, seekedTs: 10},
		{r: tombstones.Intervals{{Mint: 500, Maxt: 2000}}, seek: 501, ok: false},
	}

	for _, c := range cases {
		it := &DeletedIterator{Iter: chk.Iterator(nil), Intervals: c.r[:]}

		require.Equal(t, c.ok, it.Seek(c.seek) == chunkenc.ValFloat)
		if c.ok {
			ts := it.AtT()
			require.Equal(t, c.seekedTs, ts)
		}
	}
}
|
|
|
|
|
|
|
|
// series holds the labels and chunk metas of a single series as stored by
// mockIndex. Actual chunk data is not kept.
type series struct {
	l      labels.Labels
	chunks []chunks.Meta
}
|
|
|
|
|
|
|
|
// mockIndex is an in-memory IndexReader/Writer stand-in for tests.
type mockIndex struct {
	// series maps a series reference to its labels and chunk metas.
	series map[storage.SeriesRef]series
	// postings maps a label pair to the series refs carrying it.
	postings map[labels.Label][]storage.SeriesRef
	// symbols is the set of all label names and values seen via AddSeries.
	symbols map[string]struct{}
}
|
|
|
|
|
|
|
|
func newMockIndex() mockIndex {
|
|
|
|
ix := mockIndex{
|
2021-11-06 03:10:04 -07:00
|
|
|
series: make(map[storage.SeriesRef]series),
|
|
|
|
postings: make(map[labels.Label][]storage.SeriesRef),
|
2020-01-01 03:21:42 -08:00
|
|
|
symbols: make(map[string]struct{}),
|
2017-11-30 06:34:49 -08:00
|
|
|
}
|
|
|
|
return ix
|
|
|
|
}
|
|
|
|
|
Stream symbols during compaction. (#6468)
Rather than buffer up symbols in RAM, do it one by one
during compaction. Then use the reader's symbol handling
for symbol lookups during the rest of the index write.
There is some slowdown in compaction, due to having to look through a file
rather than a hash lookup. This is noise to the overall cost of compacting
series with thousands of samples though.
benchmark old ns/op new ns/op delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 539917175 675341565 +25.08%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 2441815993 2477453524 +1.46%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 3978543559 3922909687 -1.40%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 8430219716 8586610007 +1.86%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 1786424591 1909552782 +6.89%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 5328998202 6020839950 +12.98%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 10085059958 11085278690 +9.92%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 25497010155 27018079806 +5.97%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 2427391406 2817217987 +16.06%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 2592965497 2538805050 -2.09%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 2437388343 2668012858 +9.46%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 2317095324 2787423966 +20.30%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 2600239857 2096973860 -19.35%
benchmark old allocs new allocs delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 500851 470794 -6.00%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 821527 791451 -3.66%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 1141562 1111508 -2.63%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 2141576 2111504 -1.40%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 871466 841424 -3.45%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 1941428 1911415 -1.55%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 3071573 3041510 -0.98%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 6771648 6741509 -0.45%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 731493 824888 +12.77%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 793918 887311 +11.76%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 811842 905204 +11.50%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 832244 925081 +11.16%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 921553 1019162 +10.59%
benchmark old bytes new bytes delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 40532648 35698276 -11.93%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 60340216 53409568 -11.49%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 81087336 72065552 -11.13%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 142485576 120878544 -15.16%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 208661368 203831136 -2.31%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 347345904 340484696 -1.98%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 585185856 576244648 -1.53%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 1357641792 1358966528 +0.10%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 126486664 119666744 -5.39%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 122323192 115117224 -5.89%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 126404504 119469864 -5.49%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 119047832 112230408 -5.73%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 136576016 116634800 -14.60%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-12-17 11:49:54 -08:00
|
|
|
func (m mockIndex) Symbols() index.StringIter {
|
|
|
|
l := []string{}
|
|
|
|
for s := range m.symbols {
|
|
|
|
l = append(l, s)
|
|
|
|
}
|
|
|
|
sort.Strings(l)
|
|
|
|
return index.NewStringListIter(l)
|
2017-11-30 06:34:49 -08:00
|
|
|
}
|
|
|
|
|
2021-11-06 03:10:04 -07:00
|
|
|
// AddSeries registers a series under ref with the given labels and chunk
// metas, interning every label name and value into the symbol table.
// It fails if ref was already added.
func (m *mockIndex) AddSeries(ref storage.SeriesRef, l labels.Labels, chunks ...chunks.Meta) error {
	if _, ok := m.series[ref]; ok {
		return fmt.Errorf("series with reference %d already added", ref)
	}
	l.Range(func(lbl labels.Label) {
		m.symbols[lbl.Name] = struct{}{}
		m.symbols[lbl.Value] = struct{}{}
	})

	s := series{l: l}
	// Actual chunk data is not stored in the index.
	for _, c := range chunks {
		c.Chunk = nil
		s.chunks = append(s.chunks, c)
	}
	m.series[ref] = s

	return nil
}
|
|
|
|
|
|
|
|
func (m mockIndex) WritePostings(name, value string, it index.Postings) error {
|
|
|
|
l := labels.Label{Name: name, Value: value}
|
|
|
|
if _, ok := m.postings[l]; ok {
|
2023-11-14 05:04:31 -08:00
|
|
|
return fmt.Errorf("postings for %s already added", l)
|
2017-11-30 06:34:49 -08:00
|
|
|
}
|
|
|
|
ep, err := index.ExpandPostings(it)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
m.postings[l] = ep
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Close implements the reader interface; the mock holds no resources.
func (m mockIndex) Close() error {
	return nil
}
|
|
|
|
|
2023-09-14 07:02:04 -07:00
|
|
|
func (m mockIndex) SortedLabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
|
|
|
|
values, _ := m.LabelValues(ctx, name, matchers...)
|
2020-06-25 06:10:29 -07:00
|
|
|
sort.Strings(values)
|
|
|
|
return values, nil
|
|
|
|
}
|
|
|
|
|
2023-09-14 07:02:04 -07:00
|
|
|
// LabelValues returns the values observed for the given label name.
// Without matchers, values are taken from the postings index. With matchers,
// values are collected from stored series; note that a value is appended as
// soon as ANY single matcher matches (see TODO below), and duplicates are
// not removed.
func (m mockIndex) LabelValues(_ context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
	var values []string

	if len(matchers) == 0 {
		for l := range m.postings {
			if l.Name == name {
				values = append(values, l.Value)
			}
		}
		return values, nil
	}

	for _, series := range m.series {
		for _, matcher := range matchers {
			if matcher.Matches(series.l.Get(matcher.Name)) {
				// TODO(colega): shouldn't we check all the matchers before adding this to the values?
				values = append(values, series.l.Get(name))
			}
		}
	}

	return values, nil
}
|
|
|
|
|
2023-09-14 07:02:04 -07:00
|
|
|
// LabelValueFor returns the value of the given label for the series with the
// given ref. An unknown ref yields the zero-value series and hence "".
func (m mockIndex) LabelValueFor(_ context.Context, id storage.SeriesRef, label string) (string, error) {
	return m.series[id].l.Get(label), nil
}
|
|
|
|
|
2023-09-14 01:39:51 -07:00
|
|
|
func (m mockIndex) LabelNamesFor(ctx context.Context, ids ...storage.SeriesRef) ([]string, error) {
|
2021-07-20 05:38:08 -07:00
|
|
|
namesMap := make(map[string]bool)
|
|
|
|
for _, id := range ids {
|
2022-03-09 14:17:29 -08:00
|
|
|
m.series[id].l.Range(func(lbl labels.Label) {
|
2021-07-20 05:38:08 -07:00
|
|
|
namesMap[lbl.Name] = true
|
2022-03-09 14:17:29 -08:00
|
|
|
})
|
2021-07-20 05:38:08 -07:00
|
|
|
}
|
|
|
|
names := make([]string, 0, len(namesMap))
|
|
|
|
for name := range namesMap {
|
|
|
|
names = append(names, name)
|
|
|
|
}
|
|
|
|
return names, nil
|
|
|
|
}
|
|
|
|
|
2023-09-13 08:45:06 -07:00
|
|
|
func (m mockIndex) Postings(ctx context.Context, name string, values ...string) (index.Postings, error) {
|
Reduce memory used by postings offset table.
Rather than keeping the offset of each postings list, instead
keep the nth offset of the offset of the posting list. As postings
list offsets have always been sorted, we can then get to the closest
entry before the one we want an iterate forwards.
I haven't done much tuning on the 32 number, it was chosen to try
not to read through more than a 4k page of data.
Switch to a bulk interface for fetching postings. Use it to avoid having
to re-read parts of the posting offset table when querying lots of it.
For a index with what BenchmarkHeadPostingForMatchers uses RAM
for r.postings drops from 3.79MB to 80.19kB or about 48x.
Bytes allocated go down by 30%, and suprisingly CPU usage drops by
4-6% for typical queries too.
benchmark old ns/op new ns/op delta
BenchmarkPostingsForMatchers/Block/n="1"-4 35231 36673 +4.09%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 563380 540627 -4.04%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 536782 534186 -0.48%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 533990 541550 +1.42%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 113374598 117969608 +4.05%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 146329884 139651442 -4.56%
BenchmarkPostingsForMatchers/Block/i=~""-4 50346510 44961127 -10.70%
BenchmarkPostingsForMatchers/Block/i!=""-4 41261550 35356165 -14.31%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 112544418 116904010 +3.87%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 112487086 116864918 +3.89%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 41094758 35457904 -13.72%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 41906372 36151473 -13.73%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 147262414 140424800 -4.64%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 28615629 27872072 -2.60%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 147117177 140462403 -4.52%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 175096826 167902298 -4.11%
benchmark old allocs new allocs delta
BenchmarkPostingsForMatchers/Block/n="1"-4 4 6 +50.00%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 7 11 +57.14%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 7 11 +57.14%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 15 17 +13.33%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 100010 100012 +0.00%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 200069 200040 -0.01%
BenchmarkPostingsForMatchers/Block/i=~""-4 200072 200045 -0.01%
BenchmarkPostingsForMatchers/Block/i!=""-4 200070 200041 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 100013 100017 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 100017 100023 +0.01%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 200073 200046 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 200075 200050 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 200074 200049 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 111165 111150 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 200078 200055 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 311282 311238 -0.01%
benchmark old bytes new bytes delta
BenchmarkPostingsForMatchers/Block/n="1"-4 264 296 +12.12%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 360 424 +17.78%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 360 424 +17.78%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 520 552 +6.15%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 1600461 1600482 +0.00%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 24900801 17259077 -30.69%
BenchmarkPostingsForMatchers/Block/i=~""-4 24900836 17259151 -30.69%
BenchmarkPostingsForMatchers/Block/i!=""-4 24900760 17259048 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 1600557 1600621 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 1600717 1600813 +0.01%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 24900856 17259176 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 24900952 17259304 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 24900993 17259333 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 3788311 3142630 -17.04%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 24901137 17259509 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 28693086 20405680 -28.88%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-12-05 10:27:40 -08:00
|
|
|
res := make([]index.Postings, 0, len(values))
|
|
|
|
for _, value := range values {
|
|
|
|
l := labels.Label{Name: name, Value: value}
|
|
|
|
res = append(res, index.NewListPostings(m.postings[l]))
|
|
|
|
}
|
2023-09-13 08:45:06 -07:00
|
|
|
return index.Merge(ctx, res...), nil
|
2017-11-30 06:34:49 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
func (m mockIndex) SortedPostings(p index.Postings) index.Postings {
|
|
|
|
ep, err := index.ExpandPostings(p)
|
|
|
|
if err != nil {
|
2023-11-16 10:54:41 -08:00
|
|
|
return index.ErrPostings(fmt.Errorf("expand postings: %w", err))
|
2017-11-30 06:34:49 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
sort.Slice(ep, func(i, j int) bool {
|
|
|
|
return labels.Compare(m.series[ep[i]].l, m.series[ep[j]].l) < 0
|
|
|
|
})
|
|
|
|
return index.NewListPostings(ep)
|
|
|
|
}
|
|
|
|
|
2024-01-29 03:57:27 -08:00
|
|
|
func (m mockIndex) ShardedPostings(p index.Postings, shardIndex, shardCount uint64) index.Postings {
|
|
|
|
out := make([]storage.SeriesRef, 0, 128)
|
|
|
|
|
|
|
|
for p.Next() {
|
|
|
|
ref := p.At()
|
|
|
|
s, ok := m.series[ref]
|
|
|
|
if !ok {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check if the series belong to the shard.
|
|
|
|
if s.l.Hash()%shardCount != shardIndex {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
out = append(out, ref)
|
|
|
|
}
|
|
|
|
|
|
|
|
return index.NewListPostings(out)
|
|
|
|
}
|
|
|
|
|
2022-12-15 10:19:15 -08:00
|
|
|
func (m mockIndex) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
|
2017-11-30 06:34:49 -08:00
|
|
|
s, ok := m.series[ref]
|
|
|
|
if !ok {
|
2020-03-16 14:52:02 -07:00
|
|
|
return storage.ErrNotFound
|
2017-11-30 06:34:49 -08:00
|
|
|
}
|
2022-12-15 10:19:15 -08:00
|
|
|
builder.Assign(s.l)
|
2017-11-30 06:34:49 -08:00
|
|
|
*chks = append((*chks)[:0], s.chunks...)
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2023-09-14 01:39:51 -07:00
|
|
|
func (m mockIndex) LabelNames(_ context.Context, matchers ...*labels.Matcher) ([]string, error) {
|
2020-01-01 03:21:42 -08:00
|
|
|
names := map[string]struct{}{}
|
2021-07-20 05:38:08 -07:00
|
|
|
if len(matchers) == 0 {
|
|
|
|
for l := range m.postings {
|
|
|
|
names[l.Name] = struct{}{}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
for _, series := range m.series {
|
|
|
|
matches := true
|
|
|
|
for _, matcher := range matchers {
|
|
|
|
matches = matches || matcher.Matches(series.l.Get(matcher.Name))
|
|
|
|
if !matches {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if matches {
|
2022-03-09 14:17:29 -08:00
|
|
|
series.l.Range(func(lbl labels.Label) {
|
2021-07-20 05:38:08 -07:00
|
|
|
names[lbl.Name] = struct{}{}
|
2022-03-09 14:17:29 -08:00
|
|
|
})
|
2021-07-20 05:38:08 -07:00
|
|
|
}
|
|
|
|
}
|
2018-11-07 07:52:41 -08:00
|
|
|
}
|
2020-01-01 03:21:42 -08:00
|
|
|
l := make([]string, 0, len(names))
|
|
|
|
for name := range names {
|
|
|
|
l = append(l, name)
|
|
|
|
}
|
|
|
|
sort.Strings(l)
|
|
|
|
return l, nil
|
2018-11-07 07:52:41 -08:00
|
|
|
}
|
2019-01-28 03:24:49 -08:00
|
|
|
|
// BenchmarkQueryIterator measures full iteration over merged block queriers.
// It builds c.numBlocks on-disk blocks whose time ranges overlap by the given
// percentage, merges one querier per block, and delegates the timed b.N loop
// to benchQuery, which selects every series and iterates all samples.
func BenchmarkQueryIterator(b *testing.B) {
	cases := []struct {
		numBlocks                   int
		numSeries                   int
		numSamplesPerSeriesPerBlock int
		overlapPercentages          []int // >=0, <=100, this is w.r.t. the previous block.
	}{
		{
			numBlocks:                   20,
			numSeries:                   1000,
			numSamplesPerSeriesPerBlock: 20000,
			overlapPercentages:          []int{0, 10, 30},
		},
	}

	for _, c := range cases {
		for _, overlapPercentage := range c.overlapPercentages {
			benchMsg := fmt.Sprintf("nBlocks=%d,nSeries=%d,numSamplesPerSeriesPerBlock=%d,overlap=%d%%",
				c.numBlocks, c.numSeries, c.numSamplesPerSeriesPerBlock, overlapPercentage)

			b.Run(benchMsg, func(b *testing.B) {
				dir := b.TempDir()

				var (
					blocks []*Block
					// Number of samples each block's range is shifted back to
					// overlap with the previous block.
					overlapDelta    = int64(overlapPercentage * c.numSamplesPerSeriesPerBlock / 100)
					prefilledLabels []map[string]string
					generatedSeries []storage.Series
				)
				for i := int64(0); i < int64(c.numBlocks); i++ {
					offset := i * overlapDelta
					mint := i*int64(c.numSamplesPerSeriesPerBlock) - offset
					maxt := mint + int64(c.numSamplesPerSeriesPerBlock) - 1
					// First block generates the label sets; later blocks reuse
					// them so every block holds the same series.
					if len(prefilledLabels) == 0 {
						generatedSeries = genSeries(c.numSeries, 10, mint, maxt)
						for _, s := range generatedSeries {
							prefilledLabels = append(prefilledLabels, s.Labels().Map())
						}
					} else {
						generatedSeries = populateSeries(prefilledLabels, mint, maxt)
					}
					block, err := OpenBlock(nil, createBlock(b, dir, generatedSeries), nil)
					require.NoError(b, err)
					blocks = append(blocks, block)
					// Deferred inside b.Run: blocks stay open for the whole
					// sub-benchmark and close when it returns.
					defer block.Close()
				}

				qblocks := make([]storage.Querier, 0, len(blocks))
				for _, blk := range blocks {
					q, err := NewBlockQuerier(blk, math.MinInt64, math.MaxInt64)
					require.NoError(b, err)
					qblocks = append(qblocks, q)
				}

				sq := storage.NewMergeQuerier(qblocks, nil, storage.ChainedSeriesMerge)
				defer sq.Close()

				// benchQuery owns the timed b.N loop.
				benchQuery(b, c.numSeries, sq, labels.Selector{labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*")})
			})
		}
	}
}
|
|
|
|
|
|
|
|
func BenchmarkQuerySeek(b *testing.B) {
|
|
|
|
cases := []struct {
|
|
|
|
numBlocks int
|
|
|
|
numSeries int
|
|
|
|
numSamplesPerSeriesPerBlock int
|
|
|
|
overlapPercentages []int // >=0, <=100, this is w.r.t. the previous block.
|
|
|
|
}{
|
|
|
|
{
|
|
|
|
numBlocks: 20,
|
|
|
|
numSeries: 100,
|
|
|
|
numSamplesPerSeriesPerBlock: 2000,
|
|
|
|
overlapPercentages: []int{0, 10, 30, 50},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, c := range cases {
|
|
|
|
for _, overlapPercentage := range c.overlapPercentages {
|
|
|
|
benchMsg := fmt.Sprintf("nBlocks=%d,nSeries=%d,numSamplesPerSeriesPerBlock=%d,overlap=%d%%",
|
|
|
|
c.numBlocks, c.numSeries, c.numSamplesPerSeriesPerBlock, overlapPercentage)
|
|
|
|
|
|
|
|
b.Run(benchMsg, func(b *testing.B) {
|
2022-01-22 01:55:01 -08:00
|
|
|
dir := b.TempDir()
|
2019-02-14 05:29:41 -08:00
|
|
|
|
|
|
|
var (
|
|
|
|
blocks []*Block
|
|
|
|
overlapDelta = int64(overlapPercentage * c.numSamplesPerSeriesPerBlock / 100)
|
|
|
|
prefilledLabels []map[string]string
|
2020-02-06 07:58:38 -08:00
|
|
|
generatedSeries []storage.Series
|
2019-02-14 05:29:41 -08:00
|
|
|
)
|
|
|
|
for i := int64(0); i < int64(c.numBlocks); i++ {
|
|
|
|
offset := i * overlapDelta
|
|
|
|
mint := i*int64(c.numSamplesPerSeriesPerBlock) - offset
|
|
|
|
maxt := mint + int64(c.numSamplesPerSeriesPerBlock) - 1
|
|
|
|
if len(prefilledLabels) == 0 {
|
|
|
|
generatedSeries = genSeries(c.numSeries, 10, mint, maxt)
|
|
|
|
for _, s := range generatedSeries {
|
|
|
|
prefilledLabels = append(prefilledLabels, s.Labels().Map())
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
generatedSeries = populateSeries(prefilledLabels, mint, maxt)
|
|
|
|
}
|
|
|
|
block, err := OpenBlock(nil, createBlock(b, dir, generatedSeries), nil)
|
2020-10-29 02:43:23 -07:00
|
|
|
require.NoError(b, err)
|
2019-02-14 05:29:41 -08:00
|
|
|
blocks = append(blocks, block)
|
|
|
|
defer block.Close()
|
|
|
|
}
|
|
|
|
|
2020-07-31 08:03:02 -07:00
|
|
|
qblocks := make([]storage.Querier, 0, len(blocks))
|
2019-02-14 05:29:41 -08:00
|
|
|
for _, blk := range blocks {
|
|
|
|
q, err := NewBlockQuerier(blk, math.MinInt64, math.MaxInt64)
|
2020-10-29 02:43:23 -07:00
|
|
|
require.NoError(b, err)
|
2020-07-31 08:03:02 -07:00
|
|
|
qblocks = append(qblocks, q)
|
2019-02-14 05:29:41 -08:00
|
|
|
}
|
|
|
|
|
2020-07-31 08:03:02 -07:00
|
|
|
sq := storage.NewMergeQuerier(qblocks, nil, storage.ChainedSeriesMerge)
|
2019-02-14 05:29:41 -08:00
|
|
|
defer sq.Close()
|
|
|
|
|
|
|
|
mint := blocks[0].meta.MinTime
|
|
|
|
maxt := blocks[len(blocks)-1].meta.MaxTime
|
|
|
|
|
|
|
|
b.ResetTimer()
|
|
|
|
b.ReportAllocs()
|
|
|
|
|
2022-09-20 10:16:45 -07:00
|
|
|
var it chunkenc.Iterator
|
2023-09-12 03:37:38 -07:00
|
|
|
ss := sq.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*"))
|
2019-02-14 05:29:41 -08:00
|
|
|
for ss.Next() {
|
2022-09-20 10:16:45 -07:00
|
|
|
it = ss.At().Iterator(it)
|
2019-02-14 05:29:41 -08:00
|
|
|
for t := mint; t <= maxt; t++ {
|
|
|
|
it.Seek(t)
|
|
|
|
}
|
2020-10-29 02:43:23 -07:00
|
|
|
require.NoError(b, it.Err())
|
2019-02-14 05:29:41 -08:00
|
|
|
}
|
2020-10-29 02:43:23 -07:00
|
|
|
require.NoError(b, ss.Err())
|
2023-12-07 03:35:01 -08:00
|
|
|
require.Empty(b, ss.Warnings())
|
2019-02-14 05:29:41 -08:00
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
|
2019-05-27 04:24:46 -07:00
|
|
|
// Refer to https://github.com/prometheus/prometheus/issues/2651.
// BenchmarkSetMatcher measures Select() with alternation ("set") regex
// patterns like "1|2|3" across varying block counts, series counts, and
// label cardinalities, to compare set-matcher performance against generic
// regex matching.
func BenchmarkSetMatcher(b *testing.B) {
	cases := []struct {
		numBlocks                   int
		numSeries                   int
		numSamplesPerSeriesPerBlock int
		cardinality                 int
		pattern                     string
	}{
		// The first three cases are to find out whether the set
		// matcher is always faster than regex matcher.
		{
			numBlocks:                   1,
			numSeries:                   1,
			numSamplesPerSeriesPerBlock: 10,
			cardinality:                 100,
			pattern:                     "1|2|3|4|5|6|7|8|9|10",
		},
		{
			numBlocks:                   1,
			numSeries:                   15,
			numSamplesPerSeriesPerBlock: 10,
			cardinality:                 100,
			pattern:                     "1|2|3|4|5|6|7|8|9|10",
		},
		{
			numBlocks:                   1,
			numSeries:                   15,
			numSamplesPerSeriesPerBlock: 10,
			cardinality:                 100,
			pattern:                     "1|2|3",
		},
		// Big data sizes benchmarks.
		{
			numBlocks:                   20,
			numSeries:                   1000,
			numSamplesPerSeriesPerBlock: 10,
			cardinality:                 100,
			pattern:                     "1|2|3",
		},
		{
			numBlocks:                   20,
			numSeries:                   1000,
			numSamplesPerSeriesPerBlock: 10,
			cardinality:                 100,
			pattern:                     "1|2|3|4|5|6|7|8|9|10",
		},
		// Increase cardinality.
		{
			numBlocks:                   1,
			numSeries:                   100000,
			numSamplesPerSeriesPerBlock: 10,
			cardinality:                 100000,
			pattern:                     "1|2|3|4|5|6|7|8|9|10",
		},
		{
			numBlocks:                   1,
			numSeries:                   500000,
			numSamplesPerSeriesPerBlock: 10,
			cardinality:                 500000,
			pattern:                     "1|2|3|4|5|6|7|8|9|10",
		},
		{
			numBlocks:                   10,
			numSeries:                   500000,
			numSamplesPerSeriesPerBlock: 10,
			cardinality:                 500000,
			pattern:                     "1|2|3|4|5|6|7|8|9|10",
		},
		{
			numBlocks:                   1,
			numSeries:                   1000000,
			numSamplesPerSeriesPerBlock: 10,
			cardinality:                 1000000,
			pattern:                     "1|2|3|4|5|6|7|8|9|10",
		},
	}

	for _, c := range cases {
		dir := b.TempDir()

		var (
			blocks          []*Block
			prefilledLabels []map[string]string
			generatedSeries []storage.Series
		)
		for i := int64(0); i < int64(c.numBlocks); i++ {
			mint := i * int64(c.numSamplesPerSeriesPerBlock)
			maxt := mint + int64(c.numSamplesPerSeriesPerBlock) - 1
			// First block generates the label sets; later blocks reuse them
			// so every block holds the same series.
			if len(prefilledLabels) == 0 {
				generatedSeries = genSeries(c.numSeries, 10, mint, maxt)
				for _, s := range generatedSeries {
					prefilledLabels = append(prefilledLabels, s.Labels().Map())
				}
			} else {
				generatedSeries = populateSeries(prefilledLabels, mint, maxt)
			}
			block, err := OpenBlock(nil, createBlock(b, dir, generatedSeries), nil)
			require.NoError(b, err)
			blocks = append(blocks, block)
			// Deferred at function scope: all blocks for all cases stay open
			// until BenchmarkSetMatcher returns.
			defer block.Close()
		}

		qblocks := make([]storage.Querier, 0, len(blocks))
		for _, blk := range blocks {
			q, err := NewBlockQuerier(blk, math.MinInt64, math.MaxInt64)
			require.NoError(b, err)
			qblocks = append(qblocks, q)
		}

		sq := storage.NewMergeQuerier(qblocks, nil, storage.ChainedSeriesMerge)
		defer sq.Close()

		benchMsg := fmt.Sprintf("nSeries=%d,nBlocks=%d,cardinality=%d,pattern=\"%s\"", c.numSeries, c.numBlocks, c.cardinality, c.pattern)
		b.Run(benchMsg, func(b *testing.B) {
			b.ResetTimer()
			b.ReportAllocs()
			for n := 0; n < b.N; n++ {
				// Drain the series set; only selection cost is of interest.
				ss := sq.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchRegexp, "test", c.pattern))
				for ss.Next() {
				}
				require.NoError(b, ss.Err())
				require.Empty(b, ss.Warnings())
			}
		})
	}
}
|
|
|
|
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
func TestPostingsForMatchers(t *testing.T) {
|
2023-05-10 19:53:35 -07:00
|
|
|
ctx := context.Background()
|
|
|
|
|
2022-01-22 01:55:01 -08:00
|
|
|
chunkDir := t.TempDir()
|
2021-02-09 06:12:48 -08:00
|
|
|
opts := DefaultHeadOptions()
|
|
|
|
opts.ChunkRange = 1000
|
|
|
|
opts.ChunkDirRoot = chunkDir
|
2022-09-20 10:05:50 -07:00
|
|
|
h, err := NewHead(nil, nil, nil, nil, opts, nil)
|
2020-10-29 02:43:23 -07:00
|
|
|
require.NoError(t, err)
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
defer func() {
|
2020-10-29 02:43:23 -07:00
|
|
|
require.NoError(t, h.Close())
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
}()
|
|
|
|
|
2020-07-30 04:36:56 -07:00
|
|
|
app := h.Appender(context.Background())
|
2021-02-18 04:07:00 -08:00
|
|
|
app.Append(0, labels.FromStrings("n", "1"), 0, 0)
|
|
|
|
app.Append(0, labels.FromStrings("n", "1", "i", "a"), 0, 0)
|
|
|
|
app.Append(0, labels.FromStrings("n", "1", "i", "b"), 0, 0)
|
|
|
|
app.Append(0, labels.FromStrings("n", "2"), 0, 0)
|
|
|
|
app.Append(0, labels.FromStrings("n", "2.5"), 0, 0)
|
2020-10-29 02:43:23 -07:00
|
|
|
require.NoError(t, app.Commit())
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
|
|
|
|
cases := []struct {
|
2019-11-18 11:53:33 -08:00
|
|
|
matchers []*labels.Matcher
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
exp []labels.Labels
|
|
|
|
}{
|
|
|
|
// Simple equals.
|
|
|
|
{
|
2019-11-18 11:53:33 -08:00
|
|
|
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1")},
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
exp: []labels.Labels{
|
|
|
|
labels.FromStrings("n", "1"),
|
|
|
|
labels.FromStrings("n", "1", "i", "a"),
|
|
|
|
labels.FromStrings("n", "1", "i", "b"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
2019-11-18 11:53:33 -08:00
|
|
|
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchEqual, "i", "a")},
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
exp: []labels.Labels{
|
|
|
|
labels.FromStrings("n", "1", "i", "a"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
2019-11-18 11:53:33 -08:00
|
|
|
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchEqual, "i", "missing")},
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
exp: []labels.Labels{},
|
|
|
|
},
|
|
|
|
{
|
2019-11-18 11:53:33 -08:00
|
|
|
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "missing", "")},
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
exp: []labels.Labels{
|
|
|
|
labels.FromStrings("n", "1"),
|
|
|
|
labels.FromStrings("n", "1", "i", "a"),
|
|
|
|
labels.FromStrings("n", "1", "i", "b"),
|
|
|
|
labels.FromStrings("n", "2"),
|
2019-05-27 04:24:46 -07:00
|
|
|
labels.FromStrings("n", "2.5"),
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
},
|
|
|
|
},
|
|
|
|
// Not equals.
|
|
|
|
{
|
2019-11-18 11:53:33 -08:00
|
|
|
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotEqual, "n", "1")},
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
exp: []labels.Labels{
|
|
|
|
labels.FromStrings("n", "2"),
|
2019-05-27 04:24:46 -07:00
|
|
|
labels.FromStrings("n", "2.5"),
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
2019-11-18 11:53:33 -08:00
|
|
|
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotEqual, "i", "")},
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
exp: []labels.Labels{
|
|
|
|
labels.FromStrings("n", "1", "i", "a"),
|
|
|
|
labels.FromStrings("n", "1", "i", "b"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
2019-11-18 11:53:33 -08:00
|
|
|
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotEqual, "missing", "")},
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
exp: []labels.Labels{},
|
|
|
|
},
|
|
|
|
{
|
2019-11-18 11:53:33 -08:00
|
|
|
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchNotEqual, "i", "a")},
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
exp: []labels.Labels{
|
|
|
|
labels.FromStrings("n", "1"),
|
|
|
|
labels.FromStrings("n", "1", "i", "b"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
2019-11-18 11:53:33 -08:00
|
|
|
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchNotEqual, "i", "")},
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
exp: []labels.Labels{
|
|
|
|
labels.FromStrings("n", "1", "i", "a"),
|
|
|
|
labels.FromStrings("n", "1", "i", "b"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
// Regex.
|
|
|
|
{
|
2019-11-18 11:53:33 -08:00
|
|
|
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "n", "^1$")},
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
exp: []labels.Labels{
|
|
|
|
labels.FromStrings("n", "1"),
|
|
|
|
labels.FromStrings("n", "1", "i", "a"),
|
|
|
|
labels.FromStrings("n", "1", "i", "b"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
2019-11-18 11:53:33 -08:00
|
|
|
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchRegexp, "i", "^a$")},
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
exp: []labels.Labels{
|
|
|
|
labels.FromStrings("n", "1", "i", "a"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
2019-11-18 11:53:33 -08:00
|
|
|
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchRegexp, "i", "^a?$")},
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
exp: []labels.Labels{
|
|
|
|
labels.FromStrings("n", "1"),
|
|
|
|
labels.FromStrings("n", "1", "i", "a"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
2019-11-18 11:53:33 -08:00
|
|
|
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "i", "^$")},
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
exp: []labels.Labels{
|
|
|
|
labels.FromStrings("n", "1"),
|
|
|
|
labels.FromStrings("n", "2"),
|
2019-05-27 04:24:46 -07:00
|
|
|
labels.FromStrings("n", "2.5"),
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
2019-11-18 11:53:33 -08:00
|
|
|
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchRegexp, "i", "^$")},
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
exp: []labels.Labels{
|
|
|
|
labels.FromStrings("n", "1"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
2019-11-18 11:53:33 -08:00
|
|
|
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchRegexp, "i", "^.*$")},
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
exp: []labels.Labels{
|
|
|
|
labels.FromStrings("n", "1"),
|
|
|
|
labels.FromStrings("n", "1", "i", "a"),
|
|
|
|
labels.FromStrings("n", "1", "i", "b"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
2019-11-18 11:53:33 -08:00
|
|
|
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchRegexp, "i", "^.+$")},
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
exp: []labels.Labels{
|
|
|
|
labels.FromStrings("n", "1", "i", "a"),
|
|
|
|
labels.FromStrings("n", "1", "i", "b"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
// Not regex.
|
|
|
|
{
|
2019-11-18 11:53:33 -08:00
|
|
|
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotRegexp, "n", "^1$")},
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
exp: []labels.Labels{
|
|
|
|
labels.FromStrings("n", "2"),
|
2019-05-27 04:24:46 -07:00
|
|
|
labels.FromStrings("n", "2.5"),
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
},
|
|
|
|
},
|
2023-05-10 15:46:14 -07:00
|
|
|
{
|
|
|
|
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotRegexp, "n", "1")},
|
|
|
|
exp: []labels.Labels{
|
|
|
|
labels.FromStrings("n", "2"),
|
|
|
|
labels.FromStrings("n", "2.5"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotRegexp, "n", "1|2.5")},
|
|
|
|
exp: []labels.Labels{
|
|
|
|
labels.FromStrings("n", "2"),
|
|
|
|
},
|
|
|
|
},
|
2023-05-30 04:49:22 -07:00
|
|
|
{
|
|
|
|
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotRegexp, "n", "(1|2.5)")},
|
|
|
|
exp: []labels.Labels{
|
|
|
|
labels.FromStrings("n", "2"),
|
|
|
|
},
|
|
|
|
},
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
{
|
2019-11-18 11:53:33 -08:00
|
|
|
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchNotRegexp, "i", "^a$")},
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
exp: []labels.Labels{
|
|
|
|
labels.FromStrings("n", "1"),
|
|
|
|
labels.FromStrings("n", "1", "i", "b"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
2019-11-18 11:53:33 -08:00
|
|
|
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchNotRegexp, "i", "^a?$")},
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
exp: []labels.Labels{
|
|
|
|
labels.FromStrings("n", "1", "i", "b"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
2019-11-18 11:53:33 -08:00
|
|
|
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchNotRegexp, "i", "^$")},
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
exp: []labels.Labels{
|
|
|
|
labels.FromStrings("n", "1", "i", "a"),
|
|
|
|
labels.FromStrings("n", "1", "i", "b"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
2019-11-18 11:53:33 -08:00
|
|
|
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchNotRegexp, "i", "^.*$")},
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
exp: []labels.Labels{},
|
|
|
|
},
|
|
|
|
{
|
2019-11-18 11:53:33 -08:00
|
|
|
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchNotRegexp, "i", "^.+$")},
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
exp: []labels.Labels{
|
|
|
|
labels.FromStrings("n", "1"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
// Combinations.
|
|
|
|
{
|
2019-11-18 11:53:33 -08:00
|
|
|
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchNotEqual, "i", ""), labels.MustNewMatcher(labels.MatchEqual, "i", "a")},
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
exp: []labels.Labels{
|
|
|
|
labels.FromStrings("n", "1", "i", "a"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
2019-11-18 11:53:33 -08:00
|
|
|
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchNotEqual, "i", "b"), labels.MustNewMatcher(labels.MatchRegexp, "i", "^(b|a).*$")},
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
exp: []labels.Labels{
|
|
|
|
labels.FromStrings("n", "1", "i", "a"),
|
|
|
|
},
|
|
|
|
},
|
2019-05-27 04:24:46 -07:00
|
|
|
// Set optimization for Regex.
|
|
|
|
// Refer to https://github.com/prometheus/prometheus/issues/2651.
|
|
|
|
{
|
2020-02-05 02:53:12 -08:00
|
|
|
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "n", "1|2")},
|
2019-05-27 04:24:46 -07:00
|
|
|
exp: []labels.Labels{
|
|
|
|
labels.FromStrings("n", "1"),
|
|
|
|
labels.FromStrings("n", "1", "i", "a"),
|
|
|
|
labels.FromStrings("n", "1", "i", "b"),
|
|
|
|
labels.FromStrings("n", "2"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
2020-02-05 02:53:12 -08:00
|
|
|
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "i", "a|b")},
|
2019-05-27 04:24:46 -07:00
|
|
|
exp: []labels.Labels{
|
|
|
|
labels.FromStrings("n", "1", "i", "a"),
|
|
|
|
labels.FromStrings("n", "1", "i", "b"),
|
|
|
|
},
|
|
|
|
},
|
2023-05-30 04:49:22 -07:00
|
|
|
{
|
|
|
|
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "i", "(a|b)")},
|
|
|
|
exp: []labels.Labels{
|
|
|
|
labels.FromStrings("n", "1", "i", "a"),
|
|
|
|
labels.FromStrings("n", "1", "i", "b"),
|
|
|
|
},
|
|
|
|
},
|
2019-05-27 04:24:46 -07:00
|
|
|
{
|
2020-02-05 02:53:12 -08:00
|
|
|
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "n", "x1|2")},
|
2019-05-27 04:24:46 -07:00
|
|
|
exp: []labels.Labels{
|
|
|
|
labels.FromStrings("n", "2"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
2020-02-05 02:53:12 -08:00
|
|
|
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "n", "2|2\\.5")},
|
2019-05-27 04:24:46 -07:00
|
|
|
exp: []labels.Labels{
|
|
|
|
labels.FromStrings("n", "2"),
|
|
|
|
labels.FromStrings("n", "2.5"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
// Empty value.
|
|
|
|
{
|
2020-02-05 02:53:12 -08:00
|
|
|
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "i", "c||d")},
|
2019-05-27 04:24:46 -07:00
|
|
|
exp: []labels.Labels{
|
|
|
|
labels.FromStrings("n", "1"),
|
|
|
|
labels.FromStrings("n", "2"),
|
|
|
|
labels.FromStrings("n", "2.5"),
|
|
|
|
},
|
|
|
|
},
|
2023-05-30 04:49:22 -07:00
|
|
|
{
|
|
|
|
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "i", "(c||d)")},
|
|
|
|
exp: []labels.Labels{
|
|
|
|
labels.FromStrings("n", "1"),
|
|
|
|
labels.FromStrings("n", "2"),
|
|
|
|
labels.FromStrings("n", "2.5"),
|
|
|
|
},
|
|
|
|
},
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
}
|
|
|
|
|
2020-03-25 12:13:47 -07:00
|
|
|
ir, err := h.Index()
|
2020-10-29 02:43:23 -07:00
|
|
|
require.NoError(t, err)
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
|
|
|
|
for _, c := range cases {
|
2023-05-10 15:46:14 -07:00
|
|
|
name := ""
|
|
|
|
for i, matcher := range c.matchers {
|
|
|
|
if i > 0 {
|
|
|
|
name += ","
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
}
|
2023-05-10 15:46:14 -07:00
|
|
|
name += matcher.String()
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
}
|
2023-05-10 15:46:14 -07:00
|
|
|
t.Run(name, func(t *testing.T) {
|
|
|
|
exp := map[string]struct{}{}
|
|
|
|
for _, l := range c.exp {
|
|
|
|
exp[l.String()] = struct{}{}
|
|
|
|
}
|
2023-05-10 19:53:35 -07:00
|
|
|
p, err := PostingsForMatchers(ctx, ir, c.matchers...)
|
2023-05-10 15:46:14 -07:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
var builder labels.ScratchBuilder
|
|
|
|
for p.Next() {
|
|
|
|
require.NoError(t, ir.Series(p.At(), &builder, &[]chunks.Meta{}))
|
|
|
|
lbls := builder.Labels()
|
|
|
|
if _, ok := exp[lbls.String()]; !ok {
|
|
|
|
t.Errorf("Evaluating %v, unexpected result %s", c.matchers, lbls.String())
|
|
|
|
} else {
|
|
|
|
delete(exp, lbls.String())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
require.NoError(t, p.Err())
|
2021-09-02 08:43:54 -07:00
|
|
|
require.Empty(t, exp, "Evaluating %v", c.matchers)
|
2023-05-10 15:46:14 -07:00
|
|
|
})
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
}
|
|
|
|
}
|
2019-04-30 00:17:07 -07:00
|
|
|
|
2023-08-29 02:03:27 -07:00
|
|
|
// TestQuerierIndexQueriesRace tests the index queries with racing appends.
|
|
|
|
func TestQuerierIndexQueriesRace(t *testing.T) {
|
|
|
|
const testRepeats = 1000
|
|
|
|
|
|
|
|
testCases := []struct {
|
|
|
|
matchers []*labels.Matcher
|
|
|
|
}{
|
|
|
|
{
|
|
|
|
matchers: []*labels.Matcher{
|
|
|
|
// This matcher should involve the AllPostings posting list in calculating the posting lists.
|
|
|
|
labels.MustNewMatcher(labels.MatchNotEqual, labels.MetricName, "metric"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
matchers: []*labels.Matcher{
|
|
|
|
// The first matcher should be effectively the same as AllPostings, because all series have always_0=0
|
|
|
|
// If it is evaluated first, then __name__=metric will contain more series than always_0=0.
|
|
|
|
labels.MustNewMatcher(labels.MatchNotEqual, "always_0", "0"),
|
|
|
|
labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, "metric"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, c := range testCases {
|
|
|
|
c := c
|
|
|
|
t.Run(fmt.Sprintf("%v", c.matchers), func(t *testing.T) {
|
2023-09-21 03:30:08 -07:00
|
|
|
t.Parallel()
|
2023-08-29 02:03:27 -07:00
|
|
|
db := openTestDB(t, DefaultOptions(), nil)
|
|
|
|
h := db.Head()
|
|
|
|
t.Cleanup(func() {
|
|
|
|
require.NoError(t, db.Close())
|
|
|
|
})
|
|
|
|
ctx, cancel := context.WithCancel(context.Background())
|
|
|
|
wg := &sync.WaitGroup{}
|
|
|
|
wg.Add(1)
|
|
|
|
go appendSeries(t, ctx, wg, h)
|
|
|
|
t.Cleanup(wg.Wait)
|
|
|
|
t.Cleanup(cancel)
|
|
|
|
|
|
|
|
for i := 0; i < testRepeats; i++ {
|
2023-09-12 03:37:38 -07:00
|
|
|
q, err := db.Querier(math.MinInt64, math.MaxInt64)
|
2023-08-29 02:03:27 -07:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
2023-09-14 07:02:04 -07:00
|
|
|
values, _, err := q.LabelValues(ctx, "seq", c.matchers...)
|
2023-08-29 02:03:27 -07:00
|
|
|
require.NoError(t, err)
|
|
|
|
require.Emptyf(t, values, `label values for label "seq" should be empty`)
|
2023-09-21 03:30:08 -07:00
|
|
|
|
|
|
|
// Sleep to give the appends some change to run.
|
|
|
|
time.Sleep(time.Millisecond)
|
2023-08-29 02:03:27 -07:00
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// appendSeries keeps appending new series to h, one per iteration with a
// growing "seq" label value, until ctx is cancelled. It signals completion
// through wg. Every series also carries always_0=0 and __name__=metric so the
// matchers in TestQuerierIndexQueriesRace select/reject them as intended.
func appendSeries(t *testing.T, ctx context.Context, wg *sync.WaitGroup, h *Head) {
	defer wg.Done()

	for i := 0; ctx.Err() == nil; i++ {
		// NOTE(review): the appender deliberately uses context.Background()
		// rather than ctx — presumably so an in-flight append/commit is not
		// aborted halfway by cancellation; cancellation is only checked at
		// the top of the loop. Confirm if ctx should be propagated here.
		app := h.Appender(context.Background())
		_, err := app.Append(0, labels.FromStrings(labels.MetricName, "metric", "seq", strconv.Itoa(i), "always_0", "0"), 0, 0)
		require.NoError(t, err)
		err = app.Commit()
		require.NoError(t, err)

		// Throttle down the appends to keep the test somewhat nimble.
		// Otherwise, we end up appending thousands or millions of samples.
		time.Sleep(time.Millisecond)
	}
}
|
|
|
|
|
2019-04-30 00:17:07 -07:00
|
|
|
// TestClose ensures that calling Close more than once doesn't block and doesn't panic.
|
|
|
|
func TestClose(t *testing.T) {
|
2022-01-22 01:55:01 -08:00
|
|
|
dir := t.TempDir()
|
2019-04-30 00:17:07 -07:00
|
|
|
|
|
|
|
createBlock(t, dir, genSeries(1, 1, 0, 10))
|
|
|
|
createBlock(t, dir, genSeries(1, 1, 10, 20))
|
|
|
|
|
2021-06-05 07:29:32 -07:00
|
|
|
db, err := Open(dir, nil, nil, DefaultOptions(), nil)
|
2024-02-01 06:18:01 -08:00
|
|
|
require.NoError(t, err, "Opening test storage failed: %s")
|
2019-04-30 00:17:07 -07:00
|
|
|
defer func() {
|
2020-10-29 02:43:23 -07:00
|
|
|
require.NoError(t, db.Close())
|
2019-04-30 00:17:07 -07:00
|
|
|
}()
|
|
|
|
|
2023-09-12 03:37:38 -07:00
|
|
|
q, err := db.Querier(0, 20)
|
2020-10-29 02:43:23 -07:00
|
|
|
require.NoError(t, err)
|
|
|
|
require.NoError(t, q.Close())
|
|
|
|
require.Error(t, q.Close())
|
2019-04-30 00:17:07 -07:00
|
|
|
}
|
2019-06-07 06:41:44 -07:00
|
|
|
|
|
|
|
func BenchmarkQueries(b *testing.B) {
|
|
|
|
cases := map[string]labels.Selector{
|
2019-08-13 01:34:14 -07:00
|
|
|
"Eq Matcher: Expansion - 1": {
|
2019-11-18 11:53:33 -08:00
|
|
|
labels.MustNewMatcher(labels.MatchEqual, "la", "va"),
|
2019-06-07 06:41:44 -07:00
|
|
|
},
|
2019-08-13 01:34:14 -07:00
|
|
|
"Eq Matcher: Expansion - 2": {
|
2019-11-18 11:53:33 -08:00
|
|
|
labels.MustNewMatcher(labels.MatchEqual, "la", "va"),
|
|
|
|
labels.MustNewMatcher(labels.MatchEqual, "lb", "vb"),
|
2019-06-07 06:41:44 -07:00
|
|
|
},
|
|
|
|
|
2019-08-13 01:34:14 -07:00
|
|
|
"Eq Matcher: Expansion - 3": {
|
2019-11-18 11:53:33 -08:00
|
|
|
labels.MustNewMatcher(labels.MatchEqual, "la", "va"),
|
|
|
|
labels.MustNewMatcher(labels.MatchEqual, "lb", "vb"),
|
|
|
|
labels.MustNewMatcher(labels.MatchEqual, "lc", "vc"),
|
2019-06-07 06:41:44 -07:00
|
|
|
},
|
2019-08-13 01:34:14 -07:00
|
|
|
"Regex Matcher: Expansion - 1": {
|
2019-11-18 11:53:33 -08:00
|
|
|
labels.MustNewMatcher(labels.MatchRegexp, "la", ".*va"),
|
2019-06-07 06:41:44 -07:00
|
|
|
},
|
2019-08-13 01:34:14 -07:00
|
|
|
"Regex Matcher: Expansion - 2": {
|
2019-11-18 11:53:33 -08:00
|
|
|
labels.MustNewMatcher(labels.MatchRegexp, "la", ".*va"),
|
|
|
|
labels.MustNewMatcher(labels.MatchRegexp, "lb", ".*vb"),
|
2019-06-07 06:41:44 -07:00
|
|
|
},
|
2019-08-13 01:34:14 -07:00
|
|
|
"Regex Matcher: Expansion - 3": {
|
2019-11-18 11:53:33 -08:00
|
|
|
labels.MustNewMatcher(labels.MatchRegexp, "la", ".*va"),
|
|
|
|
labels.MustNewMatcher(labels.MatchRegexp, "lb", ".*vb"),
|
|
|
|
labels.MustNewMatcher(labels.MatchRegexp, "lc", ".*vc"),
|
2019-06-07 06:41:44 -07:00
|
|
|
},
|
|
|
|
}
|
|
|
|
|
2022-09-20 10:05:50 -07:00
|
|
|
type qt struct {
|
|
|
|
typ string
|
|
|
|
querier storage.Querier
|
|
|
|
}
|
|
|
|
var queryTypes []qt // We use a slice instead of map to keep the order of test cases consistent.
|
2019-06-07 06:41:44 -07:00
|
|
|
defer func() {
|
|
|
|
for _, q := range queryTypes {
|
|
|
|
// Can't run a check for error here as some of these will fail as
|
|
|
|
// queryTypes is using the same slice for the different block queriers
|
2020-01-02 06:54:09 -08:00
|
|
|
// and would have been closed in the previous iteration.
|
2022-09-20 10:05:50 -07:00
|
|
|
q.querier.Close()
|
2019-06-07 06:41:44 -07:00
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
for title, selectors := range cases {
|
|
|
|
for _, nSeries := range []int{10} {
|
|
|
|
for _, nSamples := range []int64{1000, 10000, 100000} {
|
2022-01-22 01:55:01 -08:00
|
|
|
dir := b.TempDir()
|
2019-06-07 06:41:44 -07:00
|
|
|
|
2020-07-31 08:03:02 -07:00
|
|
|
series := genSeries(nSeries, 5, 1, nSamples)
|
2019-06-07 06:41:44 -07:00
|
|
|
|
|
|
|
// Add some common labels to make the matchers select these series.
|
|
|
|
{
|
2022-03-09 14:17:29 -08:00
|
|
|
var commonLbls []labels.Label
|
2019-06-07 06:41:44 -07:00
|
|
|
for _, selector := range selectors {
|
2019-11-18 11:53:33 -08:00
|
|
|
switch selector.Type {
|
|
|
|
case labels.MatchEqual:
|
|
|
|
commonLbls = append(commonLbls, labels.Label{Name: selector.Name, Value: selector.Value})
|
|
|
|
case labels.MatchRegexp:
|
|
|
|
commonLbls = append(commonLbls, labels.Label{Name: selector.Name, Value: selector.Value})
|
2019-06-07 06:41:44 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
for i := range commonLbls {
|
2020-07-31 08:03:02 -07:00
|
|
|
s := series[i].(*storage.SeriesEntry)
|
2022-03-09 14:17:29 -08:00
|
|
|
allLabels := commonLbls
|
|
|
|
s.Labels().Range(func(l labels.Label) {
|
|
|
|
allLabels = append(allLabels, l)
|
|
|
|
})
|
|
|
|
newS := storage.NewListSeries(labels.New(allLabels...), nil)
|
2020-07-31 08:03:02 -07:00
|
|
|
newS.SampleIteratorFn = s.SampleIteratorFn
|
|
|
|
|
|
|
|
series[i] = newS
|
2019-06-07 06:41:44 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-02-06 07:58:38 -08:00
|
|
|
qs := make([]storage.Querier, 0, 10)
|
2019-06-07 06:41:44 -07:00
|
|
|
for x := 0; x <= 10; x++ {
|
|
|
|
block, err := OpenBlock(nil, createBlock(b, dir, series), nil)
|
2020-10-29 02:43:23 -07:00
|
|
|
require.NoError(b, err)
|
2023-04-09 00:08:40 -07:00
|
|
|
q, err := NewBlockQuerier(block, 1, nSamples)
|
2020-10-29 02:43:23 -07:00
|
|
|
require.NoError(b, err)
|
2019-06-07 06:41:44 -07:00
|
|
|
qs = append(qs, q)
|
|
|
|
}
|
2020-07-31 08:03:02 -07:00
|
|
|
|
2022-09-20 10:05:50 -07:00
|
|
|
queryTypes = append(queryTypes, qt{"_1-Block", storage.NewMergeQuerier(qs[:1], nil, storage.ChainedSeriesMerge)})
|
|
|
|
queryTypes = append(queryTypes, qt{"_3-Blocks", storage.NewMergeQuerier(qs[0:3], nil, storage.ChainedSeriesMerge)})
|
|
|
|
queryTypes = append(queryTypes, qt{"_10-Blocks", storage.NewMergeQuerier(qs, nil, storage.ChainedSeriesMerge)})
|
2019-06-07 06:41:44 -07:00
|
|
|
|
2022-01-22 01:55:01 -08:00
|
|
|
chunkDir := b.TempDir()
|
2020-07-31 08:03:02 -07:00
|
|
|
head := createHead(b, nil, series, chunkDir)
|
2022-09-20 10:05:50 -07:00
|
|
|
qHead, err := NewBlockQuerier(NewRangeHead(head, 1, nSamples), 1, nSamples)
|
2020-10-29 02:43:23 -07:00
|
|
|
require.NoError(b, err)
|
2022-09-20 10:05:50 -07:00
|
|
|
queryTypes = append(queryTypes, qt{"_Head", qHead})
|
|
|
|
|
|
|
|
for _, oooPercentage := range []int{1, 3, 5, 10} {
|
|
|
|
chunkDir := b.TempDir()
|
|
|
|
totalOOOSamples := oooPercentage * int(nSamples) / 100
|
|
|
|
oooSampleFrequency := int(nSamples) / totalOOOSamples
|
|
|
|
head := createHeadWithOOOSamples(b, nil, series, chunkDir, oooSampleFrequency)
|
|
|
|
|
|
|
|
qHead, err := NewBlockQuerier(NewRangeHead(head, 1, nSamples), 1, nSamples)
|
|
|
|
require.NoError(b, err)
|
2023-11-24 03:38:38 -08:00
|
|
|
qOOOHead, err := NewBlockQuerier(NewOOORangeHead(head, 1, nSamples, 0), 1, nSamples)
|
2022-09-20 10:05:50 -07:00
|
|
|
require.NoError(b, err)
|
|
|
|
|
|
|
|
queryTypes = append(queryTypes, qt{
|
|
|
|
fmt.Sprintf("_Head_oooPercent:%d", oooPercentage),
|
|
|
|
storage.NewMergeQuerier([]storage.Querier{qHead, qOOOHead}, nil, storage.ChainedSeriesMerge),
|
|
|
|
})
|
|
|
|
}
|
2019-06-07 06:41:44 -07:00
|
|
|
|
2022-09-20 10:05:50 -07:00
|
|
|
for _, q := range queryTypes {
|
|
|
|
b.Run(title+q.typ+"_nSeries:"+strconv.Itoa(nSeries)+"_nSamples:"+strconv.Itoa(int(nSamples)), func(b *testing.B) {
|
2019-06-07 06:41:44 -07:00
|
|
|
expExpansions, err := strconv.Atoi(string(title[len(title)-1]))
|
2020-10-29 02:43:23 -07:00
|
|
|
require.NoError(b, err)
|
2022-09-20 10:05:50 -07:00
|
|
|
benchQuery(b, expExpansions, q.querier, selectors)
|
2019-06-07 06:41:44 -07:00
|
|
|
})
|
|
|
|
}
|
2020-10-29 02:43:23 -07:00
|
|
|
require.NoError(b, head.Close())
|
2019-06-07 06:41:44 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-02-06 07:58:38 -08:00
|
|
|
func benchQuery(b *testing.B, expExpansions int, q storage.Querier, selectors labels.Selector) {
|
2019-06-07 06:41:44 -07:00
|
|
|
b.ResetTimer()
|
|
|
|
b.ReportAllocs()
|
|
|
|
for i := 0; i < b.N; i++ {
|
2023-09-12 03:37:38 -07:00
|
|
|
ss := q.Select(context.Background(), false, nil, selectors...)
|
2019-06-07 06:41:44 -07:00
|
|
|
var actualExpansions int
|
2022-09-20 10:16:45 -07:00
|
|
|
var it chunkenc.Iterator
|
2019-06-07 06:41:44 -07:00
|
|
|
for ss.Next() {
|
|
|
|
s := ss.At()
|
|
|
|
s.Labels()
|
2022-09-20 10:16:45 -07:00
|
|
|
it = s.Iterator(it)
|
2021-11-28 23:54:23 -08:00
|
|
|
for it.Next() != chunkenc.ValNone {
|
2022-09-20 10:16:45 -07:00
|
|
|
_, _ = it.At()
|
2019-06-07 06:41:44 -07:00
|
|
|
}
|
|
|
|
actualExpansions++
|
|
|
|
}
|
2020-10-29 02:43:23 -07:00
|
|
|
require.NoError(b, ss.Err())
|
2023-12-07 03:35:01 -08:00
|
|
|
require.Empty(b, ss.Warnings())
|
2020-10-29 02:43:23 -07:00
|
|
|
require.Equal(b, expExpansions, actualExpansions)
|
|
|
|
require.NoError(b, ss.Err())
|
2019-06-07 06:41:44 -07:00
|
|
|
}
|
|
|
|
}
|
2020-02-05 02:53:12 -08:00
|
|
|
|
|
|
|
// mockMatcherIndex is used to check if the regex matcher works as expected.
|
|
|
|
type mockMatcherIndex struct{}
|
|
|
|
|
|
|
|
func (m mockMatcherIndex) Symbols() index.StringIter { return nil }
|
|
|
|
|
|
|
|
func (m mockMatcherIndex) Close() error { return nil }
|
|
|
|
|
2020-06-25 06:10:29 -07:00
|
|
|
// SortedLabelValues will return error if it is called.
|
2023-09-14 07:02:04 -07:00
|
|
|
func (m mockMatcherIndex) SortedLabelValues(context.Context, string, ...*labels.Matcher) ([]string, error) {
|
2020-06-25 06:10:29 -07:00
|
|
|
return []string{}, errors.New("sorted label values called")
|
|
|
|
}
|
|
|
|
|
2020-02-05 02:53:12 -08:00
|
|
|
// LabelValues will return error if it is called.
|
2023-09-14 07:02:04 -07:00
|
|
|
func (m mockMatcherIndex) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, error) {
|
2020-02-05 02:53:12 -08:00
|
|
|
return []string{}, errors.New("label values called")
|
|
|
|
}
|
|
|
|
|
2023-09-14 07:02:04 -07:00
|
|
|
func (m mockMatcherIndex) LabelValueFor(context.Context, storage.SeriesRef, string) (string, error) {
|
2021-02-09 09:38:35 -08:00
|
|
|
return "", errors.New("label value for called")
|
|
|
|
}
|
|
|
|
|
2023-09-14 01:39:51 -07:00
|
|
|
func (m mockMatcherIndex) LabelNamesFor(ctx context.Context, ids ...storage.SeriesRef) ([]string, error) {
|
2021-07-20 05:38:08 -07:00
|
|
|
return nil, errors.New("label names for for called")
|
|
|
|
}
|
|
|
|
|
2023-09-13 08:45:06 -07:00
|
|
|
func (m mockMatcherIndex) Postings(context.Context, string, ...string) (index.Postings, error) {
|
2020-02-05 02:53:12 -08:00
|
|
|
return index.EmptyPostings(), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m mockMatcherIndex) SortedPostings(p index.Postings) index.Postings {
|
|
|
|
return index.EmptyPostings()
|
|
|
|
}
|
|
|
|
|
2024-01-29 03:57:27 -08:00
|
|
|
func (m mockMatcherIndex) ShardedPostings(ps index.Postings, shardIndex, shardCount uint64) index.Postings {
|
|
|
|
return ps
|
|
|
|
}
|
|
|
|
|
2022-12-15 10:19:15 -08:00
|
|
|
func (m mockMatcherIndex) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
|
2020-02-05 02:53:12 -08:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2023-09-14 01:39:51 -07:00
|
|
|
func (m mockMatcherIndex) LabelNames(context.Context, ...*labels.Matcher) ([]string, error) {
|
2021-07-20 05:38:08 -07:00
|
|
|
return []string{}, nil
|
|
|
|
}
|
2020-02-05 02:53:12 -08:00
|
|
|
|
|
|
|
// TestPostingsForMatcher checks which index methods postingsForMatcher invokes
// for different matcher shapes, using mockMatcherIndex: label-value lookups on
// the mock return errors, so hasError doubles as "took the LabelValues path".
func TestPostingsForMatcher(t *testing.T) {
	ctx := context.Background()

	cases := []struct {
		matcher  *labels.Matcher
		hasError bool
	}{
		{
			// Equal label matcher will just return.
			matcher:  labels.MustNewMatcher(labels.MatchEqual, "test", "test"),
			hasError: false,
		},
		{
			// Regex matcher which doesn't have '|' will call LabelValues().
			matcher:  labels.MustNewMatcher(labels.MatchRegexp, "test", ".*"),
			hasError: true,
		},
		{
			// Alternation of plain values can be turned into set lookups,
			// so LabelValues() is not needed.
			matcher:  labels.MustNewMatcher(labels.MatchRegexp, "test", "a|b"),
			hasError: false,
		},
		{
			// Test case for double quoted regex matcher
			matcher:  labels.MustNewMatcher(labels.MatchRegexp, "test", "^(?:a|b)$"),
			hasError: false,
		},
	}

	for _, tc := range cases {
		t.Run(tc.matcher.String(), func(t *testing.T) {
			ir := &mockMatcherIndex{}
			_, err := postingsForMatcher(ctx, ir, tc.matcher)
			if tc.hasError {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
			}
		})
	}
}
|
2020-08-03 03:32:56 -07:00
|
|
|
|
|
|
|
// TestBlockBaseSeriesSet checks that blockBaseSeriesSet walks its posting
// list in order, resolves each series ref through the index, and silently
// skips refs the index does not know about (including an empty posting list).
func TestBlockBaseSeriesSet(t *testing.T) {
	// refdSeries couples a series (labels + chunk metas) with the series ref
	// it is registered under in the mock index.
	type refdSeries struct {
		lset   labels.Labels
		chunks []chunks.Meta

		ref storage.SeriesRef
	}

	cases := []struct {
		series []refdSeries
		// Postings should be in the sorted order of the series
		postings []storage.SeriesRef

		// expIdxs are indices into `series` in the order the set is expected
		// to yield them while iterating `postings`.
		expIdxs []int
	}{
		{
			series: []refdSeries{
				{
					lset: labels.FromStrings("a", "a"),
					chunks: []chunks.Meta{
						{Ref: 29},
						{Ref: 45},
						{Ref: 245},
						{Ref: 123},
						{Ref: 4232},
						{Ref: 5344},
						{Ref: 121},
					},
					ref: 12,
				},
				{
					lset: labels.FromStrings("a", "a", "b", "b"),
					chunks: []chunks.Meta{
						{Ref: 82}, {Ref: 23}, {Ref: 234}, {Ref: 65}, {Ref: 26},
					},
					ref: 10,
				},
				{
					lset:   labels.FromStrings("b", "c"),
					chunks: []chunks.Meta{{Ref: 8282}},
					ref:    1,
				},
				{
					lset: labels.FromStrings("b", "b"),
					chunks: []chunks.Meta{
						{Ref: 829}, {Ref: 239}, {Ref: 2349}, {Ref: 659}, {Ref: 269},
					},
					ref: 108,
				},
			},
			postings: []storage.SeriesRef{12, 13, 10, 108}, // 13 doesn't exist and should just be skipped over.
			expIdxs:  []int{0, 1, 3},
		},
		{
			// Series exist in the index but the posting list is empty, so the
			// set must yield nothing and finish without error.
			series: []refdSeries{
				{
					lset: labels.FromStrings("a", "a", "b", "b"),
					chunks: []chunks.Meta{
						{Ref: 82}, {Ref: 23}, {Ref: 234}, {Ref: 65}, {Ref: 26},
					},
					ref: 10,
				},
				{
					lset:   labels.FromStrings("b", "c"),
					chunks: []chunks.Meta{{Ref: 8282}},
					ref:    3,
				},
			},
			postings: []storage.SeriesRef{},
			expIdxs:  []int{},
		},
	}

	for _, tc := range cases {
		// Register every series in a fresh mock index.
		mi := newMockIndex()
		for _, s := range tc.series {
			require.NoError(t, mi.AddSeries(s.ref, s.lset, s.chunks...))
		}

		bcs := &blockBaseSeriesSet{
			p:          index.NewListPostings(tc.postings),
			index:      mi,
			tombstones: tombstones.NewMemTombstones(),
		}

		i := 0
		for bcs.Next() {
			// Rebuild the generic iterator from the set's current state and
			// compare its chunk metas against the expected series.
			si := populateWithDelGenericSeriesIterator{}
			si.reset(bcs.blockID, bcs.chunks, bcs.curr.chks, bcs.curr.intervals)
			idx := tc.expIdxs[i]

			require.Equal(t, tc.series[idx].lset, bcs.curr.labels)
			require.Equal(t, tc.series[idx].chunks, si.metas)

			i++
		}
		// Every expected index must have been visited exactly once, and
		// iteration must terminate cleanly.
		require.Len(t, tc.expIdxs, i)
		require.NoError(t, bcs.Err())
	}
}
|
2023-02-20 00:28:06 -08:00
|
|
|
|
|
|
|
func BenchmarkHeadChunkQuerier(b *testing.B) {
|
|
|
|
db := openTestDB(b, nil, nil)
|
|
|
|
defer func() {
|
|
|
|
require.NoError(b, db.Close())
|
|
|
|
}()
|
|
|
|
|
|
|
|
// 3h of data.
|
|
|
|
numTimeseries := 100
|
|
|
|
app := db.Appender(context.Background())
|
|
|
|
for i := 0; i < 120*6; i++ {
|
|
|
|
for j := 0; j < numTimeseries; j++ {
|
|
|
|
lbls := labels.FromStrings("foo", fmt.Sprintf("bar%d", j))
|
|
|
|
if i%10 == 0 {
|
|
|
|
require.NoError(b, app.Commit())
|
|
|
|
app = db.Appender(context.Background())
|
|
|
|
}
|
|
|
|
_, err := app.Append(0, lbls, int64(i*15)*time.Second.Milliseconds(), float64(i*100))
|
|
|
|
require.NoError(b, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
require.NoError(b, app.Commit())
|
|
|
|
|
2023-09-12 03:37:38 -07:00
|
|
|
querier, err := db.ChunkQuerier(math.MinInt64, math.MaxInt64)
|
2023-02-20 00:28:06 -08:00
|
|
|
require.NoError(b, err)
|
|
|
|
defer func(q storage.ChunkQuerier) {
|
|
|
|
require.NoError(b, q.Close())
|
|
|
|
}(querier)
|
|
|
|
b.ReportAllocs()
|
|
|
|
b.ResetTimer()
|
|
|
|
for i := 0; i < b.N; i++ {
|
2023-09-12 03:37:38 -07:00
|
|
|
ss := querier.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
|
2023-02-20 00:28:06 -08:00
|
|
|
total := 0
|
|
|
|
for ss.Next() {
|
|
|
|
cs := ss.At()
|
|
|
|
it := cs.Iterator(nil)
|
|
|
|
for it.Next() {
|
|
|
|
m := it.At()
|
|
|
|
total += m.Chunk.NumSamples()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
_ = total
|
|
|
|
require.NoError(b, ss.Err())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func BenchmarkHeadQuerier(b *testing.B) {
|
|
|
|
db := openTestDB(b, nil, nil)
|
|
|
|
defer func() {
|
|
|
|
require.NoError(b, db.Close())
|
|
|
|
}()
|
|
|
|
|
|
|
|
// 3h of data.
|
|
|
|
numTimeseries := 100
|
|
|
|
app := db.Appender(context.Background())
|
|
|
|
for i := 0; i < 120*6; i++ {
|
|
|
|
for j := 0; j < numTimeseries; j++ {
|
|
|
|
lbls := labels.FromStrings("foo", fmt.Sprintf("bar%d", j))
|
|
|
|
if i%10 == 0 {
|
|
|
|
require.NoError(b, app.Commit())
|
|
|
|
app = db.Appender(context.Background())
|
|
|
|
}
|
|
|
|
_, err := app.Append(0, lbls, int64(i*15)*time.Second.Milliseconds(), float64(i*100))
|
|
|
|
require.NoError(b, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
require.NoError(b, app.Commit())
|
|
|
|
|
2023-09-12 03:37:38 -07:00
|
|
|
querier, err := db.Querier(math.MinInt64, math.MaxInt64)
|
2023-02-20 00:28:06 -08:00
|
|
|
require.NoError(b, err)
|
|
|
|
defer func(q storage.Querier) {
|
|
|
|
require.NoError(b, q.Close())
|
|
|
|
}(querier)
|
|
|
|
b.ReportAllocs()
|
|
|
|
b.ResetTimer()
|
|
|
|
for i := 0; i < b.N; i++ {
|
2023-09-12 03:37:38 -07:00
|
|
|
ss := querier.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
|
2023-02-20 00:28:06 -08:00
|
|
|
total := int64(0)
|
|
|
|
for ss.Next() {
|
|
|
|
cs := ss.At()
|
|
|
|
it := cs.Iterator(nil)
|
|
|
|
for it.Next() != chunkenc.ValNone {
|
|
|
|
ts, _ := it.At()
|
|
|
|
total += ts
|
|
|
|
}
|
|
|
|
}
|
|
|
|
_ = total
|
|
|
|
require.NoError(b, ss.Err())
|
|
|
|
}
|
|
|
|
}
|
2023-05-19 01:24:06 -07:00
|
|
|
|
|
|
|
// This is a regression test for the case where gauge histograms were not handled by
|
|
|
|
// populateWithDelChunkSeriesIterator correctly.
|
|
|
|
func TestQueryWithDeletedHistograms(t *testing.T) {
|
2023-09-13 06:43:06 -07:00
|
|
|
ctx := context.Background()
|
2023-05-19 01:24:06 -07:00
|
|
|
testcases := map[string]func(int) (*histogram.Histogram, *histogram.FloatHistogram){
|
|
|
|
"intCounter": func(i int) (*histogram.Histogram, *histogram.FloatHistogram) {
|
|
|
|
return tsdbutil.GenerateTestHistogram(i), nil
|
|
|
|
},
|
|
|
|
"intgauge": func(i int) (*histogram.Histogram, *histogram.FloatHistogram) {
|
|
|
|
return tsdbutil.GenerateTestGaugeHistogram(rand.Int() % 1000), nil
|
|
|
|
},
|
|
|
|
"floatCounter": func(i int) (*histogram.Histogram, *histogram.FloatHistogram) {
|
|
|
|
return nil, tsdbutil.GenerateTestFloatHistogram(i)
|
|
|
|
},
|
|
|
|
"floatGauge": func(i int) (*histogram.Histogram, *histogram.FloatHistogram) {
|
|
|
|
return nil, tsdbutil.GenerateTestGaugeFloatHistogram(rand.Int() % 1000)
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
for name, tc := range testcases {
|
|
|
|
t.Run(name, func(t *testing.T) {
|
|
|
|
db := openTestDB(t, nil, nil)
|
|
|
|
defer func() {
|
|
|
|
require.NoError(t, db.Close())
|
|
|
|
}()
|
|
|
|
|
|
|
|
db.EnableNativeHistograms()
|
|
|
|
appender := db.Appender(context.Background())
|
|
|
|
|
|
|
|
var (
|
|
|
|
err error
|
|
|
|
seriesRef storage.SeriesRef
|
|
|
|
)
|
|
|
|
lbs := labels.FromStrings("__name__", "test", "type", name)
|
|
|
|
|
|
|
|
for i := 0; i < 100; i++ {
|
|
|
|
h, fh := tc(i)
|
|
|
|
seriesRef, err = appender.AppendHistogram(seriesRef, lbs, int64(i), h, fh)
|
|
|
|
require.NoError(t, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
err = appender.Commit()
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
matcher, err := labels.NewMatcher(labels.MatchEqual, "__name__", "test")
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
// Delete the last 20.
|
2023-09-13 06:43:06 -07:00
|
|
|
err = db.Delete(ctx, 80, 100, matcher)
|
2023-05-19 01:24:06 -07:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
2023-09-12 03:37:38 -07:00
|
|
|
chunkQuerier, err := db.ChunkQuerier(0, 100)
|
2023-05-19 01:24:06 -07:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
2023-09-12 03:37:38 -07:00
|
|
|
css := chunkQuerier.Select(context.Background(), false, nil, matcher)
|
2023-05-19 01:24:06 -07:00
|
|
|
|
|
|
|
seriesCount := 0
|
|
|
|
for css.Next() {
|
|
|
|
seriesCount++
|
|
|
|
series := css.At()
|
|
|
|
|
|
|
|
sampleCount := 0
|
|
|
|
it := series.Iterator(nil)
|
|
|
|
for it.Next() {
|
|
|
|
chk := it.At()
|
|
|
|
for cit := chk.Chunk.Iterator(nil); cit.Next() != chunkenc.ValNone; {
|
|
|
|
sampleCount++
|
|
|
|
}
|
|
|
|
}
|
|
|
|
require.NoError(t, it.Err())
|
|
|
|
require.Equal(t, 80, sampleCount)
|
|
|
|
}
|
|
|
|
require.NoError(t, css.Err())
|
|
|
|
require.Equal(t, 1, seriesCount)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
2023-11-29 02:24:04 -08:00
|
|
|
|
|
|
|
// TestQueryWithOneChunkCompletelyDeleted is a regression test for deletion
// intervals that cover every sample in a chunk without covering the chunk's
// full time range: the emptied int-histogram chunk must be dropped entirely,
// leaving only the float-histogram chunk in the query result.
func TestQueryWithOneChunkCompletelyDeleted(t *testing.T) {
	ctx := context.Background()
	db := openTestDB(t, nil, nil)
	defer func() {
		require.NoError(t, db.Close())
	}()

	db.EnableNativeHistograms()
	appender := db.Appender(context.Background())

	var (
		err       error
		seriesRef storage.SeriesRef
	)
	lbs := labels.FromStrings("__name__", "test")

	// Create an int histogram chunk with samples between 0 - 20 and 30 - 40,
	// i.e. the chunk spans [0, 40) but holds no samples in [20, 30).
	for i := 0; i < 20; i++ {
		h := tsdbutil.GenerateTestHistogram(1)
		seriesRef, err = appender.AppendHistogram(seriesRef, lbs, int64(i), h, nil)
		require.NoError(t, err)
	}
	for i := 30; i < 40; i++ {
		h := tsdbutil.GenerateTestHistogram(1)
		seriesRef, err = appender.AppendHistogram(seriesRef, lbs, int64(i), h, nil)
		require.NoError(t, err)
	}

	// Append some float histograms - float histograms are a different encoding
	// type from int histograms so a new chunk is created. These 40 samples
	// (timestamps 60..99) are the ones expected to survive the deletes.
	for i := 60; i < 100; i++ {
		fh := tsdbutil.GenerateTestFloatHistogram(1)
		seriesRef, err = appender.AppendHistogram(seriesRef, lbs, int64(i), nil, fh)
		require.NoError(t, err)
	}

	err = appender.Commit()
	require.NoError(t, err)

	matcher, err := labels.NewMatcher(labels.MatchEqual, "__name__", "test")
	require.NoError(t, err)

	// Delete all samples from the int histogram chunk. The deletion intervals
	// don't cover the entire histogram chunk (the [20, 30) gap is excluded),
	// but do cover all the samples in the chunk. This case was previously not
	// handled properly.
	err = db.Delete(ctx, 0, 20, matcher)
	require.NoError(t, err)
	err = db.Delete(ctx, 30, 40, matcher)
	require.NoError(t, err)

	chunkQuerier, err := db.ChunkQuerier(0, 100)
	require.NoError(t, err)

	css := chunkQuerier.Select(context.Background(), false, nil, matcher)

	// Exactly one series must come back, holding only the 40 float-histogram
	// samples; every int-histogram sample must have been deleted.
	seriesCount := 0
	for css.Next() {
		seriesCount++
		series := css.At()

		sampleCount := 0
		it := series.Iterator(nil)
		for it.Next() {
			chk := it.At()
			cit := chk.Chunk.Iterator(nil)
			for vt := cit.Next(); vt != chunkenc.ValNone; vt = cit.Next() {
				require.Equal(t, chunkenc.ValFloatHistogram, vt, "Only float histograms expected, other sample types should have been deleted.")
				sampleCount++
			}
		}
		require.NoError(t, it.Err())
		require.Equal(t, 40, sampleCount)
	}
	require.NoError(t, css.Err())
	require.Equal(t, 1, seriesCount)
}
|