prometheus/tsdb/querier_test.go

2346 lines
59 KiB
Go
Raw Normal View History

2017-04-10 11:59:45 -07:00
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
2016-12-14 06:47:05 -08:00
package tsdb
import (
"context"
2017-07-05 07:19:28 -07:00
"fmt"
"io/ioutil"
"math"
"math/rand"
"os"
"path/filepath"
2016-12-19 02:44:11 -08:00
"sort"
"strconv"
2016-12-14 06:47:05 -08:00
"testing"
"github.com/pkg/errors"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/index"
"github.com/prometheus/prometheus/tsdb/tombstones"
"github.com/prometheus/prometheus/tsdb/tsdbutil"
"github.com/prometheus/prometheus/util/testutil"
2016-12-14 06:47:05 -08:00
)
type mockSeriesSet struct {
next func() bool
series func() storage.Series
*: Consistent Error/Warning handling for SeriesSet iterator: Allowing Async Select (#7251) * Add errors and Warnings to SeriesSet Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Change Querier interface and refactor accordingly Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Refactor promql/engine to propagate warnings at eval stage Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review issues Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Make sure all the series from all Selects are pre-advanced Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review issues Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Separate merge series sets Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Clean Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Refactor merge querier failure handling Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Refactored and simplified fanout with improvements from incoming chunk iterator PRs. * Secondary logic is hidden, instead of weird failed series set logic we had. * Fanout is well commented * Fanout closing record all errors * MergeQuerier improved API (clearer) * deferredGenericMergeSeriesSet is not needed as we return no samples anyway for failed series sets (next = false). Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Fix formatting Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Fix CI issues Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Added final tests for error handling. Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Addressed Brian's comments. * Moved hints in populate to be allocated only when needed. * Used sync.Once in secondary Querier to achieve all-or-nothing partial response logic. * Select after first Next is done will panic. NOTE: in lazySeriesSet in theory we could just panic, I think however we can totally just return error, it will panic in expand anyway. Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Utilize errWithWarnings Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Fix recently introduced expansion issue Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Add tests for secondary querier error handling Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Implement lazy merge Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Add name to test cases Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Reorganize Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review comments Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review comments Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Remove redundant warnings Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Fix rebase mistake Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> Co-authored-by: Bartlomiej Plotka <bwplotka@gmail.com>
2020-06-09 09:57:31 -07:00
ws func() storage.Warnings
err func() error
}
*: Consistent Error/Warning handling for SeriesSet iterator: Allowing Async Select (#7251) * Add errors and Warnings to SeriesSet Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Change Querier interface and refactor accordingly Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Refactor promql/engine to propagate warnings at eval stage Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review issues Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Make sure all the series from all Selects are pre-advanced Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review issues Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Separate merge series sets Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Clean Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Refactor merge querier failure handling Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Refactored and simplified fanout with improvements from incoming chunk iterator PRs. * Secondary logic is hidden, instead of weird failed series set logic we had. * Fanout is well commented * Fanout closing record all errors * MergeQuerier improved API (clearer) * deferredGenericMergeSeriesSet is not needed as we return no samples anyway for failed series sets (next = false). Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Fix formatting Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Fix CI issues Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Added final tests for error handling. Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Addressed Brian's comments. * Moved hints in populate to be allocated only when needed. * Used sync.Once in secondary Querier to achieve all-or-nothing partial response logic. * Select after first Next is done will panic. NOTE: in lazySeriesSet in theory we could just panic, I think however we can totally just return error, it will panic in expand anyway. Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Utilize errWithWarnings Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Fix recently introduced expansion issue Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Add tests for secondary querier error handling Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Implement lazy merge Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Add name to test cases Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Reorganize Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review comments Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review comments Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Remove redundant warnings Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Fix rebase mistake Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> Co-authored-by: Bartlomiej Plotka <bwplotka@gmail.com>
2020-06-09 09:57:31 -07:00
func (m *mockSeriesSet) Next() bool { return m.next() }
func (m *mockSeriesSet) At() storage.Series { return m.series() }
func (m *mockSeriesSet) Err() error { return m.err() }
func (m *mockSeriesSet) Warnings() storage.Warnings { return m.ws() }
func newMockSeriesSet(list []storage.Series) *mockSeriesSet {
i := -1
return &mockSeriesSet{
next: func() bool {
i++
return i < len(list)
},
series: func() storage.Series {
return list[i]
},
err: func() error { return nil },
*: Consistent Error/Warning handling for SeriesSet iterator: Allowing Async Select (#7251) * Add errors and Warnings to SeriesSet Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Change Querier interface and refactor accordingly Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Refactor promql/engine to propagate warnings at eval stage Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review issues Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Make sure all the series from all Selects are pre-advanced Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review issues Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Separate merge series sets Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Clean Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Refactor merge querier failure handling Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Refactored and simplified fanout with improvements from incoming chunk iterator PRs. * Secondary logic is hidden, instead of weird failed series set logic we had. * Fanout is well commented * Fanout closing record all errors * MergeQuerier improved API (clearer) * deferredGenericMergeSeriesSet is not needed as we return no samples anyway for failed series sets (next = false). Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Fix formatting Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Fix CI issues Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Added final tests for error handling. Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Addressed Brian's comments. * Moved hints in populate to be allocated only when needed. * Used sync.Once in secondary Querier to achieve all-or-nothing partial response logic. * Select after first Next is done will panic. NOTE: in lazySeriesSet in theory we could just panic, I think however we can totally just return error, it will panic in expand anyway. Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Utilize errWithWarnings Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Fix recently introduced expansion issue Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Add tests for secondary querier error handling Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Implement lazy merge Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Add name to test cases Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Reorganize Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review comments Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review comments Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Remove redundant warnings Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Fix rebase mistake Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> Co-authored-by: Bartlomiej Plotka <bwplotka@gmail.com>
2020-06-09 09:57:31 -07:00
ws: func() storage.Warnings { return nil },
}
}
func TestMergedSeriesSet(t *testing.T) {
2016-12-19 02:44:11 -08:00
cases := []struct {
// The input sets in order (samples in series in b are strictly
// after those in a).
a, b storage.SeriesSet
2017-01-05 23:08:02 -08:00
// The composition of a and b in the partition series set must yield
2016-12-19 02:44:11 -08:00
// results equivalent to the result series set.
exp storage.SeriesSet
2016-12-19 02:44:11 -08:00
}{
{
a: newMockSeriesSet([]storage.Series{
2016-12-19 02:44:11 -08:00
newSeries(map[string]string{
"a": "a",
}, []tsdbutil.Sample{
sample{t: 1, v: 1},
2016-12-19 02:44:11 -08:00
}),
}),
b: newMockSeriesSet([]storage.Series{
2016-12-19 02:44:11 -08:00
newSeries(map[string]string{
"a": "a",
}, []tsdbutil.Sample{
sample{t: 2, v: 2},
2016-12-19 02:44:11 -08:00
}),
newSeries(map[string]string{
"b": "b",
}, []tsdbutil.Sample{
sample{t: 1, v: 1},
2016-12-19 02:44:11 -08:00
}),
}),
exp: newMockSeriesSet([]storage.Series{
2016-12-19 02:44:11 -08:00
newSeries(map[string]string{
"a": "a",
}, []tsdbutil.Sample{
sample{t: 1, v: 1},
sample{t: 2, v: 2},
2016-12-19 02:44:11 -08:00
}),
newSeries(map[string]string{
"b": "b",
}, []tsdbutil.Sample{
sample{t: 1, v: 1},
2016-12-19 02:44:11 -08:00
}),
}),
},
2017-01-03 10:02:42 -08:00
{
a: newMockSeriesSet([]storage.Series{
2017-01-03 10:02:42 -08:00
newSeries(map[string]string{
"handler": "prometheus",
"instance": "127.0.0.1:9090",
}, []tsdbutil.Sample{
sample{t: 1, v: 1},
2017-01-03 10:02:42 -08:00
}),
newSeries(map[string]string{
"handler": "prometheus",
"instance": "localhost:9090",
}, []tsdbutil.Sample{
sample{t: 1, v: 2},
2017-01-03 10:02:42 -08:00
}),
}),
b: newMockSeriesSet([]storage.Series{
2017-01-03 10:02:42 -08:00
newSeries(map[string]string{
"handler": "prometheus",
"instance": "127.0.0.1:9090",
}, []tsdbutil.Sample{
sample{t: 2, v: 1},
2017-01-03 10:02:42 -08:00
}),
newSeries(map[string]string{
"handler": "query",
"instance": "localhost:9090",
}, []tsdbutil.Sample{
sample{t: 2, v: 2},
2017-01-03 10:02:42 -08:00
}),
}),
exp: newMockSeriesSet([]storage.Series{
2017-01-03 10:02:42 -08:00
newSeries(map[string]string{
"handler": "prometheus",
"instance": "127.0.0.1:9090",
}, []tsdbutil.Sample{
sample{t: 1, v: 1},
sample{t: 2, v: 1},
2017-01-03 10:02:42 -08:00
}),
newSeries(map[string]string{
"handler": "prometheus",
"instance": "localhost:9090",
}, []tsdbutil.Sample{
sample{t: 1, v: 2},
2017-01-03 10:02:42 -08:00
}),
newSeries(map[string]string{
"handler": "query",
"instance": "localhost:9090",
}, []tsdbutil.Sample{
sample{t: 2, v: 2},
2017-01-03 10:02:42 -08:00
}),
}),
},
2016-12-19 02:44:11 -08:00
}
Outer:
for _, c := range cases {
res := NewMergedSeriesSet([]storage.SeriesSet{c.a, c.b})
2016-12-19 02:44:11 -08:00
for {
eok, rok := c.exp.Next(), res.Next()
testutil.Equals(t, eok, rok)
2016-12-19 02:44:11 -08:00
if !eok {
continue Outer
}
2017-01-02 04:27:52 -08:00
sexp := c.exp.At()
sres := res.At()
2016-12-19 02:44:11 -08:00
testutil.Equals(t, sexp.Labels(), sres.Labels())
2016-12-19 02:44:11 -08:00
smplExp, errExp := expandSeriesIterator(sexp.Iterator())
smplRes, errRes := expandSeriesIterator(sres.Iterator())
testutil.Equals(t, errExp, errRes)
testutil.Equals(t, smplExp, smplRes)
2016-12-19 02:44:11 -08:00
}
}
}
func expandSeriesIterator(it chunkenc.Iterator) (r []tsdbutil.Sample, err error) {
2016-12-19 02:44:11 -08:00
for it.Next() {
2017-01-02 04:27:52 -08:00
t, v := it.At()
2016-12-19 02:44:11 -08:00
r = append(r, sample{t: t, v: v})
}
return r, it.Err()
}
type seriesSamples struct {
lset map[string]string
chunks [][]sample
}
// Index: labels -> postings -> chunkMetas -> chunkRef
// ChunkReader: ref -> vals
func createIdxChkReaders(t *testing.T, tc []seriesSamples) (IndexReader, ChunkReader, int64, int64) {
sort.Slice(tc, func(i, j int) bool {
return labels.Compare(labels.FromMap(tc[i].lset), labels.FromMap(tc[i].lset)) < 0
})
postings := index.NewMemPostings()
chkReader := mockChunkReader(make(map[uint64]chunkenc.Chunk))
lblIdx := make(map[string]stringset)
mi := newMockIndex()
Vertical query merging and compaction (#370) * Vertical series iterator Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Select overlapped blocks first in compactor Plan() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Added vertical compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Code cleanup and comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix tests Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Add benchmark for compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Perform vertical compaction only when blocks are overlapping. Actions for vertical compaction: * Sorting chunk metas * Calling chunks.MergeOverlappingChunks on the chunks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for vertical compaction * BenchmarkNormalCompaction => BenchmarkCompaction * Moved the benchmark from db_test.go to compact_test.go Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for query iterator and seek for non overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Vertical query merge only for overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Simplify logging in Compact(...) Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG.md Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Calculate overlapping inside populateBlock Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * MinTime and MaxTime for BlockReader. Using this to find overlapping blocks in populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Sort blocks w.r.t. MinTime in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping in LeveledCompactor.write() instead of returning bool Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping inside LeveledCompactor.populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor createBlock to take optional []Series Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * review1 Signed-off-by: Krasi Georgiev <kgeorgie@redhat.com> * Updated CHANGELOG and minor nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor iterator and seek benchmarks for Querier. Also has as overlapping blocks. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Additional test case Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * genSeries takes optional labels. Updated BenchmarkQueryIterator and BenchmarkQuerySeek. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Split genSeries into genSeries and populateSeries Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Check error in benchmark Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Warn about overlapping blocks in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in>
2019-02-14 05:29:41 -08:00
blockMint := int64(math.MaxInt64)
blockMaxt := int64(math.MinInt64)
var chunkRef uint64
for i, s := range tc {
i = i + 1 // 0 is not a valid posting.
metas := make([]chunks.Meta, 0, len(s.chunks))
for _, chk := range s.chunks {
Vertical query merging and compaction (#370) * Vertical series iterator Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Select overlapped blocks first in compactor Plan() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Added vertical compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Code cleanup and comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix tests Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Add benchmark for compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Perform vertical compaction only when blocks are overlapping. Actions for vertical compaction: * Sorting chunk metas * Calling chunks.MergeOverlappingChunks on the chunks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for vertical compaction * BenchmarkNormalCompaction => BenchmarkCompaction * Moved the benchmark from db_test.go to compact_test.go Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for query iterator and seek for non overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Vertical query merge only for overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Simplify logging in Compact(...) Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG.md Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Calculate overlapping inside populateBlock Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * MinTime and MaxTime for BlockReader. Using this to find overlapping blocks in populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Sort blocks w.r.t. MinTime in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping in LeveledCompactor.write() instead of returning bool Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping inside LeveledCompactor.populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor createBlock to take optional []Series Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * review1 Signed-off-by: Krasi Georgiev <kgeorgie@redhat.com> * Updated CHANGELOG and minor nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor iterator and seek benchmarks for Querier. Also has as overlapping blocks. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Additional test case Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * genSeries takes optional labels. Updated BenchmarkQueryIterator and BenchmarkQuerySeek. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Split genSeries into genSeries and populateSeries Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Check error in benchmark Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Warn about overlapping blocks in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in>
2019-02-14 05:29:41 -08:00
if chk[0].t < blockMint {
blockMint = chk[0].t
}
if chk[len(chk)-1].t > blockMaxt {
blockMaxt = chk[len(chk)-1].t
}
metas = append(metas, chunks.Meta{
MinTime: chk[0].t,
MaxTime: chk[len(chk)-1].t,
Ref: chunkRef,
})
chunk := chunkenc.NewXORChunk()
app, _ := chunk.Appender()
for _, smpl := range chk {
app.Append(smpl.t, smpl.v)
}
chkReader[chunkRef] = chunk
chunkRef++
}
ls := labels.FromMap(s.lset)
testutil.Ok(t, mi.AddSeries(uint64(i), ls, metas...))
postings.Add(uint64(i), ls)
for _, l := range ls {
vs, present := lblIdx[l.Name]
if !present {
vs = stringset{}
lblIdx[l.Name] = vs
}
vs.set(l.Value)
}
}
testutil.Ok(t, postings.Iter(func(l labels.Label, p index.Postings) error {
return mi.WritePostings(l.Name, l.Value, p)
}))
Vertical query merging and compaction (#370) * Vertical series iterator Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Select overlapped blocks first in compactor Plan() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Added vertical compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Code cleanup and comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix tests Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Add benchmark for compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Perform vertical compaction only when blocks are overlapping. Actions for vertical compaction: * Sorting chunk metas * Calling chunks.MergeOverlappingChunks on the chunks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for vertical compaction * BenchmarkNormalCompaction => BenchmarkCompaction * Moved the benchmark from db_test.go to compact_test.go Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for query iterator and seek for non overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Vertical query merge only for overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Simplify logging in Compact(...) Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG.md Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Calculate overlapping inside populateBlock Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * MinTime and MaxTime for BlockReader. Using this to find overlapping blocks in populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Sort blocks w.r.t. MinTime in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping in LeveledCompactor.write() instead of returning bool Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping inside LeveledCompactor.populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor createBlock to take optional []Series Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * review1 Signed-off-by: Krasi Georgiev <kgeorgie@redhat.com> * Updated CHANGELOG and minor nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor iterator and seek benchmarks for Querier. Also has as overlapping blocks. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Additional test case Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * genSeries takes optional labels. Updated BenchmarkQueryIterator and BenchmarkQuerySeek. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Split genSeries into genSeries and populateSeries Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Check error in benchmark Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Warn about overlapping blocks in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in>
2019-02-14 05:29:41 -08:00
return mi, chkReader, blockMint, blockMaxt
}
func TestBlockQuerier(t *testing.T) {
newSeries := func(l map[string]string, s []tsdbutil.Sample) storage.Series {
return &mockSeries{
labels: func() labels.Labels { return labels.FromMap(l) },
iterator: func() chunkenc.Iterator { return newListSeriesIterator(s) },
}
}
type query struct {
mint, maxt int64
ms []*labels.Matcher
exp storage.SeriesSet
}
cases := struct {
data []seriesSamples
queries []query
}{
data: []seriesSamples{
{
lset: map[string]string{
"a": "a",
},
chunks: [][]sample{
{
{1, 2}, {2, 3}, {3, 4},
},
{
{5, 2}, {6, 3}, {7, 4},
},
},
},
{
lset: map[string]string{
"a": "a",
"b": "b",
},
chunks: [][]sample{
{
{1, 1}, {2, 2}, {3, 3},
},
{
{5, 3}, {6, 6},
},
},
},
{
lset: map[string]string{
"b": "b",
},
chunks: [][]sample{
{
{1, 3}, {2, 2}, {3, 6},
},
{
{5, 1}, {6, 7}, {7, 2},
},
},
},
},
queries: []query{
{
mint: 0,
maxt: 0,
ms: []*labels.Matcher{},
exp: newMockSeriesSet([]storage.Series{}),
},
{
mint: 0,
maxt: 0,
ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
exp: newMockSeriesSet([]storage.Series{}),
},
{
mint: 1,
maxt: 0,
ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
exp: newMockSeriesSet([]storage.Series{}),
},
{
mint: 2,
maxt: 6,
ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
exp: newMockSeriesSet([]storage.Series{
newSeries(map[string]string{
"a": "a",
},
[]tsdbutil.Sample{sample{2, 3}, sample{3, 4}, sample{5, 2}, sample{6, 3}},
),
newSeries(map[string]string{
"a": "a",
"b": "b",
},
[]tsdbutil.Sample{sample{2, 2}, sample{3, 3}, sample{5, 3}, sample{6, 6}},
),
}),
},
},
}
Outer:
2017-12-06 16:27:09 -08:00
for _, c := range cases.queries {
ir, cr, _, _ := createIdxChkReaders(t, cases.data)
querier := &blockQuerier{
index: ir,
chunks: cr,
tombstones: tombstones.NewMemTombstones(),
mint: c.mint,
maxt: c.maxt,
}
*: Consistent Error/Warning handling for SeriesSet iterator: Allowing Async Select (#7251) * Add errors and Warnings to SeriesSet Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Change Querier interface and refactor accordingly Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Refactor promql/engine to propagate warnings at eval stage Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review issues Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Make sure all the series from all Selects are pre-advanced Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review issues Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Separate merge series sets Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Clean Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Refactor merge querier failure handling Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Refactored and simplified fanout with improvements from incoming chunk iterator PRs. * Secondary logic is hidden, instead of weird failed series set logic we had. * Fanout is well commented * Fanout closing record all errors * MergeQuerier improved API (clearer) * deferredGenericMergeSeriesSet is not needed as we return no samples anyway for failed series sets (next = false). Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Fix formatting Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Fix CI issues Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Added final tests for error handling. Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Addressed Brian's comments. * Moved hints in populate to be allocated only when needed. * Used sync.Once in secondary Querier to achieve all-or-nothing partial response logic. * Select after first Next is done will panic. NOTE: in lazySeriesSet in theory we could just panic, I think however we can totally just return error, it will panic in expand anyway. Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Utilize errWithWarnings Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Fix recently introduced expansion issue Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Add tests for secondary querier error handling Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Implement lazy merge Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Add name to test cases Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Reorganize Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review comments Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review comments Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Remove redundant warnings Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Fix rebase mistake Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> Co-authored-by: Bartlomiej Plotka <bwplotka@gmail.com>
2020-06-09 09:57:31 -07:00
res := querier.Select(false, nil, c.ms...)
for {
eok, rok := c.exp.Next(), res.Next()
testutil.Equals(t, eok, rok)
if !eok {
*: Consistent Error/Warning handling for SeriesSet iterator: Allowing Async Select (#7251) * Add errors and Warnings to SeriesSet Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Change Querier interface and refactor accordingly Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Refactor promql/engine to propagate warnings at eval stage Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review issues Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Make sure all the series from all Selects are pre-advanced Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review issues Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Separate merge series sets Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Clean Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Refactor merge querier failure handling Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Refactored and simplified fanout with improvements from incoming chunk iterator PRs. * Secondary logic is hidden, instead of weird failed series set logic we had. * Fanout is well commented * Fanout closing record all errors * MergeQuerier improved API (clearer) * deferredGenericMergeSeriesSet is not needed as we return no samples anyway for failed series sets (next = false). Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Fix formatting Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Fix CI issues Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Added final tests for error handling. Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Addressed Brian's comments. * Moved hints in populate to be allocated only when needed. * Used sync.Once in secondary Querier to achieve all-or-nothing partial response logic. * Select after first Next is done will panic. NOTE: in lazySeriesSet in theory we could just panic, I think however we can totally just return error, it will panic in expand anyway. Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Utilize errWithWarnings Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Fix recently introduced expansion issue Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Add tests for secondary querier error handling Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Implement lazy merge Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Add name to test cases Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Reorganize Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review comments Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review comments Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Remove redundant warnings Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Fix rebase mistake Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> Co-authored-by: Bartlomiej Plotka <bwplotka@gmail.com>
2020-06-09 09:57:31 -07:00
testutil.Equals(t, 0, len(res.Warnings()))
continue Outer
}
sexp := c.exp.At()
sres := res.At()
testutil.Equals(t, sexp.Labels(), sres.Labels())
smplExp, errExp := expandSeriesIterator(sexp.Iterator())
smplRes, errRes := expandSeriesIterator(sres.Iterator())
testutil.Equals(t, errExp, errRes)
testutil.Equals(t, smplExp, smplRes)
}
}
}
func TestBlockQuerierDelete(t *testing.T) {
newSeries := func(l map[string]string, s []tsdbutil.Sample) storage.Series {
return &mockSeries{
labels: func() labels.Labels { return labels.FromMap(l) },
iterator: func() chunkenc.Iterator { return newListSeriesIterator(s) },
}
}
type query struct {
mint, maxt int64
ms []*labels.Matcher
exp storage.SeriesSet
}
cases := struct {
data []seriesSamples
tombstones tombstones.Reader
queries []query
}{
data: []seriesSamples{
{
lset: map[string]string{
"a": "a",
},
chunks: [][]sample{
{
{1, 2}, {2, 3}, {3, 4},
},
{
{5, 2}, {6, 3}, {7, 4},
},
},
},
{
lset: map[string]string{
"a": "a",
"b": "b",
},
chunks: [][]sample{
{
{1, 1}, {2, 2}, {3, 3},
},
{
{4, 15}, {5, 3}, {6, 6},
},
},
},
{
lset: map[string]string{
"b": "b",
},
chunks: [][]sample{
{
{1, 3}, {2, 2}, {3, 6},
},
{
{5, 1}, {6, 7}, {7, 2},
},
},
},
},
tombstones: tombstones.NewTestMemTombstones([]tombstones.Intervals{
{{Mint: 1, Maxt: 3}},
{{Mint: 1, Maxt: 3}, {Mint: 6, Maxt: 10}},
{{Mint: 6, Maxt: 10}},
}),
queries: []query{
{
mint: 2,
maxt: 7,
ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
exp: newMockSeriesSet([]storage.Series{
newSeries(map[string]string{
"a": "a",
},
[]tsdbutil.Sample{sample{5, 2}, sample{6, 3}, sample{7, 4}},
),
newSeries(map[string]string{
"a": "a",
"b": "b",
},
[]tsdbutil.Sample{sample{4, 15}, sample{5, 3}},
),
}),
},
{
mint: 2,
maxt: 7,
ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "b", "b")},
exp: newMockSeriesSet([]storage.Series{
newSeries(map[string]string{
"a": "a",
"b": "b",
},
[]tsdbutil.Sample{sample{4, 15}, sample{5, 3}},
),
newSeries(map[string]string{
"b": "b",
},
[]tsdbutil.Sample{sample{2, 2}, sample{3, 6}, sample{5, 1}},
),
}),
},
{
mint: 1,
maxt: 4,
ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
exp: newMockSeriesSet([]storage.Series{
newSeries(map[string]string{
"a": "a",
"b": "b",
},
[]tsdbutil.Sample{sample{4, 15}},
),
}),
},
{
mint: 1,
maxt: 3,
ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
exp: newMockSeriesSet([]storage.Series{}),
},
},
}
Outer:
for _, c := range cases.queries {
ir, cr, _, _ := createIdxChkReaders(t, cases.data)
querier := &blockQuerier{
index: ir,
chunks: cr,
tombstones: cases.tombstones,
mint: c.mint,
maxt: c.maxt,
}
*: Consistent Error/Warning handling for SeriesSet iterator: Allowing Async Select (#7251) * Add errors and Warnings to SeriesSet Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Change Querier interface and refactor accordingly Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Refactor promql/engine to propagate warnings at eval stage Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review issues Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Make sure all the series from all Selects are pre-advanced Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review issues Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Separate merge series sets Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Clean Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Refactor merge querier failure handling Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Refactored and simplified fanout with improvements from incoming chunk iterator PRs. * Secondary logic is hidden, instead of weird failed series set logic we had. * Fanout is well commented * Fanout closing record all errors * MergeQuerier improved API (clearer) * deferredGenericMergeSeriesSet is not needed as we return no samples anyway for failed series sets (next = false). Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Fix formatting Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Fix CI issues Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Added final tests for error handling. Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Addressed Brian's comments. * Moved hints in populate to be allocated only when needed. * Used sync.Once in secondary Querier to achieve all-or-nothing partial response logic. * Select after first Next is done will panic. NOTE: in lazySeriesSet in theory we could just panic, I think however we can totally just return error, it will panic in expand anyway. Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Utilize errWithWarnings Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Fix recently introduced expansion issue Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Add tests for secondary querier error handling Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Implement lazy merge Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Add name to test cases Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Reorganize Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review comments Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review comments Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Remove redundant warnings Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Fix rebase mistake Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> Co-authored-by: Bartlomiej Plotka <bwplotka@gmail.com>
2020-06-09 09:57:31 -07:00
res := querier.Select(false, nil, c.ms...)
for {
eok, rok := c.exp.Next(), res.Next()
testutil.Equals(t, eok, rok)
if !eok {
*: Consistent Error/Warning handling for SeriesSet iterator: Allowing Async Select (#7251) * Add errors and Warnings to SeriesSet Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Change Querier interface and refactor accordingly Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Refactor promql/engine to propagate warnings at eval stage Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review issues Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Make sure all the series from all Selects are pre-advanced Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review issues Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Separate merge series sets Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Clean Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Refactor merge querier failure handling Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Refactored and simplified fanout with improvements from incoming chunk iterator PRs. * Secondary logic is hidden, instead of weird failed series set logic we had. * Fanout is well commented * Fanout closing record all errors * MergeQuerier improved API (clearer) * deferredGenericMergeSeriesSet is not needed as we return no samples anyway for failed series sets (next = false). Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Fix formatting Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Fix CI issues Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Added final tests for error handling. Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Addressed Brian's comments. * Moved hints in populate to be allocated only when needed. * Used sync.Once in secondary Querier to achieve all-or-nothing partial response logic. * Select after first Next is done will panic. NOTE: in lazySeriesSet in theory we could just panic, I think however we can totally just return error, it will panic in expand anyway. Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Utilize errWithWarnings Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Fix recently introduced expansion issue Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Add tests for secondary querier error handling Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Implement lazy merge Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Add name to test cases Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Reorganize Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review comments Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review comments Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Remove redundant warnings Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Fix rebase mistake Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> Co-authored-by: Bartlomiej Plotka <bwplotka@gmail.com>
2020-06-09 09:57:31 -07:00
testutil.Equals(t, 0, len(res.Warnings()))
continue Outer
}
sexp := c.exp.At()
sres := res.At()
testutil.Equals(t, sexp.Labels(), sres.Labels())
smplExp, errExp := expandSeriesIterator(sexp.Iterator())
smplRes, errRes := expandSeriesIterator(sres.Iterator())
testutil.Equals(t, errExp, errRes)
testutil.Equals(t, smplExp, smplRes)
}
}
}
func TestBaseChunkSeries(t *testing.T) {
type refdSeries struct {
lset labels.Labels
chunks []chunks.Meta
2017-09-04 07:08:38 -07:00
ref uint64
}
cases := []struct {
series []refdSeries
// Postings should be in the sorted order of the series
2017-09-04 07:08:38 -07:00
postings []uint64
expIdxs []int
}{
{
series: []refdSeries{
{
lset: labels.New([]labels.Label{{Name: "a", Value: "a"}}...),
chunks: []chunks.Meta{
{Ref: 29}, {Ref: 45}, {Ref: 245}, {Ref: 123}, {Ref: 4232}, {Ref: 5344},
{Ref: 121},
},
ref: 12,
},
{
lset: labels.New([]labels.Label{{Name: "a", Value: "a"}, {Name: "b", Value: "b"}}...),
chunks: []chunks.Meta{
{Ref: 82}, {Ref: 23}, {Ref: 234}, {Ref: 65}, {Ref: 26},
},
ref: 10,
},
{
lset: labels.New([]labels.Label{{Name: "b", Value: "c"}}...),
chunks: []chunks.Meta{{Ref: 8282}},
ref: 1,
},
{
lset: labels.New([]labels.Label{{Name: "b", Value: "b"}}...),
chunks: []chunks.Meta{
{Ref: 829}, {Ref: 239}, {Ref: 2349}, {Ref: 659}, {Ref: 269},
},
ref: 108,
},
},
postings: []uint64{12, 13, 10, 108}, // 13 doesn't exist and should just be skipped over.
expIdxs: []int{0, 1, 3},
},
{
series: []refdSeries{
{
lset: labels.New([]labels.Label{{Name: "a", Value: "a"}, {Name: "b", Value: "b"}}...),
chunks: []chunks.Meta{
{Ref: 82}, {Ref: 23}, {Ref: 234}, {Ref: 65}, {Ref: 26},
},
ref: 10,
},
{
lset: labels.New([]labels.Label{{Name: "b", Value: "c"}}...),
chunks: []chunks.Meta{{Ref: 8282}},
ref: 3,
},
},
2017-09-04 07:08:38 -07:00
postings: []uint64{},
expIdxs: []int{},
},
}
for _, tc := range cases {
mi := newMockIndex()
for _, s := range tc.series {
testutil.Ok(t, mi.AddSeries(s.ref, s.lset, s.chunks...))
}
bcs := &baseChunkSeries{
p: index.NewListPostings(tc.postings),
index: mi,
tombstones: tombstones.NewMemTombstones(),
}
i := 0
for bcs.Next() {
lset, chks, _ := bcs.At()
idx := tc.expIdxs[i]
testutil.Equals(t, tc.series[idx].lset, lset)
testutil.Equals(t, tc.series[idx].chunks, chks)
i++
}
testutil.Equals(t, len(tc.expIdxs), i)
testutil.Ok(t, bcs.Err())
}
}
type itSeries struct {
si chunkenc.Iterator
}
func (s itSeries) Iterator() chunkenc.Iterator { return s.si }
func (s itSeries) Labels() labels.Labels { return labels.Labels{} }
func TestSeriesIterator(t *testing.T) {
itcases := []struct {
a, b, c []tsdbutil.Sample
exp []tsdbutil.Sample
mint, maxt int64
}{
{
a: []tsdbutil.Sample{},
b: []tsdbutil.Sample{},
c: []tsdbutil.Sample{},
exp: []tsdbutil.Sample{},
mint: math.MinInt64,
maxt: math.MaxInt64,
},
{
a: []tsdbutil.Sample{
sample{1, 2},
sample{2, 3},
sample{3, 5},
sample{6, 1},
},
b: []tsdbutil.Sample{},
c: []tsdbutil.Sample{
sample{7, 89}, sample{9, 8},
},
exp: []tsdbutil.Sample{
sample{1, 2}, sample{2, 3}, sample{3, 5}, sample{6, 1}, sample{7, 89}, sample{9, 8},
},
mint: math.MinInt64,
maxt: math.MaxInt64,
},
{
a: []tsdbutil.Sample{},
b: []tsdbutil.Sample{
sample{1, 2}, sample{2, 3}, sample{3, 5}, sample{6, 1},
},
c: []tsdbutil.Sample{
sample{7, 89}, sample{9, 8},
},
exp: []tsdbutil.Sample{
sample{1, 2}, sample{2, 3}, sample{3, 5}, sample{6, 1}, sample{7, 89}, sample{9, 8},
},
mint: 2,
maxt: 8,
},
{
a: []tsdbutil.Sample{
sample{1, 2}, sample{2, 3}, sample{3, 5}, sample{6, 1},
},
b: []tsdbutil.Sample{
sample{7, 89}, sample{9, 8},
},
c: []tsdbutil.Sample{
sample{10, 22}, sample{203, 3493},
},
exp: []tsdbutil.Sample{
sample{1, 2}, sample{2, 3}, sample{3, 5}, sample{6, 1}, sample{7, 89}, sample{9, 8}, sample{10, 22}, sample{203, 3493},
},
mint: 6,
maxt: 10,
},
}
seekcases := []struct {
a, b, c []tsdbutil.Sample
seek int64
success bool
exp []tsdbutil.Sample
mint, maxt int64
}{
{
a: []tsdbutil.Sample{},
b: []tsdbutil.Sample{},
c: []tsdbutil.Sample{},
seek: 0,
success: false,
exp: nil,
},
{
a: []tsdbutil.Sample{
sample{2, 3},
},
b: []tsdbutil.Sample{},
c: []tsdbutil.Sample{
sample{7, 89}, sample{9, 8},
},
seek: 10,
success: false,
exp: nil,
mint: math.MinInt64,
maxt: math.MaxInt64,
},
{
a: []tsdbutil.Sample{},
b: []tsdbutil.Sample{
sample{1, 2}, sample{3, 5}, sample{6, 1},
},
c: []tsdbutil.Sample{
sample{7, 89}, sample{9, 8},
},
seek: 2,
success: true,
exp: []tsdbutil.Sample{
sample{3, 5}, sample{6, 1}, sample{7, 89}, sample{9, 8},
},
mint: 5,
maxt: 8,
},
{
a: []tsdbutil.Sample{
sample{6, 1},
},
b: []tsdbutil.Sample{
sample{9, 8},
},
c: []tsdbutil.Sample{
sample{10, 22}, sample{203, 3493},
},
seek: 10,
success: true,
exp: []tsdbutil.Sample{
sample{10, 22}, sample{203, 3493},
},
mint: 10,
maxt: 203,
},
{
a: []tsdbutil.Sample{
sample{6, 1},
},
b: []tsdbutil.Sample{
sample{9, 8},
},
c: []tsdbutil.Sample{
sample{10, 22}, sample{203, 3493},
},
seek: 203,
success: true,
exp: []tsdbutil.Sample{
sample{203, 3493},
},
mint: 7,
maxt: 203,
},
}
t.Run("Chunk", func(t *testing.T) {
for _, tc := range itcases {
chkMetas := []chunks.Meta{
tsdbutil.ChunkFromSamples(tc.a),
tsdbutil.ChunkFromSamples(tc.b),
tsdbutil.ChunkFromSamples(tc.c),
}
res := newChunkSeriesIterator(chkMetas, nil, tc.mint, tc.maxt)
smplValid := make([]tsdbutil.Sample, 0)
for _, s := range tc.exp {
if s.T() >= tc.mint && s.T() <= tc.maxt {
smplValid = append(smplValid, tsdbutil.Sample(s))
}
}
exp := newListSeriesIterator(smplValid)
smplExp, errExp := expandSeriesIterator(exp)
smplRes, errRes := expandSeriesIterator(res)
testutil.Equals(t, errExp, errRes)
testutil.Equals(t, smplExp, smplRes)
}
t.Run("Seek", func(t *testing.T) {
extra := []struct {
a, b, c []tsdbutil.Sample
seek int64
success bool
exp []tsdbutil.Sample
mint, maxt int64
}{
{
a: []tsdbutil.Sample{
sample{6, 1},
},
b: []tsdbutil.Sample{
sample{9, 8},
},
c: []tsdbutil.Sample{
sample{10, 22}, sample{203, 3493},
},
seek: 203,
success: false,
exp: nil,
mint: 2,
maxt: 202,
},
{
a: []tsdbutil.Sample{
sample{6, 1},
},
b: []tsdbutil.Sample{
sample{9, 8},
},
c: []tsdbutil.Sample{
sample{10, 22}, sample{203, 3493},
},
seek: 5,
success: true,
exp: []tsdbutil.Sample{sample{10, 22}},
mint: 10,
maxt: 202,
},
}
seekcases2 := append(seekcases, extra...)
for _, tc := range seekcases2 {
chkMetas := []chunks.Meta{
tsdbutil.ChunkFromSamples(tc.a),
tsdbutil.ChunkFromSamples(tc.b),
tsdbutil.ChunkFromSamples(tc.c),
}
res := newChunkSeriesIterator(chkMetas, nil, tc.mint, tc.maxt)
smplValid := make([]tsdbutil.Sample, 0)
for _, s := range tc.exp {
if s.T() >= tc.mint && s.T() <= tc.maxt {
smplValid = append(smplValid, tsdbutil.Sample(s))
}
}
exp := newListSeriesIterator(smplValid)
testutil.Equals(t, tc.success, res.Seek(tc.seek))
if tc.success {
// Init the list and then proceed to check.
remaining := exp.Next()
testutil.Assert(t, remaining == true, "")
for remaining {
sExp, eExp := exp.At()
sRes, eRes := res.At()
testutil.Equals(t, eExp, eRes)
testutil.Equals(t, sExp, sRes)
remaining = exp.Next()
testutil.Equals(t, remaining, res.Next())
}
}
}
})
})
t.Run("Chain", func(t *testing.T) {
Vertical query merging and compaction (#370) * Vertical series iterator Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Select overlapped blocks first in compactor Plan() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Added vertical compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Code cleanup and comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix tests Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Add benchmark for compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Perform vertical compaction only when blocks are overlapping. Actions for vertical compaction: * Sorting chunk metas * Calling chunks.MergeOverlappingChunks on the chunks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for vertical compaction * BenchmarkNormalCompaction => BenchmarkCompaction * Moved the benchmark from db_test.go to compact_test.go Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for query iterator and seek for non overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Vertical query merge only for overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Simplify logging in Compact(...) Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG.md Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Calculate overlapping inside populateBlock Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * MinTime and MaxTime for BlockReader. Using this to find overlapping blocks in populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Sort blocks w.r.t. MinTime in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping in LeveledCompactor.write() instead of returning bool Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping inside LeveledCompactor.populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor createBlock to take optional []Series Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * review1 Signed-off-by: Krasi Georgiev <kgeorgie@redhat.com> * Updated CHANGELOG and minor nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor iterator and seek benchmarks for Querier. Also has as overlapping blocks. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Additional test case Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * genSeries takes optional labels. Updated BenchmarkQueryIterator and BenchmarkQuerySeek. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Split genSeries into genSeries and populateSeries Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Check error in benchmark Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Warn about overlapping blocks in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in>
2019-02-14 05:29:41 -08:00
// Extra cases for overlapping series.
itcasesExtra := []struct {
a, b, c []tsdbutil.Sample
exp []tsdbutil.Sample
mint, maxt int64
}{
{
a: []tsdbutil.Sample{
sample{1, 2}, sample{2, 3}, sample{3, 5}, sample{6, 1},
},
b: []tsdbutil.Sample{
sample{5, 49}, sample{7, 89}, sample{9, 8},
},
c: []tsdbutil.Sample{
sample{2, 33}, sample{4, 44}, sample{10, 3},
},
exp: []tsdbutil.Sample{
sample{1, 2}, sample{2, 33}, sample{3, 5}, sample{4, 44}, sample{5, 49}, sample{6, 1}, sample{7, 89}, sample{9, 8}, sample{10, 3},
},
mint: math.MinInt64,
maxt: math.MaxInt64,
},
{
a: []tsdbutil.Sample{
sample{1, 2}, sample{2, 3}, sample{9, 5}, sample{13, 1},
},
b: []tsdbutil.Sample{},
c: []tsdbutil.Sample{
sample{1, 23}, sample{2, 342}, sample{3, 25}, sample{6, 11},
},
exp: []tsdbutil.Sample{
sample{1, 23}, sample{2, 342}, sample{3, 25}, sample{6, 11}, sample{9, 5}, sample{13, 1},
},
mint: math.MinInt64,
maxt: math.MaxInt64,
},
}
for _, tc := range itcases {
a, b, c := itSeries{newListSeriesIterator(tc.a)},
itSeries{newListSeriesIterator(tc.b)},
itSeries{newListSeriesIterator(tc.c)}
res := newChainedSeriesIterator(a, b, c)
exp := newListSeriesIterator([]tsdbutil.Sample(tc.exp))
smplExp, errExp := expandSeriesIterator(exp)
smplRes, errRes := expandSeriesIterator(res)
testutil.Equals(t, errExp, errRes)
testutil.Equals(t, smplExp, smplRes)
}
Vertical query merging and compaction (#370) * Vertical series iterator Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Select overlapped blocks first in compactor Plan() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Added vertical compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Code cleanup and comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix tests Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Add benchmark for compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Perform vertical compaction only when blocks are overlapping. Actions for vertical compaction: * Sorting chunk metas * Calling chunks.MergeOverlappingChunks on the chunks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for vertical compaction * BenchmarkNormalCompaction => BenchmarkCompaction * Moved the benchmark from db_test.go to compact_test.go Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for query iterator and seek for non overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Vertical query merge only for overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Simplify logging in Compact(...) Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG.md Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Calculate overlapping inside populateBlock Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * MinTime and MaxTime for BlockReader. Using this to find overlapping blocks in populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Sort blocks w.r.t. MinTime in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping in LeveledCompactor.write() instead of returning bool Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping inside LeveledCompactor.populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor createBlock to take optional []Series Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * review1 Signed-off-by: Krasi Georgiev <kgeorgie@redhat.com> * Updated CHANGELOG and minor nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor iterator and seek benchmarks for Querier. Also has as overlapping blocks. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Additional test case Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * genSeries takes optional labels. Updated BenchmarkQueryIterator and BenchmarkQuerySeek. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Split genSeries into genSeries and populateSeries Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Check error in benchmark Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Warn about overlapping blocks in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in>
2019-02-14 05:29:41 -08:00
for _, tc := range append(itcases, itcasesExtra...) {
a, b, c := itSeries{newListSeriesIterator(tc.a)},
itSeries{newListSeriesIterator(tc.b)},
itSeries{newListSeriesIterator(tc.c)}
res := newVerticalMergeSeriesIterator(a, b, c)
exp := newListSeriesIterator([]tsdbutil.Sample(tc.exp))
smplExp, errExp := expandSeriesIterator(exp)
smplRes, errRes := expandSeriesIterator(res)
testutil.Equals(t, errExp, errRes)
testutil.Equals(t, smplExp, smplRes)
}
t.Run("Seek", func(t *testing.T) {
for _, tc := range seekcases {
ress := []chunkenc.Iterator{
Vertical query merging and compaction (#370) * Vertical series iterator Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Select overlapped blocks first in compactor Plan() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Added vertical compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Code cleanup and comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix tests Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Add benchmark for compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Perform vertical compaction only when blocks are overlapping. Actions for vertical compaction: * Sorting chunk metas * Calling chunks.MergeOverlappingChunks on the chunks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for vertical compaction * BenchmarkNormalCompaction => BenchmarkCompaction * Moved the benchmark from db_test.go to compact_test.go Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for query iterator and seek for non overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Vertical query merge only for overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Simplify logging in Compact(...) Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG.md Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Calculate overlapping inside populateBlock Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * MinTime and MaxTime for BlockReader. Using this to find overlapping blocks in populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Sort blocks w.r.t. MinTime in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping in LeveledCompactor.write() instead of returning bool Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping inside LeveledCompactor.populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor createBlock to take optional []Series Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * review1 Signed-off-by: Krasi Georgiev <kgeorgie@redhat.com> * Updated CHANGELOG and minor nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor iterator and seek benchmarks for Querier. Also has as overlapping blocks. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Additional test case Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * genSeries takes optional labels. Updated BenchmarkQueryIterator and BenchmarkQuerySeek. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Split genSeries into genSeries and populateSeries Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Check error in benchmark Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Warn about overlapping blocks in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in>
2019-02-14 05:29:41 -08:00
newChainedSeriesIterator(
itSeries{newListSeriesIterator(tc.a)},
itSeries{newListSeriesIterator(tc.b)},
itSeries{newListSeriesIterator(tc.c)},
),
newVerticalMergeSeriesIterator(
itSeries{newListSeriesIterator(tc.a)},
itSeries{newListSeriesIterator(tc.b)},
itSeries{newListSeriesIterator(tc.c)},
),
}
Vertical query merging and compaction (#370) * Vertical series iterator Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Select overlapped blocks first in compactor Plan() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Added vertical compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Code cleanup and comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix tests Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Add benchmark for compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Perform vertical compaction only when blocks are overlapping. Actions for vertical compaction: * Sorting chunk metas * Calling chunks.MergeOverlappingChunks on the chunks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for vertical compaction * BenchmarkNormalCompaction => BenchmarkCompaction * Moved the benchmark from db_test.go to compact_test.go Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for query iterator and seek for non overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Vertical query merge only for overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Simplify logging in Compact(...) Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG.md Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Calculate overlapping inside populateBlock Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * MinTime and MaxTime for BlockReader. Using this to find overlapping blocks in populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Sort blocks w.r.t. MinTime in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping in LeveledCompactor.write() instead of returning bool Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping inside LeveledCompactor.populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor createBlock to take optional []Series Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * review1 Signed-off-by: Krasi Georgiev <kgeorgie@redhat.com> * Updated CHANGELOG and minor nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor iterator and seek benchmarks for Querier. Also has as overlapping blocks. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Additional test case Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * genSeries takes optional labels. Updated BenchmarkQueryIterator and BenchmarkQuerySeek. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Split genSeries into genSeries and populateSeries Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Check error in benchmark Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Warn about overlapping blocks in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in>
2019-02-14 05:29:41 -08:00
for _, res := range ress {
exp := newListSeriesIterator(tc.exp)
Vertical query merging and compaction (#370) * Vertical series iterator Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Select overlapped blocks first in compactor Plan() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Added vertical compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Code cleanup and comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix tests Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Add benchmark for compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Perform vertical compaction only when blocks are overlapping. Actions for vertical compaction: * Sorting chunk metas * Calling chunks.MergeOverlappingChunks on the chunks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for vertical compaction * BenchmarkNormalCompaction => BenchmarkCompaction * Moved the benchmark from db_test.go to compact_test.go Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for query iterator and seek for non overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Vertical query merge only for overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Simplify logging in Compact(...) Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG.md Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Calculate overlapping inside populateBlock Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * MinTime and MaxTime for BlockReader. Using this to find overlapping blocks in populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Sort blocks w.r.t. MinTime in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping in LeveledCompactor.write() instead of returning bool Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping inside LeveledCompactor.populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor createBlock to take optional []Series Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * review1 Signed-off-by: Krasi Georgiev <kgeorgie@redhat.com> * Updated CHANGELOG and minor nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor iterator and seek benchmarks for Querier. Also has as overlapping blocks. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Additional test case Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * genSeries takes optional labels. Updated BenchmarkQueryIterator and BenchmarkQuerySeek. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Split genSeries into genSeries and populateSeries Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Check error in benchmark Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Warn about overlapping blocks in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in>
2019-02-14 05:29:41 -08:00
testutil.Equals(t, tc.success, res.Seek(tc.seek))
Vertical query merging and compaction (#370) * Vertical series iterator Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Select overlapped blocks first in compactor Plan() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Added vertical compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Code cleanup and comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix tests Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Add benchmark for compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Perform vertical compaction only when blocks are overlapping. Actions for vertical compaction: * Sorting chunk metas * Calling chunks.MergeOverlappingChunks on the chunks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for vertical compaction * BenchmarkNormalCompaction => BenchmarkCompaction * Moved the benchmark from db_test.go to compact_test.go Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for query iterator and seek for non overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Vertical query merge only for overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Simplify logging in Compact(...) Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG.md Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Calculate overlapping inside populateBlock Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * MinTime and MaxTime for BlockReader. Using this to find overlapping blocks in populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Sort blocks w.r.t. MinTime in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping in LeveledCompactor.write() instead of returning bool Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping inside LeveledCompactor.populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor createBlock to take optional []Series Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * review1 Signed-off-by: Krasi Georgiev <kgeorgie@redhat.com> * Updated CHANGELOG and minor nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor iterator and seek benchmarks for Querier. Also has as overlapping blocks. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Additional test case Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * genSeries takes optional labels. Updated BenchmarkQueryIterator and BenchmarkQuerySeek. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Split genSeries into genSeries and populateSeries Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Check error in benchmark Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Warn about overlapping blocks in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in>
2019-02-14 05:29:41 -08:00
if tc.success {
// Init the list and then proceed to check.
remaining := exp.Next()
testutil.Assert(t, remaining == true, "")
Vertical query merging and compaction (#370) * Vertical series iterator Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Select overlapped blocks first in compactor Plan() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Added vertical compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Code cleanup and comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix tests Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Add benchmark for compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Perform vertical compaction only when blocks are overlapping. Actions for vertical compaction: * Sorting chunk metas * Calling chunks.MergeOverlappingChunks on the chunks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for vertical compaction * BenchmarkNormalCompaction => BenchmarkCompaction * Moved the benchmark from db_test.go to compact_test.go Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for query iterator and seek for non overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Vertical query merge only for overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Simplify logging in Compact(...) Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG.md Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Calculate overlapping inside populateBlock Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * MinTime and MaxTime for BlockReader. Using this to find overlapping blocks in populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Sort blocks w.r.t. MinTime in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping in LeveledCompactor.write() instead of returning bool Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping inside LeveledCompactor.populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor createBlock to take optional []Series Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * review1 Signed-off-by: Krasi Georgiev <kgeorgie@redhat.com> * Updated CHANGELOG and minor nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor iterator and seek benchmarks for Querier. Also has as overlapping blocks. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Additional test case Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * genSeries takes optional labels. Updated BenchmarkQueryIterator and BenchmarkQuerySeek. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Split genSeries into genSeries and populateSeries Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Check error in benchmark Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Warn about overlapping blocks in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in>
2019-02-14 05:29:41 -08:00
for remaining {
sExp, eExp := exp.At()
sRes, eRes := res.At()
testutil.Equals(t, eExp, eRes)
testutil.Equals(t, sExp, sRes)
Vertical query merging and compaction (#370) * Vertical series iterator Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Select overlapped blocks first in compactor Plan() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Added vertical compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Code cleanup and comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix tests Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Add benchmark for compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Perform vertical compaction only when blocks are overlapping. Actions for vertical compaction: * Sorting chunk metas * Calling chunks.MergeOverlappingChunks on the chunks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for vertical compaction * BenchmarkNormalCompaction => BenchmarkCompaction * Moved the benchmark from db_test.go to compact_test.go Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for query iterator and seek for non overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Vertical query merge only for overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Simplify logging in Compact(...) Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG.md Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Calculate overlapping inside populateBlock Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * MinTime and MaxTime for BlockReader. Using this to find overlapping blocks in populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Sort blocks w.r.t. MinTime in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping in LeveledCompactor.write() instead of returning bool Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping inside LeveledCompactor.populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor createBlock to take optional []Series Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * review1 Signed-off-by: Krasi Georgiev <kgeorgie@redhat.com> * Updated CHANGELOG and minor nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor iterator and seek benchmarks for Querier. Also has as overlapping blocks. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Additional test case Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * genSeries takes optional labels. Updated BenchmarkQueryIterator and BenchmarkQuerySeek. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Split genSeries into genSeries and populateSeries Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Check error in benchmark Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Warn about overlapping blocks in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in>
2019-02-14 05:29:41 -08:00
remaining = exp.Next()
testutil.Equals(t, remaining, res.Next())
}
}
}
}
})
})
}
Make head Postings only return series in time range benchmark old ns/op new ns/op delta BenchmarkQuerierSelect/Head/1of1000000-8 405805161 120436132 -70.32% BenchmarkQuerierSelect/Head/10of1000000-8 403079620 120624292 -70.07% BenchmarkQuerierSelect/Head/100of1000000-8 404678647 120923522 -70.12% BenchmarkQuerierSelect/Head/1000of1000000-8 403145813 118636563 -70.57% BenchmarkQuerierSelect/Head/10000of1000000-8 405020046 125716206 -68.96% BenchmarkQuerierSelect/Head/100000of1000000-8 426305002 175808499 -58.76% BenchmarkQuerierSelect/Head/1000000of1000000-8 619002108 567013003 -8.40% BenchmarkQuerierSelect/SortedHead/1of1000000-8 1276316086 120281094 -90.58% BenchmarkQuerierSelect/SortedHead/10of1000000-8 1282631170 121836526 -90.50% BenchmarkQuerierSelect/SortedHead/100of1000000-8 1325824787 121174967 -90.86% BenchmarkQuerierSelect/SortedHead/1000of1000000-8 1271386268 121025117 -90.48% BenchmarkQuerierSelect/SortedHead/10000of1000000-8 1280223345 130838948 -89.78% BenchmarkQuerierSelect/SortedHead/100000of1000000-8 1271401620 243635515 -80.84% BenchmarkQuerierSelect/SortedHead/1000000of1000000-8 1360256090 1307744674 -3.86% BenchmarkQuerierSelect/Block/1of1000000-8 748183120 707888498 -5.39% BenchmarkQuerierSelect/Block/10of1000000-8 741084129 716317249 -3.34% BenchmarkQuerierSelect/Block/100of1000000-8 722157273 735624256 +1.86% BenchmarkQuerierSelect/Block/1000of1000000-8 727587744 731981838 +0.60% BenchmarkQuerierSelect/Block/10000of1000000-8 727518578 726860308 -0.09% BenchmarkQuerierSelect/Block/100000of1000000-8 765577046 757382386 -1.07% BenchmarkQuerierSelect/Block/1000000of1000000-8 1126722881 1084779083 -3.72% benchmark old allocs new allocs delta BenchmarkQuerierSelect/Head/1of1000000-8 4000018 24 -100.00% BenchmarkQuerierSelect/Head/10of1000000-8 4000036 82 -100.00% BenchmarkQuerierSelect/Head/100of1000000-8 4000216 625 -99.98% BenchmarkQuerierSelect/Head/1000of1000000-8 4002016 6028 -99.85% BenchmarkQuerierSelect/Head/10000of1000000-8 4020016 60037 -98.51% BenchmarkQuerierSelect/Head/100000of1000000-8 4200016 600047 -85.71% BenchmarkQuerierSelect/Head/1000000of1000000-8 6000016 6000016 +0.00% BenchmarkQuerierSelect/SortedHead/1of1000000-8 4000055 28 -100.00% BenchmarkQuerierSelect/SortedHead/10of1000000-8 4000073 87 -100.00% BenchmarkQuerierSelect/SortedHead/100of1000000-8 4000253 630 -99.98% BenchmarkQuerierSelect/SortedHead/1000of1000000-8 4002053 6036 -99.85% BenchmarkQuerierSelect/SortedHead/10000of1000000-8 4020053 60054 -98.51% BenchmarkQuerierSelect/SortedHead/100000of1000000-8 4200053 600074 -85.71% BenchmarkQuerierSelect/SortedHead/1000000of1000000-8 6000053 6000053 +0.00% BenchmarkQuerierSelect/Block/1of1000000-8 6000021 6000021 +0.00% BenchmarkQuerierSelect/Block/10of1000000-8 6000057 6000057 +0.00% BenchmarkQuerierSelect/Block/100of1000000-8 6000417 6000417 +0.00% BenchmarkQuerierSelect/Block/1000of1000000-8 6004017 6004017 +0.00% BenchmarkQuerierSelect/Block/10000of1000000-8 6040017 6040017 +0.00% BenchmarkQuerierSelect/Block/100000of1000000-8 6400017 6400017 +0.00% BenchmarkQuerierSelect/Block/1000000of1000000-8 10000018 10000018 +0.00% benchmark old bytes new bytes delta BenchmarkQuerierSelect/Head/1of1000000-8 176001177 1392 -100.00% BenchmarkQuerierSelect/Head/10of1000000-8 176002329 4368 -100.00% BenchmarkQuerierSelect/Head/100of1000000-8 176013849 33520 -99.98% BenchmarkQuerierSelect/Head/1000of1000000-8 176129056 321456 -99.82% BenchmarkQuerierSelect/Head/10000of1000000-8 177281049 3427376 -98.07% BenchmarkQuerierSelect/Head/100000of1000000-8 188801049 35055408 -81.43% BenchmarkQuerierSelect/Head/1000000of1000000-8 304001059 304001049 -0.00% BenchmarkQuerierSelect/SortedHead/1of1000000-8 229192188 2488 -100.00% BenchmarkQuerierSelect/SortedHead/10of1000000-8 229193340 5568 -100.00% BenchmarkQuerierSelect/SortedHead/100of1000000-8 229204860 35536 -99.98% BenchmarkQuerierSelect/SortedHead/1000of1000000-8 229320060 345104 -99.85% BenchmarkQuerierSelect/SortedHead/10000of1000000-8 230472060 3894672 -98.31% BenchmarkQuerierSelect/SortedHead/100000of1000000-8 241992060 40511632 -83.26% BenchmarkQuerierSelect/SortedHead/1000000of1000000-8 357192060 357192060 +0.00% BenchmarkQuerierSelect/Block/1of1000000-8 227201516 227201506 -0.00% BenchmarkQuerierSelect/Block/10of1000000-8 227203057 227203041 -0.00% BenchmarkQuerierSelect/Block/100of1000000-8 227217161 227217165 +0.00% BenchmarkQuerierSelect/Block/1000of1000000-8 227358279 227358289 +0.00% BenchmarkQuerierSelect/Block/10000of1000000-8 228769485 228769475 -0.00% BenchmarkQuerierSelect/Block/100000of1000000-8 242881487 242881477 -0.00% BenchmarkQuerierSelect/Block/1000000of1000000-8 384001705 384001705 +0.00% Signed-off-by: Julien Pivotto <roidelapluie@inuits.eu>
2020-01-21 11:30:20 -08:00
// Regression for: https://github.com/prometheus/tsdb/pull/97
2017-06-30 06:06:27 -07:00
func TestChunkSeriesIterator_DoubleSeek(t *testing.T) {
chkMetas := []chunks.Meta{
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{}),
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 1}, sample{2, 2}, sample{3, 3}}),
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{4, 4}, sample{5, 5}}),
}
res := newChunkSeriesIterator(chkMetas, nil, 2, 8)
testutil.Assert(t, res.Seek(1) == true, "")
testutil.Assert(t, res.Seek(2) == true, "")
ts, v := res.At()
testutil.Equals(t, int64(2), ts)
testutil.Equals(t, float64(2), v)
}
2017-06-30 06:06:27 -07:00
// Regression when seeked chunks were still found via binary search and we always
// skipped to the end when seeking a value in the current chunk.
func TestChunkSeriesIterator_SeekInCurrentChunk(t *testing.T) {
metas := []chunks.Meta{
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{}),
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 2}, sample{3, 4}, sample{5, 6}, sample{7, 8}}),
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{}),
2017-06-30 06:06:27 -07:00
}
it := newChunkSeriesIterator(metas, nil, 1, 7)
testutil.Assert(t, it.Next() == true, "")
2017-06-30 06:06:27 -07:00
ts, v := it.At()
testutil.Equals(t, int64(1), ts)
testutil.Equals(t, float64(2), v)
2017-06-30 06:06:27 -07:00
testutil.Assert(t, it.Seek(4) == true, "")
2017-06-30 06:06:27 -07:00
ts, v = it.At()
testutil.Equals(t, int64(5), ts)
testutil.Equals(t, float64(6), v)
2017-06-30 06:06:27 -07:00
}
// Regression when calling Next() with a time bounded to fit within two samples.
// Seek gets called and advances beyond the max time, which was just accepted as a valid sample.
func TestChunkSeriesIterator_NextWithMinTime(t *testing.T) {
metas := []chunks.Meta{
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 6}, sample{5, 6}, sample{7, 8}}),
}
it := newChunkSeriesIterator(metas, nil, 2, 4)
testutil.Assert(t, it.Next() == false, "")
}
func TestPopulatedCSReturnsValidChunkSlice(t *testing.T) {
lbls := []labels.Labels{labels.New(labels.Label{Name: "a", Value: "b"})}
chunkMetas := [][]chunks.Meta{
{
{MinTime: 1, MaxTime: 2, Ref: 1},
{MinTime: 3, MaxTime: 4, Ref: 2},
{MinTime: 10, MaxTime: 12, Ref: 3},
},
}
cr := mockChunkReader(
map[uint64]chunkenc.Chunk{
1: chunkenc.NewXORChunk(),
2: chunkenc.NewXORChunk(),
3: chunkenc.NewXORChunk(),
},
)
m := &mockChunkSeriesSet{l: lbls, cm: chunkMetas, i: -1}
p := &populatedChunkSeries{
set: m,
chunks: cr,
mint: 0,
maxt: 0,
}
testutil.Assert(t, p.Next() == false, "")
p.mint = 6
p.maxt = 9
testutil.Assert(t, p.Next() == false, "")
// Test the case where 1 chunk could cause an unpopulated chunk to be returned.
chunkMetas = [][]chunks.Meta{
{
{MinTime: 1, MaxTime: 2, Ref: 1},
},
}
m = &mockChunkSeriesSet{l: lbls, cm: chunkMetas, i: -1}
p = &populatedChunkSeries{
set: m,
chunks: cr,
mint: 10,
maxt: 15,
}
testutil.Assert(t, p.Next() == false, "")
}
type mockChunkSeriesSet struct {
l []labels.Labels
cm [][]chunks.Meta
i int
}
func (m *mockChunkSeriesSet) Next() bool {
if len(m.l) != len(m.cm) {
return false
}
m.i++
return m.i < len(m.l)
}
func (m *mockChunkSeriesSet) At() (labels.Labels, []chunks.Meta, tombstones.Intervals) {
return m.l[m.i], m.cm[m.i], nil
}
func (m *mockChunkSeriesSet) Err() error {
return nil
}
2017-07-05 07:19:28 -07:00
// Test the cost of merging series sets for different number of merged sets and their size.
// The subset are all equivalent so this does not capture merging of partial or non-overlapping sets well.
// TODO(bwplotka): Merge with storage merged series set benchmark.
2017-07-05 07:19:28 -07:00
func BenchmarkMergedSeriesSet(b *testing.B) {
var sel = func(sets []storage.SeriesSet) storage.SeriesSet {
return NewMergedSeriesSet(sets)
2017-07-05 07:19:28 -07:00
}
for _, k := range []int{
100,
1000,
10000,
20000,
2017-07-05 07:19:28 -07:00
} {
for _, j := range []int{1, 2, 4, 8, 16, 32} {
b.Run(fmt.Sprintf("series=%d,blocks=%d", k, j), func(b *testing.B) {
lbls, err := labels.ReadLabels(filepath.Join("testdata", "20kseries.json"), k)
testutil.Ok(b, err)
2017-07-05 07:19:28 -07:00
sort.Sort(labels.Slice(lbls))
in := make([][]storage.Series, j)
2017-07-05 07:19:28 -07:00
for _, l := range lbls {
l2 := l
for j := range in {
in[j] = append(in[j], &mockSeries{labels: func() labels.Labels { return l2 }})
}
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
var sets []storage.SeriesSet
2017-07-05 07:19:28 -07:00
for _, s := range in {
sets = append(sets, newMockSeriesSet(s))
2017-07-05 07:19:28 -07:00
}
ms := sel(sets)
i := 0
for ms.Next() {
i++
}
testutil.Ok(b, ms.Err())
testutil.Equals(b, len(lbls), i)
2017-07-05 07:19:28 -07:00
}
})
}
}
}
type mockChunkReader map[uint64]chunkenc.Chunk
func (cr mockChunkReader) Chunk(id uint64) (chunkenc.Chunk, error) {
chk, ok := cr[id]
if ok {
return chk, nil
}
return nil, errors.New("Chunk with ref not found")
}
func (cr mockChunkReader) Close() error {
return nil
}
func TestDeletedIterator(t *testing.T) {
chk := chunkenc.NewXORChunk()
app, err := chk.Appender()
testutil.Ok(t, err)
// Insert random stuff from (0, 1000).
act := make([]sample, 1000)
for i := 0; i < 1000; i++ {
act[i].t = int64(i)
act[i].v = rand.Float64()
app.Append(act[i].t, act[i].v)
}
cases := []struct {
r tombstones.Intervals
}{
{r: tombstones.Intervals{{Mint: 1, Maxt: 20}}},
{r: tombstones.Intervals{{Mint: 1, Maxt: 10}, {Mint: 12, Maxt: 20}, {Mint: 21, Maxt: 23}, {Mint: 25, Maxt: 30}}},
{r: tombstones.Intervals{{Mint: 1, Maxt: 10}, {Mint: 12, Maxt: 20}, {Mint: 20, Maxt: 30}}},
{r: tombstones.Intervals{{Mint: 1, Maxt: 10}, {Mint: 12, Maxt: 23}, {Mint: 25, Maxt: 30}}},
{r: tombstones.Intervals{{Mint: 1, Maxt: 23}, {Mint: 12, Maxt: 20}, {Mint: 25, Maxt: 30}}},
{r: tombstones.Intervals{{Mint: 1, Maxt: 23}, {Mint: 12, Maxt: 20}, {Mint: 25, Maxt: 3000}}},
{r: tombstones.Intervals{{Mint: 0, Maxt: 2000}}},
{r: tombstones.Intervals{{Mint: 500, Maxt: 2000}}},
{r: tombstones.Intervals{{Mint: 0, Maxt: 200}}},
{r: tombstones.Intervals{{Mint: 1000, Maxt: 20000}}},
}
for _, c := range cases {
i := int64(-1)
it := &deletedIterator{it: chk.Iterator(nil), intervals: c.r[:]}
ranges := c.r[:]
for it.Next() {
i++
for _, tr := range ranges {
if tr.InBounds(i) {
i = tr.Maxt + 1
ranges = ranges[1:]
}
}
testutil.Assert(t, i < 1000, "")
ts, v := it.At()
testutil.Equals(t, act[i].t, ts)
testutil.Equals(t, act[i].v, v)
}
// There has been an extra call to Next().
i++
for _, tr := range ranges {
if tr.InBounds(i) {
i = tr.Maxt + 1
ranges = ranges[1:]
}
}
testutil.Assert(t, i >= 1000, "")
testutil.Ok(t, it.Err())
}
}
func TestDeletedIterator_WithSeek(t *testing.T) {
chk := chunkenc.NewXORChunk()
app, err := chk.Appender()
testutil.Ok(t, err)
// Insert random stuff from (0, 1000).
act := make([]sample, 1000)
for i := 0; i < 1000; i++ {
act[i].t = int64(i)
act[i].v = float64(i)
app.Append(act[i].t, act[i].v)
}
cases := []struct {
r tombstones.Intervals
seek int64
ok bool
seekedTs int64
}{
{r: tombstones.Intervals{{Mint: 1, Maxt: 20}}, seek: 1, ok: true, seekedTs: 21},
{r: tombstones.Intervals{{Mint: 1, Maxt: 20}}, seek: 20, ok: true, seekedTs: 21},
{r: tombstones.Intervals{{Mint: 1, Maxt: 20}}, seek: 10, ok: true, seekedTs: 21},
{r: tombstones.Intervals{{Mint: 1, Maxt: 20}}, seek: 999, ok: true, seekedTs: 999},
{r: tombstones.Intervals{{Mint: 1, Maxt: 20}}, seek: 1000, ok: false},
{r: tombstones.Intervals{{Mint: 1, Maxt: 23}, {Mint: 24, Maxt: 40}, {Mint: 45, Maxt: 3000}}, seek: 1, ok: true, seekedTs: 41},
{r: tombstones.Intervals{{Mint: 5, Maxt: 23}, {Mint: 24, Maxt: 40}, {Mint: 41, Maxt: 3000}}, seek: 5, ok: false},
{r: tombstones.Intervals{{Mint: 0, Maxt: 2000}}, seek: 10, ok: false},
{r: tombstones.Intervals{{Mint: 500, Maxt: 2000}}, seek: 10, ok: true, seekedTs: 10},
{r: tombstones.Intervals{{Mint: 500, Maxt: 2000}}, seek: 501, ok: false},
}
for _, c := range cases {
it := &deletedIterator{it: chk.Iterator(nil), intervals: c.r[:]}
testutil.Equals(t, c.ok, it.Seek(c.seek))
if c.ok {
ts, _ := it.At()
testutil.Equals(t, c.seekedTs, ts)
}
}
}
type series struct {
l labels.Labels
chunks []chunks.Meta
}
type mockIndex struct {
series map[uint64]series
postings map[labels.Label][]uint64
symbols map[string]struct{}
}
func newMockIndex() mockIndex {
ix := mockIndex{
series: make(map[uint64]series),
postings: make(map[labels.Label][]uint64),
symbols: make(map[string]struct{}),
}
return ix
}
Stream symbols during compaction. (#6468) Rather than buffer up symbols in RAM, do it one by one during compaction. Then use the reader's symbol handling for symbol lookups during the rest of the index write. There is some slowdown in compaction, due to having to look through a file rather than a hash lookup. This is noise to the overall cost of compacting series with thousands of samples though. benchmark old ns/op new ns/op delta BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 539917175 675341565 +25.08% BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 2441815993 2477453524 +1.46% BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 3978543559 3922909687 -1.40% BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 8430219716 8586610007 +1.86% BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 1786424591 1909552782 +6.89% BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 5328998202 6020839950 +12.98% BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 10085059958 11085278690 +9.92% BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 25497010155 27018079806 +5.97% BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 2427391406 2817217987 +16.06% BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 2592965497 2538805050 -2.09% BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 2437388343 2668012858 +9.46% BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 2317095324 2787423966 +20.30% BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 2600239857 2096973860 -19.35% benchmark old allocs new allocs delta BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 500851 470794 -6.00% BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 821527 791451 -3.66% BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 1141562 1111508 -2.63% BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 2141576 2111504 -1.40% BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 871466 841424 -3.45% BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 1941428 1911415 -1.55% BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 3071573 3041510 -0.98% BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 6771648 6741509 -0.45% BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 731493 824888 +12.77% BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 793918 887311 +11.76% BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 811842 905204 +11.50% BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 832244 925081 +11.16% BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 921553 1019162 +10.59% benchmark old bytes new bytes delta BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 40532648 35698276 -11.93% BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 60340216 53409568 -11.49% BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 81087336 72065552 -11.13% BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 142485576 120878544 -15.16% BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 208661368 203831136 -2.31% BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 347345904 340484696 -1.98% BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 585185856 576244648 -1.53% BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 1357641792 1358966528 +0.10% BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 126486664 119666744 -5.39% BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 122323192 115117224 -5.89% BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 126404504 119469864 -5.49% BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 119047832 112230408 -5.73% BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 136576016 116634800 -14.60% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-12-17 11:49:54 -08:00
func (m mockIndex) Symbols() index.StringIter {
l := []string{}
for s := range m.symbols {
l = append(l, s)
}
sort.Strings(l)
return index.NewStringListIter(l)
}
Be smarter in how we look at matchers. (#572) * Add unittests for PostingsForMatcher. * Selector methods are all stateless, don't need a reference. * Be smarter in how we look at matchers. Look at all matchers to see if a label can be empty. Optimise Not handling, so i!="2" is a simple lookup rather than an inverse postings list. All all the Withouts together, rather than having to subtract each from all postings. Change the pre-expand the postings logic to always do it before doing a Without only. Don't do that if it's already a list. The initial goal here was that the oft-seen pattern i=~"something.+",i!="foo",i!="bar" becomes more efficient. benchmark old ns/op new ns/op delta BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62% BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65% BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90% BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57% BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04% BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75% BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10% BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70% BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12% BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96% BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03% BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12% BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
func (m *mockIndex) AddSeries(ref uint64, l labels.Labels, chunks ...chunks.Meta) error {
if _, ok := m.series[ref]; ok {
return errors.Errorf("series with reference %d already added", ref)
}
for _, lbl := range l {
m.symbols[lbl.Name] = struct{}{}
m.symbols[lbl.Value] = struct{}{}
}
s := series{l: l}
// Actual chunk data is not stored in the index.
for _, c := range chunks {
c.Chunk = nil
s.chunks = append(s.chunks, c)
}
m.series[ref] = s
return nil
}
func (m mockIndex) WritePostings(name, value string, it index.Postings) error {
l := labels.Label{Name: name, Value: value}
if _, ok := m.postings[l]; ok {
return errors.Errorf("postings for %s already added", l)
}
ep, err := index.ExpandPostings(it)
if err != nil {
return err
}
m.postings[l] = ep
return nil
}
func (m mockIndex) Close() error {
return nil
}
func (m mockIndex) SortedLabelValues(name string) ([]string, error) {
values, _ := m.LabelValues(name)
sort.Strings(values)
return values, nil
}
Replace StringTuples with []string Benchmarks show slight cpu/allocs improvements. benchmark old ns/op new ns/op delta BenchmarkPostingsForMatchers/Head/n="1"-4 269978625 235305110 -12.84% BenchmarkPostingsForMatchers/Head/n="1",j="foo"-4 129739974 121646193 -6.24% BenchmarkPostingsForMatchers/Head/j="foo",n="1"-4 123826274 122056253 -1.43% BenchmarkPostingsForMatchers/Head/n="1",j!="foo"-4 126962188 130038235 +2.42% BenchmarkPostingsForMatchers/Head/i=~".*"-4 6423653989 5991126455 -6.73% BenchmarkPostingsForMatchers/Head/i=~".+"-4 6934647521 7033370634 +1.42% BenchmarkPostingsForMatchers/Head/i=~""-4 1177781285 1121497736 -4.78% BenchmarkPostingsForMatchers/Head/i!=""-4 7033680256 7246094991 +3.02% BenchmarkPostingsForMatchers/Head/n="1",i=~".*",j="foo"-4 293702332 287440212 -2.13% BenchmarkPostingsForMatchers/Head/n="1",i=~".*",i!="2",j="foo"-4 307628268 307039964 -0.19% BenchmarkPostingsForMatchers/Head/n="1",i!=""-4 512247746 480003862 -6.29% BenchmarkPostingsForMatchers/Head/n="1",i!="",j="foo"-4 361199794 367066917 +1.62% BenchmarkPostingsForMatchers/Head/n="1",i=~".+",j="foo"-4 478863761 476037784 -0.59% BenchmarkPostingsForMatchers/Head/n="1",i=~"1.+",j="foo"-4 103394659 102902098 -0.48% BenchmarkPostingsForMatchers/Head/n="1",i=~".+",i!="2",j="foo"-4 482552781 475453903 -1.47% BenchmarkPostingsForMatchers/Head/n="1",i=~".+",i!~"2.*",j="foo"-4 559257389 589297047 +5.37% BenchmarkPostingsForMatchers/Block/n="1"-4 36492 37012 +1.42% BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 557788 611903 +9.70% BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 554443 573814 +3.49% BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 553227 553826 +0.11% BenchmarkPostingsForMatchers/Block/i=~".*"-4 113855090 111707221 -1.89% BenchmarkPostingsForMatchers/Block/i=~".+"-4 133994674 136520728 +1.89% BenchmarkPostingsForMatchers/Block/i=~""-4 38138091 36299898 -4.82% BenchmarkPostingsForMatchers/Block/i!=""-4 28861213 27396723 -5.07% BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 112699941 110853868 -1.64% BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 113198026 111389742 -1.60% BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 28994069 27363804 -5.62% BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 29709406 28589223 -3.77% BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 134695119 135736971 +0.77% BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 26783286 25826928 -3.57% BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 134733254 134116739 -0.46% BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 160713937 158802768 -1.19% benchmark old allocs new allocs delta BenchmarkPostingsForMatchers/Head/n="1"-4 36 36 +0.00% BenchmarkPostingsForMatchers/Head/n="1",j="foo"-4 38 38 +0.00% BenchmarkPostingsForMatchers/Head/j="foo",n="1"-4 38 38 +0.00% BenchmarkPostingsForMatchers/Head/n="1",j!="foo"-4 42 40 -4.76% BenchmarkPostingsForMatchers/Head/i=~".*"-4 61 59 -3.28% BenchmarkPostingsForMatchers/Head/i=~".+"-4 100088 100087 -0.00% BenchmarkPostingsForMatchers/Head/i=~""-4 100053 100051 -0.00% BenchmarkPostingsForMatchers/Head/i!=""-4 100087 100085 -0.00% BenchmarkPostingsForMatchers/Head/n="1",i=~".*",j="foo"-4 44 42 -4.55% BenchmarkPostingsForMatchers/Head/n="1",i=~".*",i!="2",j="foo"-4 50 48 -4.00% BenchmarkPostingsForMatchers/Head/n="1",i!=""-4 100076 100074 -0.00% BenchmarkPostingsForMatchers/Head/n="1",i!="",j="foo"-4 100077 100075 -0.00% BenchmarkPostingsForMatchers/Head/n="1",i=~".+",j="foo"-4 100077 100074 -0.00% BenchmarkPostingsForMatchers/Head/n="1",i=~"1.+",j="foo"-4 11167 11165 -0.02% BenchmarkPostingsForMatchers/Head/n="1",i=~".+",i!="2",j="foo"-4 100082 100080 -0.00% BenchmarkPostingsForMatchers/Head/n="1",i=~".+",i!~"2.*",j="foo"-4 111265 111261 -0.00% BenchmarkPostingsForMatchers/Block/n="1"-4 6 6 +0.00% BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 11 11 +0.00% BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 11 11 +0.00% BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 15 13 -13.33% BenchmarkPostingsForMatchers/Block/i=~".*"-4 12 10 -16.67% BenchmarkPostingsForMatchers/Block/i=~".+"-4 100040 100038 -0.00% BenchmarkPostingsForMatchers/Block/i=~""-4 100045 100043 -0.00% BenchmarkPostingsForMatchers/Block/i!=""-4 100041 100039 -0.00% BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 17 15 -11.76% BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 23 21 -8.70% BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 100046 100044 -0.00% BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 100050 100048 -0.00% BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 100049 100047 -0.00% BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 11150 11148 -0.02% BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 100055 100053 -0.00% BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 111238 111234 -0.00% benchmark old bytes new bytes delta BenchmarkPostingsForMatchers/Head/n="1"-4 10887816 10887817 +0.00% BenchmarkPostingsForMatchers/Head/n="1",j="foo"-4 5456648 5456648 +0.00% BenchmarkPostingsForMatchers/Head/j="foo",n="1"-4 5456648 5456648 +0.00% BenchmarkPostingsForMatchers/Head/n="1",j!="foo"-4 5456792 5456712 -0.00% BenchmarkPostingsForMatchers/Head/i=~".*"-4 258254408 258254328 -0.00% BenchmarkPostingsForMatchers/Head/i=~".+"-4 273912888 273912904 +0.00% BenchmarkPostingsForMatchers/Head/i=~""-4 17266680 17266600 -0.00% BenchmarkPostingsForMatchers/Head/i!=""-4 273912416 273912336 -0.00% BenchmarkPostingsForMatchers/Head/n="1",i=~".*",j="foo"-4 7062578 7062498 -0.00% BenchmarkPostingsForMatchers/Head/n="1",i=~".*",i!="2",j="foo"-4 7062770 7062690 -0.00% BenchmarkPostingsForMatchers/Head/n="1",i!=""-4 28152346 28152266 -0.00% BenchmarkPostingsForMatchers/Head/n="1",i!="",j="foo"-4 22721178 22721098 -0.00% BenchmarkPostingsForMatchers/Head/n="1",i=~".+",j="foo"-4 22721336 22721224 -0.00% BenchmarkPostingsForMatchers/Head/n="1",i=~"1.+",j="foo"-4 3623804 3623733 -0.00% BenchmarkPostingsForMatchers/Head/n="1",i=~".+",i!="2",j="foo"-4 22721480 22721400 -0.00% BenchmarkPostingsForMatchers/Head/n="1",i=~".+",i!~"2.*",j="foo"-4 24816652 24816444 -0.00% BenchmarkPostingsForMatchers/Block/n="1"-4 296 296 +0.00% BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 424 424 +0.00% BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 424 424 +0.00% BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 1544 1464 -5.18% BenchmarkPostingsForMatchers/Block/i=~".*"-4 1606114 1606045 -0.00% BenchmarkPostingsForMatchers/Block/i=~".+"-4 17264709 17264629 -0.00% BenchmarkPostingsForMatchers/Block/i=~""-4 17264780 17264696 -0.00% BenchmarkPostingsForMatchers/Block/i!=""-4 17264680 17264600 -0.00% BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 1606253 1606165 -0.01% BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 1606445 1606348 -0.01% BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 17264808 17264728 -0.00% BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 17264936 17264856 -0.00% BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 17264965 17264885 -0.00% BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 3148262 3148182 -0.00% BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 17265141 17265061 -0.00% BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 20416944 20416784 -0.00% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2020-01-01 03:38:01 -08:00
func (m mockIndex) LabelValues(name string) ([]string, error) {
values := []string{}
for l := range m.postings {
if l.Name == name {
values = append(values, l.Value)
}
}
Replace StringTuples with []string Benchmarks show slight cpu/allocs improvements. benchmark old ns/op new ns/op delta BenchmarkPostingsForMatchers/Head/n="1"-4 269978625 235305110 -12.84% BenchmarkPostingsForMatchers/Head/n="1",j="foo"-4 129739974 121646193 -6.24% BenchmarkPostingsForMatchers/Head/j="foo",n="1"-4 123826274 122056253 -1.43% BenchmarkPostingsForMatchers/Head/n="1",j!="foo"-4 126962188 130038235 +2.42% BenchmarkPostingsForMatchers/Head/i=~".*"-4 6423653989 5991126455 -6.73% BenchmarkPostingsForMatchers/Head/i=~".+"-4 6934647521 7033370634 +1.42% BenchmarkPostingsForMatchers/Head/i=~""-4 1177781285 1121497736 -4.78% BenchmarkPostingsForMatchers/Head/i!=""-4 7033680256 7246094991 +3.02% BenchmarkPostingsForMatchers/Head/n="1",i=~".*",j="foo"-4 293702332 287440212 -2.13% BenchmarkPostingsForMatchers/Head/n="1",i=~".*",i!="2",j="foo"-4 307628268 307039964 -0.19% BenchmarkPostingsForMatchers/Head/n="1",i!=""-4 512247746 480003862 -6.29% BenchmarkPostingsForMatchers/Head/n="1",i!="",j="foo"-4 361199794 367066917 +1.62% BenchmarkPostingsForMatchers/Head/n="1",i=~".+",j="foo"-4 478863761 476037784 -0.59% BenchmarkPostingsForMatchers/Head/n="1",i=~"1.+",j="foo"-4 103394659 102902098 -0.48% BenchmarkPostingsForMatchers/Head/n="1",i=~".+",i!="2",j="foo"-4 482552781 475453903 -1.47% BenchmarkPostingsForMatchers/Head/n="1",i=~".+",i!~"2.*",j="foo"-4 559257389 589297047 +5.37% BenchmarkPostingsForMatchers/Block/n="1"-4 36492 37012 +1.42% BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 557788 611903 +9.70% BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 554443 573814 +3.49% BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 553227 553826 +0.11% BenchmarkPostingsForMatchers/Block/i=~".*"-4 113855090 111707221 -1.89% BenchmarkPostingsForMatchers/Block/i=~".+"-4 133994674 136520728 +1.89% BenchmarkPostingsForMatchers/Block/i=~""-4 38138091 36299898 -4.82% BenchmarkPostingsForMatchers/Block/i!=""-4 28861213 27396723 -5.07% BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 112699941 110853868 -1.64% BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 113198026 111389742 -1.60% BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 28994069 27363804 -5.62% BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 29709406 28589223 -3.77% BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 134695119 135736971 +0.77% BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 26783286 25826928 -3.57% BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 134733254 134116739 -0.46% BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 160713937 158802768 -1.19% benchmark old allocs new allocs delta BenchmarkPostingsForMatchers/Head/n="1"-4 36 36 +0.00% BenchmarkPostingsForMatchers/Head/n="1",j="foo"-4 38 38 +0.00% BenchmarkPostingsForMatchers/Head/j="foo",n="1"-4 38 38 +0.00% BenchmarkPostingsForMatchers/Head/n="1",j!="foo"-4 42 40 -4.76% BenchmarkPostingsForMatchers/Head/i=~".*"-4 61 59 -3.28% BenchmarkPostingsForMatchers/Head/i=~".+"-4 100088 100087 -0.00% BenchmarkPostingsForMatchers/Head/i=~""-4 100053 100051 -0.00% BenchmarkPostingsForMatchers/Head/i!=""-4 100087 100085 -0.00% BenchmarkPostingsForMatchers/Head/n="1",i=~".*",j="foo"-4 44 42 -4.55% BenchmarkPostingsForMatchers/Head/n="1",i=~".*",i!="2",j="foo"-4 50 48 -4.00% BenchmarkPostingsForMatchers/Head/n="1",i!=""-4 100076 100074 -0.00% BenchmarkPostingsForMatchers/Head/n="1",i!="",j="foo"-4 100077 100075 -0.00% BenchmarkPostingsForMatchers/Head/n="1",i=~".+",j="foo"-4 100077 100074 -0.00% BenchmarkPostingsForMatchers/Head/n="1",i=~"1.+",j="foo"-4 11167 11165 -0.02% BenchmarkPostingsForMatchers/Head/n="1",i=~".+",i!="2",j="foo"-4 100082 100080 -0.00% BenchmarkPostingsForMatchers/Head/n="1",i=~".+",i!~"2.*",j="foo"-4 111265 111261 -0.00% BenchmarkPostingsForMatchers/Block/n="1"-4 6 6 +0.00% BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 11 11 +0.00% BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 11 11 +0.00% BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 15 13 -13.33% BenchmarkPostingsForMatchers/Block/i=~".*"-4 12 10 -16.67% BenchmarkPostingsForMatchers/Block/i=~".+"-4 100040 100038 -0.00% BenchmarkPostingsForMatchers/Block/i=~""-4 100045 100043 -0.00% BenchmarkPostingsForMatchers/Block/i!=""-4 100041 100039 -0.00% BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 17 15 -11.76% BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 23 21 -8.70% BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 100046 100044 -0.00% BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 100050 100048 -0.00% BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 100049 100047 -0.00% BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 11150 11148 -0.02% BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 100055 100053 -0.00% BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 111238 111234 -0.00% benchmark old bytes new bytes delta BenchmarkPostingsForMatchers/Head/n="1"-4 10887816 10887817 +0.00% BenchmarkPostingsForMatchers/Head/n="1",j="foo"-4 5456648 5456648 +0.00% BenchmarkPostingsForMatchers/Head/j="foo",n="1"-4 5456648 5456648 +0.00% BenchmarkPostingsForMatchers/Head/n="1",j!="foo"-4 5456792 5456712 -0.00% BenchmarkPostingsForMatchers/Head/i=~".*"-4 258254408 258254328 -0.00% BenchmarkPostingsForMatchers/Head/i=~".+"-4 273912888 273912904 +0.00% BenchmarkPostingsForMatchers/Head/i=~""-4 17266680 17266600 -0.00% BenchmarkPostingsForMatchers/Head/i!=""-4 273912416 273912336 -0.00% BenchmarkPostingsForMatchers/Head/n="1",i=~".*",j="foo"-4 7062578 7062498 -0.00% BenchmarkPostingsForMatchers/Head/n="1",i=~".*",i!="2",j="foo"-4 7062770 7062690 -0.00% BenchmarkPostingsForMatchers/Head/n="1",i!=""-4 28152346 28152266 -0.00% BenchmarkPostingsForMatchers/Head/n="1",i!="",j="foo"-4 22721178 22721098 -0.00% BenchmarkPostingsForMatchers/Head/n="1",i=~".+",j="foo"-4 22721336 22721224 -0.00% BenchmarkPostingsForMatchers/Head/n="1",i=~"1.+",j="foo"-4 3623804 3623733 -0.00% BenchmarkPostingsForMatchers/Head/n="1",i=~".+",i!="2",j="foo"-4 22721480 22721400 -0.00% BenchmarkPostingsForMatchers/Head/n="1",i=~".+",i!~"2.*",j="foo"-4 24816652 24816444 -0.00% BenchmarkPostingsForMatchers/Block/n="1"-4 296 296 +0.00% BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 424 424 +0.00% BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 424 424 +0.00% BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 1544 1464 -5.18% BenchmarkPostingsForMatchers/Block/i=~".*"-4 1606114 1606045 -0.00% BenchmarkPostingsForMatchers/Block/i=~".+"-4 17264709 17264629 -0.00% BenchmarkPostingsForMatchers/Block/i=~""-4 17264780 17264696 -0.00% BenchmarkPostingsForMatchers/Block/i!=""-4 17264680 17264600 -0.00% BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 1606253 1606165 -0.01% BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 1606445 1606348 -0.01% BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 17264808 17264728 -0.00% BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 17264936 17264856 -0.00% BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 17264965 17264885 -0.00% BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 3148262 3148182 -0.00% BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 17265141 17265061 -0.00% BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 20416944 20416784 -0.00% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2020-01-01 03:38:01 -08:00
return values, nil
}
Reduce memory used by postings offset table. Rather than keeping the offset of each postings list, instead keep the nth offset of the offset of the posting list. As postings list offsets have always been sorted, we can then get to the closest entry before the one we want an iterate forwards. I haven't done much tuning on the 32 number, it was chosen to try not to read through more than a 4k page of data. Switch to a bulk interface for fetching postings. Use it to avoid having to re-read parts of the posting offset table when querying lots of it. For a index with what BenchmarkHeadPostingForMatchers uses RAM for r.postings drops from 3.79MB to 80.19kB or about 48x. Bytes allocated go down by 30%, and suprisingly CPU usage drops by 4-6% for typical queries too. benchmark old ns/op new ns/op delta BenchmarkPostingsForMatchers/Block/n="1"-4 35231 36673 +4.09% BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 563380 540627 -4.04% BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 536782 534186 -0.48% BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 533990 541550 +1.42% BenchmarkPostingsForMatchers/Block/i=~".*"-4 113374598 117969608 +4.05% BenchmarkPostingsForMatchers/Block/i=~".+"-4 146329884 139651442 -4.56% BenchmarkPostingsForMatchers/Block/i=~""-4 50346510 44961127 -10.70% BenchmarkPostingsForMatchers/Block/i!=""-4 41261550 35356165 -14.31% BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 112544418 116904010 +3.87% BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 112487086 116864918 +3.89% BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 41094758 35457904 -13.72% BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 41906372 36151473 -13.73% BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 147262414 140424800 -4.64% BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 28615629 27872072 -2.60% BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 147117177 140462403 -4.52% BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 175096826 167902298 -4.11% benchmark old allocs new allocs delta BenchmarkPostingsForMatchers/Block/n="1"-4 4 6 +50.00% BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 7 11 +57.14% BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 7 11 +57.14% BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 15 17 +13.33% BenchmarkPostingsForMatchers/Block/i=~".*"-4 100010 100012 +0.00% BenchmarkPostingsForMatchers/Block/i=~".+"-4 200069 200040 -0.01% BenchmarkPostingsForMatchers/Block/i=~""-4 200072 200045 -0.01% BenchmarkPostingsForMatchers/Block/i!=""-4 200070 200041 -0.01% BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 100013 100017 +0.00% BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 100017 100023 +0.01% BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 200073 200046 -0.01% BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 200075 200050 -0.01% BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 200074 200049 -0.01% BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 111165 111150 -0.01% BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 200078 200055 -0.01% BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 311282 311238 -0.01% benchmark old bytes new bytes delta BenchmarkPostingsForMatchers/Block/n="1"-4 264 296 +12.12% BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 360 424 +17.78% BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 360 424 +17.78% BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 520 552 +6.15% BenchmarkPostingsForMatchers/Block/i=~".*"-4 1600461 1600482 +0.00% BenchmarkPostingsForMatchers/Block/i=~".+"-4 24900801 17259077 -30.69% BenchmarkPostingsForMatchers/Block/i=~""-4 24900836 17259151 -30.69% BenchmarkPostingsForMatchers/Block/i!=""-4 24900760 17259048 -30.69% BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 1600557 1600621 +0.00% BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 1600717 1600813 +0.01% BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 24900856 17259176 -30.69% BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 24900952 17259304 -30.69% BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 24900993 17259333 -30.69% BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 3788311 3142630 -17.04% BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 24901137 17259509 -30.69% BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 28693086 20405680 -28.88% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-12-05 10:27:40 -08:00
func (m mockIndex) Postings(name string, values ...string) (index.Postings, error) {
res := make([]index.Postings, 0, len(values))
for _, value := range values {
l := labels.Label{Name: name, Value: value}
res = append(res, index.NewListPostings(m.postings[l]))
}
return index.Merge(res...), nil
}
func (m mockIndex) SortedPostings(p index.Postings) index.Postings {
ep, err := index.ExpandPostings(p)
if err != nil {
return index.ErrPostings(errors.Wrap(err, "expand postings"))
}
sort.Slice(ep, func(i, j int) bool {
return labels.Compare(m.series[ep[i]].l, m.series[ep[j]].l) < 0
})
return index.NewListPostings(ep)
}
func (m mockIndex) Series(ref uint64, lset *labels.Labels, chks *[]chunks.Meta) error {
s, ok := m.series[ref]
if !ok {
return storage.ErrNotFound
}
*lset = append((*lset)[:0], s.l...)
*chks = append((*chks)[:0], s.chunks...)
return nil
}
func (m mockIndex) LabelNames() ([]string, error) {
names := map[string]struct{}{}
for l := range m.postings {
names[l.Name] = struct{}{}
}
l := make([]string, 0, len(names))
for name := range names {
l = append(l, name)
}
sort.Strings(l)
return l, nil
}
type mockSeries struct {
labels func() labels.Labels
iterator func() chunkenc.Iterator
}
func newSeries(l map[string]string, s []tsdbutil.Sample) storage.Series {
return &mockSeries{
labels: func() labels.Labels { return labels.FromMap(l) },
iterator: func() chunkenc.Iterator { return newListSeriesIterator(s) },
}
}
func (m *mockSeries) Labels() labels.Labels { return m.labels() }
func (m *mockSeries) Iterator() chunkenc.Iterator { return m.iterator() }
type listSeriesIterator struct {
list []tsdbutil.Sample
idx int
}
func newListSeriesIterator(list []tsdbutil.Sample) *listSeriesIterator {
return &listSeriesIterator{list: list, idx: -1}
}
func (it *listSeriesIterator) At() (int64, float64) {
s := it.list[it.idx]
return s.T(), s.V()
}
func (it *listSeriesIterator) Next() bool {
it.idx++
return it.idx < len(it.list)
}
func (it *listSeriesIterator) Seek(t int64) bool {
if it.idx == -1 {
it.idx = 0
}
// Do binary search between current position and end.
it.idx = sort.Search(len(it.list)-it.idx, func(i int) bool {
s := it.list[i+it.idx]
return s.T() >= t
})
return it.idx < len(it.list)
}
func (it *listSeriesIterator) Err() error {
return nil
}
Vertical query merging and compaction (#370) * Vertical series iterator Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Select overlapped blocks first in compactor Plan() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Added vertical compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Code cleanup and comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix tests Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Add benchmark for compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Perform vertical compaction only when blocks are overlapping. Actions for vertical compaction: * Sorting chunk metas * Calling chunks.MergeOverlappingChunks on the chunks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for vertical compaction * BenchmarkNormalCompaction => BenchmarkCompaction * Moved the benchmark from db_test.go to compact_test.go Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for query iterator and seek for non overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Vertical query merge only for overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Simplify logging in Compact(...) Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG.md Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Calculate overlapping inside populateBlock Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * MinTime and MaxTime for BlockReader. Using this to find overlapping blocks in populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Sort blocks w.r.t. MinTime in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping in LeveledCompactor.write() instead of returning bool Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping inside LeveledCompactor.populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor createBlock to take optional []Series Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * review1 Signed-off-by: Krasi Georgiev <kgeorgie@redhat.com> * Updated CHANGELOG and minor nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor iterator and seek benchmarks for Querier. Also has as overlapping blocks. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Additional test case Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * genSeries takes optional labels. Updated BenchmarkQueryIterator and BenchmarkQuerySeek. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Split genSeries into genSeries and populateSeries Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Check error in benchmark Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Warn about overlapping blocks in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in>
2019-02-14 05:29:41 -08:00
func BenchmarkQueryIterator(b *testing.B) {
cases := []struct {
numBlocks int
numSeries int
numSamplesPerSeriesPerBlock int
overlapPercentages []int // >=0, <=100, this is w.r.t. the previous block.
}{
{
numBlocks: 20,
numSeries: 1000,
numSamplesPerSeriesPerBlock: 20000,
overlapPercentages: []int{0, 10, 30},
},
}
for _, c := range cases {
for _, overlapPercentage := range c.overlapPercentages {
benchMsg := fmt.Sprintf("nBlocks=%d,nSeries=%d,numSamplesPerSeriesPerBlock=%d,overlap=%d%%",
c.numBlocks, c.numSeries, c.numSamplesPerSeriesPerBlock, overlapPercentage)
b.Run(benchMsg, func(b *testing.B) {
dir, err := ioutil.TempDir("", "bench_query_iterator")
testutil.Ok(b, err)
defer func() {
testutil.Ok(b, os.RemoveAll(dir))
}()
var (
blocks []*Block
overlapDelta = int64(overlapPercentage * c.numSamplesPerSeriesPerBlock / 100)
prefilledLabels []map[string]string
generatedSeries []storage.Series
Vertical query merging and compaction (#370) * Vertical series iterator Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Select overlapped blocks first in compactor Plan() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Added vertical compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Code cleanup and comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix tests Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Add benchmark for compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Perform vertical compaction only when blocks are overlapping. Actions for vertical compaction: * Sorting chunk metas * Calling chunks.MergeOverlappingChunks on the chunks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for vertical compaction * BenchmarkNormalCompaction => BenchmarkCompaction * Moved the benchmark from db_test.go to compact_test.go Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for query iterator and seek for non overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Vertical query merge only for overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Simplify logging in Compact(...) Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG.md Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Calculate overlapping inside populateBlock Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * MinTime and MaxTime for BlockReader. Using this to find overlapping blocks in populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Sort blocks w.r.t. MinTime in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping in LeveledCompactor.write() instead of returning bool Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping inside LeveledCompactor.populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor createBlock to take optional []Series Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * review1 Signed-off-by: Krasi Georgiev <kgeorgie@redhat.com> * Updated CHANGELOG and minor nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor iterator and seek benchmarks for Querier. Also has as overlapping blocks. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Additional test case Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * genSeries takes optional labels. Updated BenchmarkQueryIterator and BenchmarkQuerySeek. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Split genSeries into genSeries and populateSeries Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Check error in benchmark Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Warn about overlapping blocks in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in>
2019-02-14 05:29:41 -08:00
)
for i := int64(0); i < int64(c.numBlocks); i++ {
offset := i * overlapDelta
mint := i*int64(c.numSamplesPerSeriesPerBlock) - offset
maxt := mint + int64(c.numSamplesPerSeriesPerBlock) - 1
if len(prefilledLabels) == 0 {
generatedSeries = genSeries(c.numSeries, 10, mint, maxt)
for _, s := range generatedSeries {
prefilledLabels = append(prefilledLabels, s.Labels().Map())
}
} else {
generatedSeries = populateSeries(prefilledLabels, mint, maxt)
}
block, err := OpenBlock(nil, createBlock(b, dir, generatedSeries), nil)
testutil.Ok(b, err)
blocks = append(blocks, block)
defer block.Close()
}
que := &querier{
blocks: make([]storage.Querier, 0, len(blocks)),
Vertical query merging and compaction (#370) * Vertical series iterator Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Select overlapped blocks first in compactor Plan() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Added vertical compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Code cleanup and comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix tests Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Add benchmark for compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Perform vertical compaction only when blocks are overlapping. Actions for vertical compaction: * Sorting chunk metas * Calling chunks.MergeOverlappingChunks on the chunks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for vertical compaction * BenchmarkNormalCompaction => BenchmarkCompaction * Moved the benchmark from db_test.go to compact_test.go Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for query iterator and seek for non overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Vertical query merge only for overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Simplify logging in Compact(...) Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG.md Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Calculate overlapping inside populateBlock Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * MinTime and MaxTime for BlockReader. Using this to find overlapping blocks in populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Sort blocks w.r.t. MinTime in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping in LeveledCompactor.write() instead of returning bool Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping inside LeveledCompactor.populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor createBlock to take optional []Series Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * review1 Signed-off-by: Krasi Georgiev <kgeorgie@redhat.com> * Updated CHANGELOG and minor nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor iterator and seek benchmarks for Querier. Also has as overlapping blocks. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Additional test case Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * genSeries takes optional labels. Updated BenchmarkQueryIterator and BenchmarkQuerySeek. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Split genSeries into genSeries and populateSeries Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Check error in benchmark Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Warn about overlapping blocks in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in>
2019-02-14 05:29:41 -08:00
}
for _, blk := range blocks {
q, err := NewBlockQuerier(blk, math.MinInt64, math.MaxInt64)
testutil.Ok(b, err)
que.blocks = append(que.blocks, q)
}
var sq storage.Querier = que
Vertical query merging and compaction (#370) * Vertical series iterator Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Select overlapped blocks first in compactor Plan() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Added vertical compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Code cleanup and comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix tests Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Add benchmark for compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Perform vertical compaction only when blocks are overlapping. Actions for vertical compaction: * Sorting chunk metas * Calling chunks.MergeOverlappingChunks on the chunks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for vertical compaction * BenchmarkNormalCompaction => BenchmarkCompaction * Moved the benchmark from db_test.go to compact_test.go Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for query iterator and seek for non overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Vertical query merge only for overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Simplify logging in Compact(...) Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG.md Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Calculate overlapping inside populateBlock Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * MinTime and MaxTime for BlockReader. Using this to find overlapping blocks in populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Sort blocks w.r.t. MinTime in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping in LeveledCompactor.write() instead of returning bool Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping inside LeveledCompactor.populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor createBlock to take optional []Series Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * review1 Signed-off-by: Krasi Georgiev <kgeorgie@redhat.com> * Updated CHANGELOG and minor nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor iterator and seek benchmarks for Querier. Also has as overlapping blocks. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Additional test case Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * genSeries takes optional labels. Updated BenchmarkQueryIterator and BenchmarkQuerySeek. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Split genSeries into genSeries and populateSeries Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Check error in benchmark Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Warn about overlapping blocks in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in>
2019-02-14 05:29:41 -08:00
if overlapPercentage > 0 {
sq = &verticalQuerier{
querier: *que,
}
}
defer sq.Close()
benchQuery(b, c.numSeries, sq, labels.Selector{labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*")})
Vertical query merging and compaction (#370) * Vertical series iterator Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Select overlapped blocks first in compactor Plan() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Added vertical compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Code cleanup and comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix tests Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Add benchmark for compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Perform vertical compaction only when blocks are overlapping. Actions for vertical compaction: * Sorting chunk metas * Calling chunks.MergeOverlappingChunks on the chunks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for vertical compaction * BenchmarkNormalCompaction => BenchmarkCompaction * Moved the benchmark from db_test.go to compact_test.go Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for query iterator and seek for non overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Vertical query merge only for overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Simplify logging in Compact(...) Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG.md Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Calculate overlapping inside populateBlock Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * MinTime and MaxTime for BlockReader. Using this to find overlapping blocks in populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Sort blocks w.r.t. MinTime in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping in LeveledCompactor.write() instead of returning bool Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping inside LeveledCompactor.populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor createBlock to take optional []Series Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * review1 Signed-off-by: Krasi Georgiev <kgeorgie@redhat.com> * Updated CHANGELOG and minor nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor iterator and seek benchmarks for Querier. Also has as overlapping blocks. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Additional test case Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * genSeries takes optional labels. Updated BenchmarkQueryIterator and BenchmarkQuerySeek. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Split genSeries into genSeries and populateSeries Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Check error in benchmark Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Warn about overlapping blocks in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in>
2019-02-14 05:29:41 -08:00
})
}
}
}
func BenchmarkQuerySeek(b *testing.B) {
cases := []struct {
numBlocks int
numSeries int
numSamplesPerSeriesPerBlock int
overlapPercentages []int // >=0, <=100, this is w.r.t. the previous block.
}{
{
numBlocks: 20,
numSeries: 100,
numSamplesPerSeriesPerBlock: 2000,
overlapPercentages: []int{0, 10, 30, 50},
},
}
for _, c := range cases {
for _, overlapPercentage := range c.overlapPercentages {
benchMsg := fmt.Sprintf("nBlocks=%d,nSeries=%d,numSamplesPerSeriesPerBlock=%d,overlap=%d%%",
c.numBlocks, c.numSeries, c.numSamplesPerSeriesPerBlock, overlapPercentage)
b.Run(benchMsg, func(b *testing.B) {
dir, err := ioutil.TempDir("", "bench_query_iterator")
testutil.Ok(b, err)
defer func() {
testutil.Ok(b, os.RemoveAll(dir))
}()
var (
blocks []*Block
overlapDelta = int64(overlapPercentage * c.numSamplesPerSeriesPerBlock / 100)
prefilledLabels []map[string]string
generatedSeries []storage.Series
Vertical query merging and compaction (#370) * Vertical series iterator Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Select overlapped blocks first in compactor Plan() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Added vertical compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Code cleanup and comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix tests Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Add benchmark for compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Perform vertical compaction only when blocks are overlapping. Actions for vertical compaction: * Sorting chunk metas * Calling chunks.MergeOverlappingChunks on the chunks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for vertical compaction * BenchmarkNormalCompaction => BenchmarkCompaction * Moved the benchmark from db_test.go to compact_test.go Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for query iterator and seek for non overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Vertical query merge only for overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Simplify logging in Compact(...) Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG.md Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Calculate overlapping inside populateBlock Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * MinTime and MaxTime for BlockReader. Using this to find overlapping blocks in populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Sort blocks w.r.t. MinTime in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping in LeveledCompactor.write() instead of returning bool Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping inside LeveledCompactor.populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor createBlock to take optional []Series Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * review1 Signed-off-by: Krasi Georgiev <kgeorgie@redhat.com> * Updated CHANGELOG and minor nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor iterator and seek benchmarks for Querier. Also has as overlapping blocks. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Additional test case Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * genSeries takes optional labels. Updated BenchmarkQueryIterator and BenchmarkQuerySeek. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Split genSeries into genSeries and populateSeries Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Check error in benchmark Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Warn about overlapping blocks in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in>
2019-02-14 05:29:41 -08:00
)
for i := int64(0); i < int64(c.numBlocks); i++ {
offset := i * overlapDelta
mint := i*int64(c.numSamplesPerSeriesPerBlock) - offset
maxt := mint + int64(c.numSamplesPerSeriesPerBlock) - 1
if len(prefilledLabels) == 0 {
generatedSeries = genSeries(c.numSeries, 10, mint, maxt)
for _, s := range generatedSeries {
prefilledLabels = append(prefilledLabels, s.Labels().Map())
}
} else {
generatedSeries = populateSeries(prefilledLabels, mint, maxt)
}
block, err := OpenBlock(nil, createBlock(b, dir, generatedSeries), nil)
testutil.Ok(b, err)
blocks = append(blocks, block)
defer block.Close()
}
que := &querier{
blocks: make([]storage.Querier, 0, len(blocks)),
Vertical query merging and compaction (#370) * Vertical series iterator Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Select overlapped blocks first in compactor Plan() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Added vertical compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Code cleanup and comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix tests Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Add benchmark for compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Perform vertical compaction only when blocks are overlapping. Actions for vertical compaction: * Sorting chunk metas * Calling chunks.MergeOverlappingChunks on the chunks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for vertical compaction * BenchmarkNormalCompaction => BenchmarkCompaction * Moved the benchmark from db_test.go to compact_test.go Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for query iterator and seek for non overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Vertical query merge only for overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Simplify logging in Compact(...) Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG.md Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Calculate overlapping inside populateBlock Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * MinTime and MaxTime for BlockReader. Using this to find overlapping blocks in populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Sort blocks w.r.t. MinTime in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping in LeveledCompactor.write() instead of returning bool Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping inside LeveledCompactor.populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor createBlock to take optional []Series Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * review1 Signed-off-by: Krasi Georgiev <kgeorgie@redhat.com> * Updated CHANGELOG and minor nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor iterator and seek benchmarks for Querier. Also has as overlapping blocks. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Additional test case Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * genSeries takes optional labels. Updated BenchmarkQueryIterator and BenchmarkQuerySeek. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Split genSeries into genSeries and populateSeries Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Check error in benchmark Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Warn about overlapping blocks in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in>
2019-02-14 05:29:41 -08:00
}
for _, blk := range blocks {
q, err := NewBlockQuerier(blk, math.MinInt64, math.MaxInt64)
testutil.Ok(b, err)
que.blocks = append(que.blocks, q)
}
var sq storage.Querier = que
Vertical query merging and compaction (#370) * Vertical series iterator Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Select overlapped blocks first in compactor Plan() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Added vertical compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Code cleanup and comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix tests Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Add benchmark for compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Perform vertical compaction only when blocks are overlapping. Actions for vertical compaction: * Sorting chunk metas * Calling chunks.MergeOverlappingChunks on the chunks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for vertical compaction * BenchmarkNormalCompaction => BenchmarkCompaction * Moved the benchmark from db_test.go to compact_test.go Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for query iterator and seek for non overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Vertical query merge only for overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Simplify logging in Compact(...) Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG.md Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Calculate overlapping inside populateBlock Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * MinTime and MaxTime for BlockReader. Using this to find overlapping blocks in populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Sort blocks w.r.t. MinTime in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping in LeveledCompactor.write() instead of returning bool Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping inside LeveledCompactor.populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor createBlock to take optional []Series Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * review1 Signed-off-by: Krasi Georgiev <kgeorgie@redhat.com> * Updated CHANGELOG and minor nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor iterator and seek benchmarks for Querier. Also has as overlapping blocks. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Additional test case Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * genSeries takes optional labels. Updated BenchmarkQueryIterator and BenchmarkQuerySeek. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Split genSeries into genSeries and populateSeries Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Check error in benchmark Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Warn about overlapping blocks in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in>
2019-02-14 05:29:41 -08:00
if overlapPercentage > 0 {
sq = &verticalQuerier{
querier: *que,
}
}
defer sq.Close()
mint := blocks[0].meta.MinTime
maxt := blocks[len(blocks)-1].meta.MaxTime
b.ResetTimer()
b.ReportAllocs()
*: Consistent Error/Warning handling for SeriesSet iterator: Allowing Async Select (#7251) * Add errors and Warnings to SeriesSet Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Change Querier interface and refactor accordingly Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Refactor promql/engine to propagate warnings at eval stage Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review issues Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Make sure all the series from all Selects are pre-advanced Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review issues Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Separate merge series sets Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Clean Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Refactor merge querier failure handling Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Refactored and simplified fanout with improvements from incoming chunk iterator PRs. * Secondary logic is hidden, instead of weird failed series set logic we had. * Fanout is well commented * Fanout closing record all errors * MergeQuerier improved API (clearer) * deferredGenericMergeSeriesSet is not needed as we return no samples anyway for failed series sets (next = false). Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Fix formatting Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Fix CI issues Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Added final tests for error handling. Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Addressed Brian's comments. * Moved hints in populate to be allocated only when needed. * Used sync.Once in secondary Querier to achieve all-or-nothing partial response logic. * Select after first Next is done will panic. NOTE: in lazySeriesSet in theory we could just panic, I think however we can totally just return error, it will panic in expand anyway. Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Utilize errWithWarnings Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Fix recently introduced expansion issue Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Add tests for secondary querier error handling Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Implement lazy merge Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Add name to test cases Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Reorganize Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review comments Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review comments Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Remove redundant warnings Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Fix rebase mistake Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> Co-authored-by: Bartlomiej Plotka <bwplotka@gmail.com>
2020-06-09 09:57:31 -07:00
ss := sq.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*"))
Vertical query merging and compaction (#370) * Vertical series iterator Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Select overlapped blocks first in compactor Plan() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Added vertical compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Code cleanup and comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix tests Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Add benchmark for compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Perform vertical compaction only when blocks are overlapping. Actions for vertical compaction: * Sorting chunk metas * Calling chunks.MergeOverlappingChunks on the chunks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for vertical compaction * BenchmarkNormalCompaction => BenchmarkCompaction * Moved the benchmark from db_test.go to compact_test.go Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for query iterator and seek for non overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Vertical query merge only for overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Simplify logging in Compact(...) Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG.md Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Calculate overlapping inside populateBlock Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * MinTime and MaxTime for BlockReader. Using this to find overlapping blocks in populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Sort blocks w.r.t. MinTime in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping in LeveledCompactor.write() instead of returning bool Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping inside LeveledCompactor.populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor createBlock to take optional []Series Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * review1 Signed-off-by: Krasi Georgiev <kgeorgie@redhat.com> * Updated CHANGELOG and minor nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor iterator and seek benchmarks for Querier. Also has as overlapping blocks. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Additional test case Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * genSeries takes optional labels. Updated BenchmarkQueryIterator and BenchmarkQuerySeek. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Split genSeries into genSeries and populateSeries Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Check error in benchmark Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Warn about overlapping blocks in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in>
2019-02-14 05:29:41 -08:00
for ss.Next() {
it := ss.At().Iterator()
for t := mint; t <= maxt; t++ {
it.Seek(t)
}
testutil.Ok(b, it.Err())
}
testutil.Ok(b, ss.Err())
testutil.Ok(b, err)
*: Consistent Error/Warning handling for SeriesSet iterator: Allowing Async Select (#7251) * Add errors and Warnings to SeriesSet Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Change Querier interface and refactor accordingly Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Refactor promql/engine to propagate warnings at eval stage Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review issues Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Make sure all the series from all Selects are pre-advanced Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review issues Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Separate merge series sets Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Clean Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Refactor merge querier failure handling Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Refactored and simplified fanout with improvements from incoming chunk iterator PRs. * Secondary logic is hidden, instead of weird failed series set logic we had. * Fanout is well commented * Fanout closing record all errors * MergeQuerier improved API (clearer) * deferredGenericMergeSeriesSet is not needed as we return no samples anyway for failed series sets (next = false). Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Fix formatting Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Fix CI issues Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Added final tests for error handling. Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Addressed Brian's comments. * Moved hints in populate to be allocated only when needed. * Used sync.Once in secondary Querier to achieve all-or-nothing partial response logic. * Select after first Next is done will panic. NOTE: in lazySeriesSet in theory we could just panic, I think however we can totally just return error, it will panic in expand anyway. Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Utilize errWithWarnings Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Fix recently introduced expansion issue Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Add tests for secondary querier error handling Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Implement lazy merge Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Add name to test cases Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Reorganize Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review comments Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review comments Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Remove redundant warnings Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Fix rebase mistake Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> Co-authored-by: Bartlomiej Plotka <bwplotka@gmail.com>
2020-06-09 09:57:31 -07:00
testutil.Equals(b, 0, len(ss.Warnings()))
Vertical query merging and compaction (#370) * Vertical series iterator Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Select overlapped blocks first in compactor Plan() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Added vertical compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Code cleanup and comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix tests Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Add benchmark for compaction Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Perform vertical compaction only when blocks are overlapping. Actions for vertical compaction: * Sorting chunk metas * Calling chunks.MergeOverlappingChunks on the chunks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for vertical compaction * BenchmarkNormalCompaction => BenchmarkCompaction * Moved the benchmark from db_test.go to compact_test.go Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Benchmark for query iterator and seek for non overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Vertical query merge only for overlapping blocks Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Simplify logging in Compact(...) Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG.md Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Calculate overlapping inside populateBlock Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * MinTime and MaxTime for BlockReader. Using this to find overlapping blocks in populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Sort blocks w.r.t. MinTime in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping in LeveledCompactor.write() instead of returning bool Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Log about overlapping inside LeveledCompactor.populateBlock() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor createBlock to take optional []Series Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * review1 Signed-off-by: Krasi Georgiev <kgeorgie@redhat.com> * Updated CHANGELOG and minor nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * nits Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Updated CHANGELOG Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Refactor iterator and seek benchmarks for Querier. Also has as overlapping blocks. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Additional test case Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * genSeries takes optional labels. Updated BenchmarkQueryIterator and BenchmarkQuerySeek. Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Split genSeries into genSeries and populateSeries Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Check error in benchmark Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Fix review comments Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in> * Warn about overlapping blocks in reload() Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in>
2019-02-14 05:29:41 -08:00
})
}
}
}
Be smarter in how we look at matchers. (#572) * Add unittests for PostingsForMatcher. * Selector methods are all stateless, don't need a reference. * Be smarter in how we look at matchers. Look at all matchers to see if a label can be empty. Optimise Not handling, so i!="2" is a simple lookup rather than an inverse postings list. All all the Withouts together, rather than having to subtract each from all postings. Change the pre-expand the postings logic to always do it before doing a Without only. Don't do that if it's already a list. The initial goal here was that the oft-seen pattern i=~"something.+",i!="foo",i!="bar" becomes more efficient. benchmark old ns/op new ns/op delta BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62% BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65% BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90% BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57% BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04% BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75% BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10% BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70% BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12% BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96% BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03% BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12% BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
// Refer to https://github.com/prometheus/prometheus/issues/2651.
func BenchmarkSetMatcher(b *testing.B) {
cases := []struct {
numBlocks int
numSeries int
numSamplesPerSeriesPerBlock int
cardinality int
pattern string
}{
// The first three cases are to find out whether the set
// matcher is always faster than regex matcher.
{
numBlocks: 1,
numSeries: 1,
numSamplesPerSeriesPerBlock: 10,
cardinality: 100,
pattern: "1|2|3|4|5|6|7|8|9|10",
},
{
numBlocks: 1,
numSeries: 15,
numSamplesPerSeriesPerBlock: 10,
cardinality: 100,
pattern: "1|2|3|4|5|6|7|8|9|10",
},
{
numBlocks: 1,
numSeries: 15,
numSamplesPerSeriesPerBlock: 10,
cardinality: 100,
pattern: "1|2|3",
},
// Big data sizes benchmarks.
{
numBlocks: 20,
numSeries: 1000,
numSamplesPerSeriesPerBlock: 10,
cardinality: 100,
pattern: "1|2|3",
},
{
numBlocks: 20,
numSeries: 1000,
numSamplesPerSeriesPerBlock: 10,
cardinality: 100,
pattern: "1|2|3|4|5|6|7|8|9|10",
},
// Increase cardinality.
{
numBlocks: 1,
numSeries: 100000,
numSamplesPerSeriesPerBlock: 10,
cardinality: 100000,
pattern: "1|2|3|4|5|6|7|8|9|10",
},
{
numBlocks: 1,
numSeries: 500000,
numSamplesPerSeriesPerBlock: 10,
cardinality: 500000,
pattern: "1|2|3|4|5|6|7|8|9|10",
},
{
numBlocks: 10,
numSeries: 500000,
numSamplesPerSeriesPerBlock: 10,
cardinality: 500000,
pattern: "1|2|3|4|5|6|7|8|9|10",
},
{
numBlocks: 1,
numSeries: 1000000,
numSamplesPerSeriesPerBlock: 10,
cardinality: 1000000,
pattern: "1|2|3|4|5|6|7|8|9|10",
},
}
for _, c := range cases {
dir, err := ioutil.TempDir("", "bench_postings_for_matchers")
testutil.Ok(b, err)
defer func() {
testutil.Ok(b, os.RemoveAll(dir))
}()
var (
blocks []*Block
prefilledLabels []map[string]string
generatedSeries []storage.Series
)
for i := int64(0); i < int64(c.numBlocks); i++ {
mint := i * int64(c.numSamplesPerSeriesPerBlock)
maxt := mint + int64(c.numSamplesPerSeriesPerBlock) - 1
if len(prefilledLabels) == 0 {
generatedSeries = genSeries(c.numSeries, 10, mint, maxt)
for _, s := range generatedSeries {
prefilledLabels = append(prefilledLabels, s.Labels().Map())
}
} else {
generatedSeries = populateSeries(prefilledLabels, mint, maxt)
}
block, err := OpenBlock(nil, createBlock(b, dir, generatedSeries), nil)
testutil.Ok(b, err)
blocks = append(blocks, block)
defer block.Close()
}
que := &querier{
blocks: make([]storage.Querier, 0, len(blocks)),
}
for _, blk := range blocks {
q, err := NewBlockQuerier(blk, math.MinInt64, math.MaxInt64)
testutil.Ok(b, err)
que.blocks = append(que.blocks, q)
}
defer que.Close()
benchMsg := fmt.Sprintf("nSeries=%d,nBlocks=%d,cardinality=%d,pattern=\"%s\"", c.numSeries, c.numBlocks, c.cardinality, c.pattern)
b.Run(benchMsg, func(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
for n := 0; n < b.N; n++ {
*: Consistent Error/Warning handling for SeriesSet iterator: Allowing Async Select (#7251) * Add errors and Warnings to SeriesSet Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Change Querier interface and refactor accordingly Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Refactor promql/engine to propagate warnings at eval stage Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review issues Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Make sure all the series from all Selects are pre-advanced Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review issues Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Separate merge series sets Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Clean Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Refactor merge querier failure handling Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Refactored and simplified fanout with improvements from incoming chunk iterator PRs. * Secondary logic is hidden, instead of weird failed series set logic we had. * Fanout is well commented * Fanout closing record all errors * MergeQuerier improved API (clearer) * deferredGenericMergeSeriesSet is not needed as we return no samples anyway for failed series sets (next = false). Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Fix formatting Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Fix CI issues Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Added final tests for error handling. Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Addressed Brian's comments. * Moved hints in populate to be allocated only when needed. * Used sync.Once in secondary Querier to achieve all-or-nothing partial response logic. * Select after first Next is done will panic. NOTE: in lazySeriesSet in theory we could just panic, I think however we can totally just return error, it will panic in expand anyway. Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Utilize errWithWarnings Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Fix recently introduced expansion issue Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Add tests for secondary querier error handling Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Implement lazy merge Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Add name to test cases Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Reorganize Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review comments Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review comments Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Remove redundant warnings Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Fix rebase mistake Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> Co-authored-by: Bartlomiej Plotka <bwplotka@gmail.com>
2020-06-09 09:57:31 -07:00
ss := que.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "test", c.pattern))
for ss.Next() {
}
testutil.Ok(b, ss.Err())
testutil.Equals(b, 0, len(ss.Warnings()))
}
})
}
}
// Refer to https://github.com/prometheus/prometheus/issues/2651.
func TestFindSetMatches(t *testing.T) {
cases := []struct {
pattern string
exp []string
}{
// Simple sets.
{
pattern: "^(?:foo|bar|baz)$",
exp: []string{
"foo",
"bar",
"baz",
},
},
// Simple sets containing escaped characters.
{
pattern: "^(?:fo\\.o|bar\\?|\\^baz)$",
exp: []string{
"fo.o",
"bar?",
"^baz",
},
},
// Simple sets containing special characters without escaping.
{
pattern: "^(?:fo.o|bar?|^baz)$",
exp: nil,
},
// Missing wrapper.
{
pattern: "foo|bar|baz",
exp: nil,
},
}
for _, c := range cases {
matches := findSetMatches(c.pattern)
if len(c.exp) == 0 {
if len(matches) != 0 {
t.Errorf("Evaluating %s, unexpected result %v", c.pattern, matches)
}
} else {
if len(matches) != len(c.exp) {
t.Errorf("Evaluating %s, length of result not equal to exp", c.pattern)
} else {
for i := 0; i < len(c.exp); i++ {
if c.exp[i] != matches[i] {
t.Errorf("Evaluating %s, unexpected result %s", c.pattern, matches[i])
}
}
}
}
}
}
Be smarter in how we look at matchers. (#572) * Add unittests for PostingsForMatcher. * Selector methods are all stateless, don't need a reference. * Be smarter in how we look at matchers. Look at all matchers to see if a label can be empty. Optimise Not handling, so i!="2" is a simple lookup rather than an inverse postings list. All all the Withouts together, rather than having to subtract each from all postings. Change the pre-expand the postings logic to always do it before doing a Without only. Don't do that if it's already a list. The initial goal here was that the oft-seen pattern i=~"something.+",i!="foo",i!="bar" becomes more efficient. benchmark old ns/op new ns/op delta BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62% BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65% BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90% BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57% BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04% BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75% BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10% BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70% BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12% BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96% BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03% BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12% BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
func TestPostingsForMatchers(t *testing.T) {
M-map full chunks of Head from disk (#6679) When appending to the head and a chunk is full it is flushed to the disk and m-mapped (memory mapped) to free up memory Prom startup now happens in these stages - Iterate the m-maped chunks from disk and keep a map of series reference to its slice of mmapped chunks. - Iterate the WAL as usual. Whenever we create a new series, look for it's mmapped chunks in the map created before and add it to that series. If a head chunk is corrupted the currpted one and all chunks after that are deleted and the data after the corruption is recovered from the existing WAL which means that a corruption in m-mapped files results in NO data loss. [Mmaped chunks format](https://github.com/prometheus/prometheus/blob/master/tsdb/docs/format/head_chunks.md) - main difference is that the chunk for mmaping now also includes series reference because there is no index for mapping series to chunks. [The block chunks](https://github.com/prometheus/prometheus/blob/master/tsdb/docs/format/chunks.md) are accessed from the index which includes the offsets for the chunks in the chunks file - example - chunks of series ID have offsets 200, 500 etc in the chunk files. In case of mmaped chunks, the offsets are stored in memory and accessed from that. During WAL replay, these offsets are restored by iterating all m-mapped chunks as stated above by matching the series id present in the chunk header and offset of that chunk in that file. **Prombench results** _WAL Replay_ 1h Wal reply time 30% less wal reply time - 4m31 vs 3m36 2h Wal reply time 20% less wal reply time - 8m16 vs 7m _Memory During WAL Replay_ High Churn: 10-15% less RAM - 32gb vs 28gb 20% less RAM after compaction 34gb vs 27gb No Churn: 20-30% less RAM - 23gb vs 18gb 40% less RAM after compaction 32.5gb vs 20gb Screenshots are in [this comment](https://github.com/prometheus/prometheus/pull/6679#issuecomment-621678932) Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in>
2020-05-06 08:30:00 -07:00
chunkDir, err := ioutil.TempDir("", "chunk_dir")
testutil.Ok(t, err)
defer func() {
testutil.Ok(t, os.RemoveAll(chunkDir))
}()
h, err := NewHead(nil, nil, nil, 1000, chunkDir, nil, DefaultStripeSize, nil)
Be smarter in how we look at matchers. (#572) * Add unittests for PostingsForMatcher. * Selector methods are all stateless, don't need a reference. * Be smarter in how we look at matchers. Look at all matchers to see if a label can be empty. Optimise Not handling, so i!="2" is a simple lookup rather than an inverse postings list. All all the Withouts together, rather than having to subtract each from all postings. Change the pre-expand the postings logic to always do it before doing a Without only. Don't do that if it's already a list. The initial goal here was that the oft-seen pattern i=~"something.+",i!="foo",i!="bar" becomes more efficient. benchmark old ns/op new ns/op delta BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62% BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65% BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90% BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57% BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04% BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75% BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10% BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70% BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12% BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96% BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03% BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12% BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
testutil.Ok(t, err)
defer func() {
testutil.Ok(t, h.Close())
}()
app := h.Appender()
app.Add(labels.FromStrings("n", "1"), 0, 0)
app.Add(labels.FromStrings("n", "1", "i", "a"), 0, 0)
app.Add(labels.FromStrings("n", "1", "i", "b"), 0, 0)
app.Add(labels.FromStrings("n", "2"), 0, 0)
app.Add(labels.FromStrings("n", "2.5"), 0, 0)
Be smarter in how we look at matchers. (#572) * Add unittests for PostingsForMatcher. * Selector methods are all stateless, don't need a reference. * Be smarter in how we look at matchers. Look at all matchers to see if a label can be empty. Optimise Not handling, so i!="2" is a simple lookup rather than an inverse postings list. All all the Withouts together, rather than having to subtract each from all postings. Change the pre-expand the postings logic to always do it before doing a Without only. Don't do that if it's already a list. The initial goal here was that the oft-seen pattern i=~"something.+",i!="foo",i!="bar" becomes more efficient. benchmark old ns/op new ns/op delta BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62% BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65% BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90% BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57% BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04% BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75% BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10% BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70% BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12% BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96% BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03% BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12% BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
testutil.Ok(t, app.Commit())
cases := []struct {
matchers []*labels.Matcher
Be smarter in how we look at matchers. (#572) * Add unittests for PostingsForMatcher. * Selector methods are all stateless, don't need a reference. * Be smarter in how we look at matchers. Look at all matchers to see if a label can be empty. Optimise Not handling, so i!="2" is a simple lookup rather than an inverse postings list. All all the Withouts together, rather than having to subtract each from all postings. Change the pre-expand the postings logic to always do it before doing a Without only. Don't do that if it's already a list. The initial goal here was that the oft-seen pattern i=~"something.+",i!="foo",i!="bar" becomes more efficient. benchmark old ns/op new ns/op delta BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62% BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65% BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90% BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57% BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04% BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75% BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10% BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70% BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12% BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96% BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03% BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12% BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
exp []labels.Labels
}{
// Simple equals.
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1")},
Be smarter in how we look at matchers. (#572) * Add unittests for PostingsForMatcher. * Selector methods are all stateless, don't need a reference. * Be smarter in how we look at matchers. Look at all matchers to see if a label can be empty. Optimise Not handling, so i!="2" is a simple lookup rather than an inverse postings list. All all the Withouts together, rather than having to subtract each from all postings. Change the pre-expand the postings logic to always do it before doing a Without only. Don't do that if it's already a list. The initial goal here was that the oft-seen pattern i=~"something.+",i!="foo",i!="bar" becomes more efficient. benchmark old ns/op new ns/op delta BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62% BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65% BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90% BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57% BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04% BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75% BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10% BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70% BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12% BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96% BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03% BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12% BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
exp: []labels.Labels{
labels.FromStrings("n", "1"),
labels.FromStrings("n", "1", "i", "a"),
labels.FromStrings("n", "1", "i", "b"),
},
},
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchEqual, "i", "a")},
Be smarter in how we look at matchers. (#572) * Add unittests for PostingsForMatcher. * Selector methods are all stateless, don't need a reference. * Be smarter in how we look at matchers. Look at all matchers to see if a label can be empty. Optimise Not handling, so i!="2" is a simple lookup rather than an inverse postings list. All all the Withouts together, rather than having to subtract each from all postings. Change the pre-expand the postings logic to always do it before doing a Without only. Don't do that if it's already a list. The initial goal here was that the oft-seen pattern i=~"something.+",i!="foo",i!="bar" becomes more efficient. benchmark old ns/op new ns/op delta BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62% BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65% BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90% BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57% BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04% BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75% BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10% BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70% BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12% BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96% BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03% BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12% BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
exp: []labels.Labels{
labels.FromStrings("n", "1", "i", "a"),
},
},
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchEqual, "i", "missing")},
Be smarter in how we look at matchers. (#572) * Add unittests for PostingsForMatcher. * Selector methods are all stateless, don't need a reference. * Be smarter in how we look at matchers. Look at all matchers to see if a label can be empty. Optimise Not handling, so i!="2" is a simple lookup rather than an inverse postings list. All all the Withouts together, rather than having to subtract each from all postings. Change the pre-expand the postings logic to always do it before doing a Without only. Don't do that if it's already a list. The initial goal here was that the oft-seen pattern i=~"something.+",i!="foo",i!="bar" becomes more efficient. benchmark old ns/op new ns/op delta BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62% BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65% BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90% BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57% BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04% BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75% BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10% BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70% BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12% BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96% BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03% BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12% BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
exp: []labels.Labels{},
},
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "missing", "")},
Be smarter in how we look at matchers. (#572) * Add unittests for PostingsForMatcher. * Selector methods are all stateless, don't need a reference. * Be smarter in how we look at matchers. Look at all matchers to see if a label can be empty. Optimise Not handling, so i!="2" is a simple lookup rather than an inverse postings list. All all the Withouts together, rather than having to subtract each from all postings. Change the pre-expand the postings logic to always do it before doing a Without only. Don't do that if it's already a list. The initial goal here was that the oft-seen pattern i=~"something.+",i!="foo",i!="bar" becomes more efficient. benchmark old ns/op new ns/op delta BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62% BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65% BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90% BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57% BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04% BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75% BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10% BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70% BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12% BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96% BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03% BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12% BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
exp: []labels.Labels{
labels.FromStrings("n", "1"),
labels.FromStrings("n", "1", "i", "a"),
labels.FromStrings("n", "1", "i", "b"),
labels.FromStrings("n", "2"),
labels.FromStrings("n", "2.5"),
Be smarter in how we look at matchers. (#572) * Add unittests for PostingsForMatcher. * Selector methods are all stateless, don't need a reference. * Be smarter in how we look at matchers. Look at all matchers to see if a label can be empty. Optimise Not handling, so i!="2" is a simple lookup rather than an inverse postings list. All all the Withouts together, rather than having to subtract each from all postings. Change the pre-expand the postings logic to always do it before doing a Without only. Don't do that if it's already a list. The initial goal here was that the oft-seen pattern i=~"something.+",i!="foo",i!="bar" becomes more efficient. benchmark old ns/op new ns/op delta BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62% BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65% BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90% BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57% BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04% BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75% BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10% BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70% BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12% BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96% BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03% BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12% BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
},
},
// Not equals.
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotEqual, "n", "1")},
Be smarter in how we look at matchers. (#572) * Add unittests for PostingsForMatcher. * Selector methods are all stateless, don't need a reference. * Be smarter in how we look at matchers. Look at all matchers to see if a label can be empty. Optimise Not handling, so i!="2" is a simple lookup rather than an inverse postings list. All all the Withouts together, rather than having to subtract each from all postings. Change the pre-expand the postings logic to always do it before doing a Without only. Don't do that if it's already a list. The initial goal here was that the oft-seen pattern i=~"something.+",i!="foo",i!="bar" becomes more efficient. benchmark old ns/op new ns/op delta BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62% BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65% BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90% BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57% BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04% BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75% BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10% BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70% BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12% BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96% BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03% BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12% BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
exp: []labels.Labels{
labels.FromStrings("n", "2"),
labels.FromStrings("n", "2.5"),
Be smarter in how we look at matchers. (#572) * Add unittests for PostingsForMatcher. * Selector methods are all stateless, don't need a reference. * Be smarter in how we look at matchers. Look at all matchers to see if a label can be empty. Optimise Not handling, so i!="2" is a simple lookup rather than an inverse postings list. All all the Withouts together, rather than having to subtract each from all postings. Change the pre-expand the postings logic to always do it before doing a Without only. Don't do that if it's already a list. The initial goal here was that the oft-seen pattern i=~"something.+",i!="foo",i!="bar" becomes more efficient. benchmark old ns/op new ns/op delta BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62% BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65% BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90% BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57% BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04% BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75% BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10% BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70% BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12% BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96% BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03% BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12% BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
},
},
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotEqual, "i", "")},
Be smarter in how we look at matchers. (#572) * Add unittests for PostingsForMatcher. * Selector methods are all stateless, don't need a reference. * Be smarter in how we look at matchers. Look at all matchers to see if a label can be empty. Optimise Not handling, so i!="2" is a simple lookup rather than an inverse postings list. All all the Withouts together, rather than having to subtract each from all postings. Change the pre-expand the postings logic to always do it before doing a Without only. Don't do that if it's already a list. The initial goal here was that the oft-seen pattern i=~"something.+",i!="foo",i!="bar" becomes more efficient. benchmark old ns/op new ns/op delta BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62% BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65% BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90% BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57% BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04% BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75% BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10% BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70% BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12% BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96% BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03% BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12% BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
exp: []labels.Labels{
labels.FromStrings("n", "1", "i", "a"),
labels.FromStrings("n", "1", "i", "b"),
},
},
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotEqual, "missing", "")},
Be smarter in how we look at matchers. (#572) * Add unittests for PostingsForMatcher. * Selector methods are all stateless, don't need a reference. * Be smarter in how we look at matchers. Look at all matchers to see if a label can be empty. Optimise Not handling, so i!="2" is a simple lookup rather than an inverse postings list. All all the Withouts together, rather than having to subtract each from all postings. Change the pre-expand the postings logic to always do it before doing a Without only. Don't do that if it's already a list. The initial goal here was that the oft-seen pattern i=~"something.+",i!="foo",i!="bar" becomes more efficient. benchmark old ns/op new ns/op delta BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62% BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65% BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90% BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57% BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04% BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75% BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10% BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70% BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12% BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96% BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03% BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12% BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
exp: []labels.Labels{},
},
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchNotEqual, "i", "a")},
Be smarter in how we look at matchers. (#572) * Add unittests for PostingsForMatcher. * Selector methods are all stateless, don't need a reference. * Be smarter in how we look at matchers. Look at all matchers to see if a label can be empty. Optimise Not handling, so i!="2" is a simple lookup rather than an inverse postings list. All all the Withouts together, rather than having to subtract each from all postings. Change the pre-expand the postings logic to always do it before doing a Without only. Don't do that if it's already a list. The initial goal here was that the oft-seen pattern i=~"something.+",i!="foo",i!="bar" becomes more efficient. benchmark old ns/op new ns/op delta BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62% BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65% BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90% BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57% BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04% BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75% BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10% BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70% BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12% BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96% BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03% BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12% BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
exp: []labels.Labels{
labels.FromStrings("n", "1"),
labels.FromStrings("n", "1", "i", "b"),
},
},
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchNotEqual, "i", "")},
Be smarter in how we look at matchers. (#572) * Add unittests for PostingsForMatcher. * Selector methods are all stateless, don't need a reference. * Be smarter in how we look at matchers. Look at all matchers to see if a label can be empty. Optimise Not handling, so i!="2" is a simple lookup rather than an inverse postings list. All all the Withouts together, rather than having to subtract each from all postings. Change the pre-expand the postings logic to always do it before doing a Without only. Don't do that if it's already a list. The initial goal here was that the oft-seen pattern i=~"something.+",i!="foo",i!="bar" becomes more efficient. benchmark old ns/op new ns/op delta BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62% BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65% BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90% BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57% BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04% BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75% BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10% BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70% BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12% BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96% BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03% BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12% BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
exp: []labels.Labels{
labels.FromStrings("n", "1", "i", "a"),
labels.FromStrings("n", "1", "i", "b"),
},
},
// Regex.
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "n", "^1$")},
Be smarter in how we look at matchers. (#572) * Add unittests for PostingsForMatcher. * Selector methods are all stateless, don't need a reference. * Be smarter in how we look at matchers. Look at all matchers to see if a label can be empty. Optimise Not handling, so i!="2" is a simple lookup rather than an inverse postings list. All all the Withouts together, rather than having to subtract each from all postings. Change the pre-expand the postings logic to always do it before doing a Without only. Don't do that if it's already a list. The initial goal here was that the oft-seen pattern i=~"something.+",i!="foo",i!="bar" becomes more efficient. benchmark old ns/op new ns/op delta BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62% BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65% BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90% BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57% BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04% BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75% BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10% BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70% BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12% BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96% BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03% BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12% BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
exp: []labels.Labels{
labels.FromStrings("n", "1"),
labels.FromStrings("n", "1", "i", "a"),
labels.FromStrings("n", "1", "i", "b"),
},
},
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchRegexp, "i", "^a$")},
Be smarter in how we look at matchers. (#572) * Add unittests for PostingsForMatcher. * Selector methods are all stateless, don't need a reference. * Be smarter in how we look at matchers. Look at all matchers to see if a label can be empty. Optimise Not handling, so i!="2" is a simple lookup rather than an inverse postings list. All all the Withouts together, rather than having to subtract each from all postings. Change the pre-expand the postings logic to always do it before doing a Without only. Don't do that if it's already a list. The initial goal here was that the oft-seen pattern i=~"something.+",i!="foo",i!="bar" becomes more efficient. benchmark old ns/op new ns/op delta BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62% BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65% BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90% BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57% BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04% BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75% BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10% BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70% BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12% BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96% BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03% BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12% BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
exp: []labels.Labels{
labels.FromStrings("n", "1", "i", "a"),
},
},
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchRegexp, "i", "^a?$")},
Be smarter in how we look at matchers. (#572) * Add unittests for PostingsForMatcher. * Selector methods are all stateless, don't need a reference. * Be smarter in how we look at matchers. Look at all matchers to see if a label can be empty. Optimise Not handling, so i!="2" is a simple lookup rather than an inverse postings list. All all the Withouts together, rather than having to subtract each from all postings. Change the pre-expand the postings logic to always do it before doing a Without only. Don't do that if it's already a list. The initial goal here was that the oft-seen pattern i=~"something.+",i!="foo",i!="bar" becomes more efficient. benchmark old ns/op new ns/op delta BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62% BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65% BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90% BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57% BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04% BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75% BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10% BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70% BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12% BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96% BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03% BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12% BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
exp: []labels.Labels{
labels.FromStrings("n", "1"),
labels.FromStrings("n", "1", "i", "a"),
},
},
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "i", "^$")},
Be smarter in how we look at matchers. (#572) * Add unittests for PostingsForMatcher. * Selector methods are all stateless, don't need a reference. * Be smarter in how we look at matchers. Look at all matchers to see if a label can be empty. Optimise Not handling, so i!="2" is a simple lookup rather than an inverse postings list. All all the Withouts together, rather than having to subtract each from all postings. Change the pre-expand the postings logic to always do it before doing a Without only. Don't do that if it's already a list. The initial goal here was that the oft-seen pattern i=~"something.+",i!="foo",i!="bar" becomes more efficient. benchmark old ns/op new ns/op delta BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62% BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65% BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90% BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57% BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04% BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75% BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10% BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70% BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12% BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96% BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03% BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12% BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
exp: []labels.Labels{
labels.FromStrings("n", "1"),
labels.FromStrings("n", "2"),
labels.FromStrings("n", "2.5"),
Be smarter in how we look at matchers. (#572) * Add unittests for PostingsForMatcher. * Selector methods are all stateless, don't need a reference. * Be smarter in how we look at matchers. Look at all matchers to see if a label can be empty. Optimise Not handling, so i!="2" is a simple lookup rather than an inverse postings list. All all the Withouts together, rather than having to subtract each from all postings. Change the pre-expand the postings logic to always do it before doing a Without only. Don't do that if it's already a list. The initial goal here was that the oft-seen pattern i=~"something.+",i!="foo",i!="bar" becomes more efficient. benchmark old ns/op new ns/op delta BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62% BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65% BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90% BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57% BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04% BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75% BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10% BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70% BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12% BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96% BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03% BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12% BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
},
},
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchRegexp, "i", "^$")},
Be smarter in how we look at matchers. (#572) * Add unittests for PostingsForMatcher. * Selector methods are all stateless, don't need a reference. * Be smarter in how we look at matchers. Look at all matchers to see if a label can be empty. Optimise Not handling, so i!="2" is a simple lookup rather than an inverse postings list. All all the Withouts together, rather than having to subtract each from all postings. Change the pre-expand the postings logic to always do it before doing a Without only. Don't do that if it's already a list. The initial goal here was that the oft-seen pattern i=~"something.+",i!="foo",i!="bar" becomes more efficient. benchmark old ns/op new ns/op delta BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62% BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65% BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90% BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57% BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04% BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75% BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10% BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70% BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12% BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96% BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03% BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12% BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
exp: []labels.Labels{
labels.FromStrings("n", "1"),
},
},
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchRegexp, "i", "^.*$")},
Be smarter in how we look at matchers. (#572) * Add unittests for PostingsForMatcher. * Selector methods are all stateless, don't need a reference. * Be smarter in how we look at matchers. Look at all matchers to see if a label can be empty. Optimise Not handling, so i!="2" is a simple lookup rather than an inverse postings list. All all the Withouts together, rather than having to subtract each from all postings. Change the pre-expand the postings logic to always do it before doing a Without only. Don't do that if it's already a list. The initial goal here was that the oft-seen pattern i=~"something.+",i!="foo",i!="bar" becomes more efficient. benchmark old ns/op new ns/op delta BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62% BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65% BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90% BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57% BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04% BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75% BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10% BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70% BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12% BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96% BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03% BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12% BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
exp: []labels.Labels{
labels.FromStrings("n", "1"),
labels.FromStrings("n", "1", "i", "a"),
labels.FromStrings("n", "1", "i", "b"),
},
},
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchRegexp, "i", "^.+$")},
Be smarter in how we look at matchers. (#572) * Add unittests for PostingsForMatcher. * Selector methods are all stateless, don't need a reference. * Be smarter in how we look at matchers. Look at all matchers to see if a label can be empty. Optimise Not handling, so i!="2" is a simple lookup rather than an inverse postings list. All all the Withouts together, rather than having to subtract each from all postings. Change the pre-expand the postings logic to always do it before doing a Without only. Don't do that if it's already a list. The initial goal here was that the oft-seen pattern i=~"something.+",i!="foo",i!="bar" becomes more efficient. benchmark old ns/op new ns/op delta BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62% BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65% BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90% BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57% BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04% BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75% BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10% BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70% BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12% BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96% BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03% BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12% BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
exp: []labels.Labels{
labels.FromStrings("n", "1", "i", "a"),
labels.FromStrings("n", "1", "i", "b"),
},
},
// Not regex.
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotRegexp, "n", "^1$")},
Be smarter in how we look at matchers. (#572) * Add unittests for PostingsForMatcher. * Selector methods are all stateless, don't need a reference. * Be smarter in how we look at matchers. Look at all matchers to see if a label can be empty. Optimise Not handling, so i!="2" is a simple lookup rather than an inverse postings list. All all the Withouts together, rather than having to subtract each from all postings. Change the pre-expand the postings logic to always do it before doing a Without only. Don't do that if it's already a list. The initial goal here was that the oft-seen pattern i=~"something.+",i!="foo",i!="bar" becomes more efficient. benchmark old ns/op new ns/op delta BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62% BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65% BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90% BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57% BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04% BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75% BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10% BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70% BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12% BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96% BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03% BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12% BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
exp: []labels.Labels{
labels.FromStrings("n", "2"),
labels.FromStrings("n", "2.5"),
Be smarter in how we look at matchers. (#572) * Add unittests for PostingsForMatcher. * Selector methods are all stateless, don't need a reference. * Be smarter in how we look at matchers. Look at all matchers to see if a label can be empty. Optimise Not handling, so i!="2" is a simple lookup rather than an inverse postings list. All all the Withouts together, rather than having to subtract each from all postings. Change the pre-expand the postings logic to always do it before doing a Without only. Don't do that if it's already a list. The initial goal here was that the oft-seen pattern i=~"something.+",i!="foo",i!="bar" becomes more efficient. benchmark old ns/op new ns/op delta BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62% BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65% BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90% BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57% BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04% BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75% BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10% BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70% BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12% BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96% BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03% BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12% BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
},
},
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchNotRegexp, "i", "^a$")},
Be smarter in how we look at matchers. (#572) * Add unittests for PostingsForMatcher. * Selector methods are all stateless, don't need a reference. * Be smarter in how we look at matchers. Look at all matchers to see if a label can be empty. Optimise Not handling, so i!="2" is a simple lookup rather than an inverse postings list. All all the Withouts together, rather than having to subtract each from all postings. Change the pre-expand the postings logic to always do it before doing a Without only. Don't do that if it's already a list. The initial goal here was that the oft-seen pattern i=~"something.+",i!="foo",i!="bar" becomes more efficient. benchmark old ns/op new ns/op delta BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62% BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65% BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90% BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57% BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04% BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75% BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10% BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70% BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12% BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96% BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03% BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12% BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
exp: []labels.Labels{
labels.FromStrings("n", "1"),
labels.FromStrings("n", "1", "i", "b"),
},
},
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchNotRegexp, "i", "^a?$")},
Be smarter in how we look at matchers. (#572) * Add unittests for PostingsForMatcher. * Selector methods are all stateless, don't need a reference. * Be smarter in how we look at matchers. Look at all matchers to see if a label can be empty. Optimise Not handling, so i!="2" is a simple lookup rather than an inverse postings list. All all the Withouts together, rather than having to subtract each from all postings. Change the pre-expand the postings logic to always do it before doing a Without only. Don't do that if it's already a list. The initial goal here was that the oft-seen pattern i=~"something.+",i!="foo",i!="bar" becomes more efficient. benchmark old ns/op new ns/op delta BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62% BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65% BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90% BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57% BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04% BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75% BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10% BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70% BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12% BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96% BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03% BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12% BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
exp: []labels.Labels{
labels.FromStrings("n", "1", "i", "b"),
},
},
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchNotRegexp, "i", "^$")},
Be smarter in how we look at matchers. (#572) * Add unittests for PostingsForMatcher. * Selector methods are all stateless, don't need a reference. * Be smarter in how we look at matchers. Look at all matchers to see if a label can be empty. Optimise Not handling, so i!="2" is a simple lookup rather than an inverse postings list. All all the Withouts together, rather than having to subtract each from all postings. Change the pre-expand the postings logic to always do it before doing a Without only. Don't do that if it's already a list. The initial goal here was that the oft-seen pattern i=~"something.+",i!="foo",i!="bar" becomes more efficient. benchmark old ns/op new ns/op delta BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62% BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65% BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90% BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57% BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04% BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75% BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10% BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70% BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12% BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96% BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03% BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12% BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
exp: []labels.Labels{
labels.FromStrings("n", "1", "i", "a"),
labels.FromStrings("n", "1", "i", "b"),
},
},
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchNotRegexp, "i", "^.*$")},
Be smarter in how we look at matchers. (#572) * Add unittests for PostingsForMatcher. * Selector methods are all stateless, don't need a reference. * Be smarter in how we look at matchers. Look at all matchers to see if a label can be empty. Optimise Not handling, so i!="2" is a simple lookup rather than an inverse postings list. All all the Withouts together, rather than having to subtract each from all postings. Change the pre-expand the postings logic to always do it before doing a Without only. Don't do that if it's already a list. The initial goal here was that the oft-seen pattern i=~"something.+",i!="foo",i!="bar" becomes more efficient. benchmark old ns/op new ns/op delta BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62% BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65% BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90% BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57% BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04% BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75% BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10% BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70% BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12% BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96% BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03% BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12% BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
exp: []labels.Labels{},
},
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchNotRegexp, "i", "^.+$")},
Be smarter in how we look at matchers. (#572) * Add unittests for PostingsForMatcher. * Selector methods are all stateless, don't need a reference. * Be smarter in how we look at matchers. Look at all matchers to see if a label can be empty. Optimise Not handling, so i!="2" is a simple lookup rather than an inverse postings list. All all the Withouts together, rather than having to subtract each from all postings. Change the pre-expand the postings logic to always do it before doing a Without only. Don't do that if it's already a list. The initial goal here was that the oft-seen pattern i=~"something.+",i!="foo",i!="bar" becomes more efficient. benchmark old ns/op new ns/op delta BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62% BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65% BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90% BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57% BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04% BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75% BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10% BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70% BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12% BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96% BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03% BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12% BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
exp: []labels.Labels{
labels.FromStrings("n", "1"),
},
},
// Combinations.
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchNotEqual, "i", ""), labels.MustNewMatcher(labels.MatchEqual, "i", "a")},
Be smarter in how we look at matchers. (#572) * Add unittests for PostingsForMatcher. * Selector methods are all stateless, don't need a reference. * Be smarter in how we look at matchers. Look at all matchers to see if a label can be empty. Optimise Not handling, so i!="2" is a simple lookup rather than an inverse postings list. All all the Withouts together, rather than having to subtract each from all postings. Change the pre-expand the postings logic to always do it before doing a Without only. Don't do that if it's already a list. The initial goal here was that the oft-seen pattern i=~"something.+",i!="foo",i!="bar" becomes more efficient. benchmark old ns/op new ns/op delta BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62% BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65% BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90% BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57% BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04% BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75% BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10% BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70% BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12% BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96% BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03% BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12% BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
exp: []labels.Labels{
labels.FromStrings("n", "1", "i", "a"),
},
},
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchNotEqual, "i", "b"), labels.MustNewMatcher(labels.MatchRegexp, "i", "^(b|a).*$")},
Be smarter in how we look at matchers. (#572) * Add unittests for PostingsForMatcher. * Selector methods are all stateless, don't need a reference. * Be smarter in how we look at matchers. Look at all matchers to see if a label can be empty. Optimise Not handling, so i!="2" is a simple lookup rather than an inverse postings list. All all the Withouts together, rather than having to subtract each from all postings. Change the pre-expand the postings logic to always do it before doing a Without only. Don't do that if it's already a list. The initial goal here was that the oft-seen pattern i=~"something.+",i!="foo",i!="bar" becomes more efficient. benchmark old ns/op new ns/op delta BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62% BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65% BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90% BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57% BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04% BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75% BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10% BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70% BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12% BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96% BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03% BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12% BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
exp: []labels.Labels{
labels.FromStrings("n", "1", "i", "a"),
},
},
// Set optimization for Regex.
// Refer to https://github.com/prometheus/prometheus/issues/2651.
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "n", "1|2")},
exp: []labels.Labels{
labels.FromStrings("n", "1"),
labels.FromStrings("n", "1", "i", "a"),
labels.FromStrings("n", "1", "i", "b"),
labels.FromStrings("n", "2"),
},
},
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "i", "a|b")},
exp: []labels.Labels{
labels.FromStrings("n", "1", "i", "a"),
labels.FromStrings("n", "1", "i", "b"),
},
},
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "n", "x1|2")},
exp: []labels.Labels{
labels.FromStrings("n", "2"),
},
},
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "n", "2|2\\.5")},
exp: []labels.Labels{
labels.FromStrings("n", "2"),
labels.FromStrings("n", "2.5"),
},
},
// Empty value.
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "i", "c||d")},
exp: []labels.Labels{
labels.FromStrings("n", "1"),
labels.FromStrings("n", "2"),
labels.FromStrings("n", "2.5"),
},
},
Be smarter in how we look at matchers. (#572) * Add unittests for PostingsForMatcher. * Selector methods are all stateless, don't need a reference. * Be smarter in how we look at matchers. Look at all matchers to see if a label can be empty. Optimise Not handling, so i!="2" is a simple lookup rather than an inverse postings list. All all the Withouts together, rather than having to subtract each from all postings. Change the pre-expand the postings logic to always do it before doing a Without only. Don't do that if it's already a list. The initial goal here was that the oft-seen pattern i=~"something.+",i!="foo",i!="bar" becomes more efficient. benchmark old ns/op new ns/op delta BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62% BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65% BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90% BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57% BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04% BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75% BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10% BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70% BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12% BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96% BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03% BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12% BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
}
ir, err := h.Index()
Be smarter in how we look at matchers. (#572) * Add unittests for PostingsForMatcher. * Selector methods are all stateless, don't need a reference. * Be smarter in how we look at matchers. Look at all matchers to see if a label can be empty. Optimise Not handling, so i!="2" is a simple lookup rather than an inverse postings list. All all the Withouts together, rather than having to subtract each from all postings. Change the pre-expand the postings logic to always do it before doing a Without only. Don't do that if it's already a list. The initial goal here was that the oft-seen pattern i=~"something.+",i!="foo",i!="bar" becomes more efficient. benchmark old ns/op new ns/op delta BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62% BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65% BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90% BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57% BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04% BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75% BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10% BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70% BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12% BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96% BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03% BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12% BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
testutil.Ok(t, err)
for _, c := range cases {
exp := map[string]struct{}{}
for _, l := range c.exp {
exp[l.String()] = struct{}{}
}
p, err := PostingsForMatchers(ir, c.matchers...)
testutil.Ok(t, err)
for p.Next() {
lbls := labels.Labels{}
testutil.Ok(t, ir.Series(p.At(), &lbls, &[]chunks.Meta{}))
Be smarter in how we look at matchers. (#572) * Add unittests for PostingsForMatcher. * Selector methods are all stateless, don't need a reference. * Be smarter in how we look at matchers. Look at all matchers to see if a label can be empty. Optimise Not handling, so i!="2" is a simple lookup rather than an inverse postings list. All all the Withouts together, rather than having to subtract each from all postings. Change the pre-expand the postings logic to always do it before doing a Without only. Don't do that if it's already a list. The initial goal here was that the oft-seen pattern i=~"something.+",i!="foo",i!="bar" becomes more efficient. benchmark old ns/op new ns/op delta BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62% BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65% BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90% BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57% BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04% BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75% BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10% BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70% BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12% BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96% BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03% BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12% BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93% BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01% Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
if _, ok := exp[lbls.String()]; !ok {
t.Errorf("Evaluating %v, unexpected result %s", c.matchers, lbls.String())
} else {
delete(exp, lbls.String())
}
}
testutil.Ok(t, p.Err())
if len(exp) != 0 {
t.Errorf("Evaluating %v, missing results %+v", c.matchers, exp)
}
}
}
// TestClose ensures that calling Close more than once doesn't block and doesn't panic.
func TestClose(t *testing.T) {
dir, err := ioutil.TempDir("", "test_storage")
if err != nil {
t.Fatalf("Opening test dir failed: %s", err)
}
defer func() {
testutil.Ok(t, os.RemoveAll(dir))
}()
createBlock(t, dir, genSeries(1, 1, 0, 10))
createBlock(t, dir, genSeries(1, 1, 10, 20))
db, err := Open(dir, nil, nil, DefaultOptions())
if err != nil {
t.Fatalf("Opening test storage failed: %s", err)
}
defer func() {
testutil.Ok(t, db.Close())
}()
q, err := db.Querier(context.TODO(), 0, 20)
testutil.Ok(t, err)
testutil.Ok(t, q.Close())
testutil.NotOk(t, q.Close())
}
func BenchmarkQueries(b *testing.B) {
cases := map[string]labels.Selector{
"Eq Matcher: Expansion - 1": {
labels.MustNewMatcher(labels.MatchEqual, "la", "va"),
},
"Eq Matcher: Expansion - 2": {
labels.MustNewMatcher(labels.MatchEqual, "la", "va"),
labels.MustNewMatcher(labels.MatchEqual, "lb", "vb"),
},
"Eq Matcher: Expansion - 3": {
labels.MustNewMatcher(labels.MatchEqual, "la", "va"),
labels.MustNewMatcher(labels.MatchEqual, "lb", "vb"),
labels.MustNewMatcher(labels.MatchEqual, "lc", "vc"),
},
"Regex Matcher: Expansion - 1": {
labels.MustNewMatcher(labels.MatchRegexp, "la", ".*va"),
},
"Regex Matcher: Expansion - 2": {
labels.MustNewMatcher(labels.MatchRegexp, "la", ".*va"),
labels.MustNewMatcher(labels.MatchRegexp, "lb", ".*vb"),
},
"Regex Matcher: Expansion - 3": {
labels.MustNewMatcher(labels.MatchRegexp, "la", ".*va"),
labels.MustNewMatcher(labels.MatchRegexp, "lb", ".*vb"),
labels.MustNewMatcher(labels.MatchRegexp, "lc", ".*vc"),
},
}
queryTypes := make(map[string]storage.Querier)
defer func() {
for _, q := range queryTypes {
// Can't run a check for error here as some of these will fail as
// queryTypes is using the same slice for the different block queriers
Spelling (#6517) * spelling: alertmanager Signed-off-by: Josh Soref <jsoref@users.noreply.github.com> * spelling: attributes Signed-off-by: Josh Soref <jsoref@users.noreply.github.com> * spelling: autocomplete Signed-off-by: Josh Soref <jsoref@users.noreply.github.com> * spelling: bootstrap Signed-off-by: Josh Soref <jsoref@users.noreply.github.com> * spelling: caught Signed-off-by: Josh Soref <jsoref@users.noreply.github.com> * spelling: chunkenc Signed-off-by: Josh Soref <jsoref@users.noreply.github.com> * spelling: compaction Signed-off-by: Josh Soref <jsoref@users.noreply.github.com> * spelling: corrupted Signed-off-by: Josh Soref <jsoref@users.noreply.github.com> * spelling: deletable Signed-off-by: Josh Soref <jsoref@users.noreply.github.com> * spelling: expected Signed-off-by: Josh Soref <jsoref@users.noreply.github.com> * spelling: fine-grained Signed-off-by: Josh Soref <jsoref@users.noreply.github.com> * spelling: initialized Signed-off-by: Josh Soref <jsoref@users.noreply.github.com> * spelling: iteration Signed-off-by: Josh Soref <jsoref@users.noreply.github.com> * spelling: javascript Signed-off-by: Josh Soref <jsoref@users.noreply.github.com> * spelling: multiple Signed-off-by: Josh Soref <jsoref@users.noreply.github.com> * spelling: number Signed-off-by: Josh Soref <jsoref@users.noreply.github.com> * spelling: overlapping Signed-off-by: Josh Soref <jsoref@users.noreply.github.com> * spelling: possible Signed-off-by: Josh Soref <jsoref@users.noreply.github.com> * spelling: postings Signed-off-by: Josh Soref <jsoref@users.noreply.github.com> * spelling: procedure Signed-off-by: Josh Soref <jsoref@users.noreply.github.com> * spelling: programmatic Signed-off-by: Josh Soref <jsoref@users.noreply.github.com> * spelling: queuing Signed-off-by: Josh Soref <jsoref@users.noreply.github.com> * spelling: querier Signed-off-by: Josh Soref <jsoref@users.noreply.github.com> * spelling: repairing Signed-off-by: Josh Soref <jsoref@users.noreply.github.com> * spelling: received Signed-off-by: Josh Soref <jsoref@users.noreply.github.com> * spelling: reproducible Signed-off-by: Josh Soref <jsoref@users.noreply.github.com> * spelling: retention Signed-off-by: Josh Soref <jsoref@users.noreply.github.com> * spelling: sample Signed-off-by: Josh Soref <jsoref@users.noreply.github.com> * spelling: segements Signed-off-by: Josh Soref <jsoref@users.noreply.github.com> * spelling: semantic Signed-off-by: Josh Soref <jsoref@users.noreply.github.com> * spelling: software [LICENSE] Signed-off-by: Josh Soref <jsoref@users.noreply.github.com> * spelling: staging Signed-off-by: Josh Soref <jsoref@users.noreply.github.com> * spelling: timestamp Signed-off-by: Josh Soref <jsoref@users.noreply.github.com> * spelling: unfortunately Signed-off-by: Josh Soref <jsoref@users.noreply.github.com> * spelling: uvarint Signed-off-by: Josh Soref <jsoref@users.noreply.github.com> * spelling: subsequently Signed-off-by: Josh Soref <jsoref@users.noreply.github.com> * spelling: ressamples Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
2020-01-02 06:54:09 -08:00
// and would have been closed in the previous iteration.
q.Close()
}
}()
for title, selectors := range cases {
for _, nSeries := range []int{10} {
for _, nSamples := range []int64{1000, 10000, 100000} {
dir, err := ioutil.TempDir("", "test_persisted_query")
testutil.Ok(b, err)
defer func() {
testutil.Ok(b, os.RemoveAll(dir))
}()
series := genSeries(nSeries, 5, 1, int64(nSamples))
// Add some common labels to make the matchers select these series.
{
var commonLbls labels.Labels
for _, selector := range selectors {
switch selector.Type {
case labels.MatchEqual:
commonLbls = append(commonLbls, labels.Label{Name: selector.Name, Value: selector.Value})
case labels.MatchRegexp:
commonLbls = append(commonLbls, labels.Label{Name: selector.Name, Value: selector.Value})
}
}
for i := range commonLbls {
s := series[i].(*mockSeries)
allLabels := append(commonLbls, s.Labels()...)
s = &mockSeries{
labels: func() labels.Labels { return allLabels },
iterator: s.iterator,
}
series[i] = s
}
}
qs := make([]storage.Querier, 0, 10)
for x := 0; x <= 10; x++ {
block, err := OpenBlock(nil, createBlock(b, dir, series), nil)
testutil.Ok(b, err)
q, err := NewBlockQuerier(block, 1, int64(nSamples))
testutil.Ok(b, err)
qs = append(qs, q)
}
queryTypes["_1-Block"] = &querier{blocks: qs[:1]}
queryTypes["_3-Blocks"] = &querier{blocks: qs[0:3]}
queryTypes["_10-Blocks"] = &querier{blocks: qs}
M-map full chunks of Head from disk (#6679) When appending to the head and a chunk is full it is flushed to the disk and m-mapped (memory mapped) to free up memory Prom startup now happens in these stages - Iterate the m-maped chunks from disk and keep a map of series reference to its slice of mmapped chunks. - Iterate the WAL as usual. Whenever we create a new series, look for it's mmapped chunks in the map created before and add it to that series. If a head chunk is corrupted the currpted one and all chunks after that are deleted and the data after the corruption is recovered from the existing WAL which means that a corruption in m-mapped files results in NO data loss. [Mmaped chunks format](https://github.com/prometheus/prometheus/blob/master/tsdb/docs/format/head_chunks.md) - main difference is that the chunk for mmaping now also includes series reference because there is no index for mapping series to chunks. [The block chunks](https://github.com/prometheus/prometheus/blob/master/tsdb/docs/format/chunks.md) are accessed from the index which includes the offsets for the chunks in the chunks file - example - chunks of series ID have offsets 200, 500 etc in the chunk files. In case of mmaped chunks, the offsets are stored in memory and accessed from that. During WAL replay, these offsets are restored by iterating all m-mapped chunks as stated above by matching the series id present in the chunk header and offset of that chunk in that file. **Prombench results** _WAL Replay_ 1h Wal reply time 30% less wal reply time - 4m31 vs 3m36 2h Wal reply time 20% less wal reply time - 8m16 vs 7m _Memory During WAL Replay_ High Churn: 10-15% less RAM - 32gb vs 28gb 20% less RAM after compaction 34gb vs 27gb No Churn: 20-30% less RAM - 23gb vs 18gb 40% less RAM after compaction 32.5gb vs 20gb Screenshots are in [this comment](https://github.com/prometheus/prometheus/pull/6679#issuecomment-621678932) Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in>
2020-05-06 08:30:00 -07:00
chunkDir, err := ioutil.TempDir("", "chunk_dir")
testutil.Ok(b, err)
defer func() {
testutil.Ok(b, os.RemoveAll(chunkDir))
}()
head := createHead(b, series, chunkDir)
qHead, err := NewBlockQuerier(head, 1, int64(nSamples))
testutil.Ok(b, err)
queryTypes["_Head"] = qHead
for qtype, querier := range queryTypes {
b.Run(title+qtype+"_nSeries:"+strconv.Itoa(nSeries)+"_nSamples:"+strconv.Itoa(int(nSamples)), func(b *testing.B) {
expExpansions, err := strconv.Atoi(string(title[len(title)-1]))
testutil.Ok(b, err)
benchQuery(b, expExpansions, querier, selectors)
})
}
M-map full chunks of Head from disk (#6679) When appending to the head and a chunk is full it is flushed to the disk and m-mapped (memory mapped) to free up memory Prom startup now happens in these stages - Iterate the m-maped chunks from disk and keep a map of series reference to its slice of mmapped chunks. - Iterate the WAL as usual. Whenever we create a new series, look for it's mmapped chunks in the map created before and add it to that series. If a head chunk is corrupted the currpted one and all chunks after that are deleted and the data after the corruption is recovered from the existing WAL which means that a corruption in m-mapped files results in NO data loss. [Mmaped chunks format](https://github.com/prometheus/prometheus/blob/master/tsdb/docs/format/head_chunks.md) - main difference is that the chunk for mmaping now also includes series reference because there is no index for mapping series to chunks. [The block chunks](https://github.com/prometheus/prometheus/blob/master/tsdb/docs/format/chunks.md) are accessed from the index which includes the offsets for the chunks in the chunks file - example - chunks of series ID have offsets 200, 500 etc in the chunk files. In case of mmaped chunks, the offsets are stored in memory and accessed from that. During WAL replay, these offsets are restored by iterating all m-mapped chunks as stated above by matching the series id present in the chunk header and offset of that chunk in that file. **Prombench results** _WAL Replay_ 1h Wal reply time 30% less wal reply time - 4m31 vs 3m36 2h Wal reply time 20% less wal reply time - 8m16 vs 7m _Memory During WAL Replay_ High Churn: 10-15% less RAM - 32gb vs 28gb 20% less RAM after compaction 34gb vs 27gb No Churn: 20-30% less RAM - 23gb vs 18gb 40% less RAM after compaction 32.5gb vs 20gb Screenshots are in [this comment](https://github.com/prometheus/prometheus/pull/6679#issuecomment-621678932) Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in>
2020-05-06 08:30:00 -07:00
testutil.Ok(b, head.Close())
}
}
}
}
func benchQuery(b *testing.B, expExpansions int, q storage.Querier, selectors labels.Selector) {
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
*: Consistent Error/Warning handling for SeriesSet iterator: Allowing Async Select (#7251) * Add errors and Warnings to SeriesSet Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Change Querier interface and refactor accordingly Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Refactor promql/engine to propagate warnings at eval stage Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review issues Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Make sure all the series from all Selects are pre-advanced Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review issues Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Separate merge series sets Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Clean Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Refactor merge querier failure handling Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Refactored and simplified fanout with improvements from incoming chunk iterator PRs. * Secondary logic is hidden, instead of weird failed series set logic we had. * Fanout is well commented * Fanout closing record all errors * MergeQuerier improved API (clearer) * deferredGenericMergeSeriesSet is not needed as we return no samples anyway for failed series sets (next = false). Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Fix formatting Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Fix CI issues Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Added final tests for error handling. Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Addressed Brian's comments. * Moved hints in populate to be allocated only when needed. * Used sync.Once in secondary Querier to achieve all-or-nothing partial response logic. * Select after first Next is done will panic. NOTE: in lazySeriesSet in theory we could just panic, I think however we can totally just return error, it will panic in expand anyway. Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Utilize errWithWarnings Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Fix recently introduced expansion issue Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Add tests for secondary querier error handling Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Implement lazy merge Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Add name to test cases Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Reorganize Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review comments Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review comments Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Remove redundant warnings Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Fix rebase mistake Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> Co-authored-by: Bartlomiej Plotka <bwplotka@gmail.com>
2020-06-09 09:57:31 -07:00
ss := q.Select(false, nil, selectors...)
var actualExpansions int
for ss.Next() {
s := ss.At()
s.Labels()
it := s.Iterator()
for it.Next() {
}
actualExpansions++
}
*: Consistent Error/Warning handling for SeriesSet iterator: Allowing Async Select (#7251) * Add errors and Warnings to SeriesSet Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Change Querier interface and refactor accordingly Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Refactor promql/engine to propagate warnings at eval stage Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review issues Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Make sure all the series from all Selects are pre-advanced Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review issues Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Separate merge series sets Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Clean Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Refactor merge querier failure handling Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Refactored and simplified fanout with improvements from incoming chunk iterator PRs. * Secondary logic is hidden, instead of weird failed series set logic we had. * Fanout is well commented * Fanout closing record all errors * MergeQuerier improved API (clearer) * deferredGenericMergeSeriesSet is not needed as we return no samples anyway for failed series sets (next = false). Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Fix formatting Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Fix CI issues Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Added final tests for error handling. Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Addressed Brian's comments. * Moved hints in populate to be allocated only when needed. * Used sync.Once in secondary Querier to achieve all-or-nothing partial response logic. * Select after first Next is done will panic. NOTE: in lazySeriesSet in theory we could just panic, I think however we can totally just return error, it will panic in expand anyway. Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Utilize errWithWarnings Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Fix recently introduced expansion issue Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Add tests for secondary querier error handling Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Implement lazy merge Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Add name to test cases Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Reorganize Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review comments Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Address review comments Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Remove redundant warnings Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> * Fix rebase mistake Signed-off-by: Kemal Akkoyun <kakkoyun@gmail.com> Co-authored-by: Bartlomiej Plotka <bwplotka@gmail.com>
2020-06-09 09:57:31 -07:00
testutil.Ok(b, ss.Err())
testutil.Equals(b, 0, len(ss.Warnings()))
testutil.Equals(b, expExpansions, actualExpansions)
testutil.Ok(b, ss.Err())
}
}
// mockMatcherIndex is used to check if the regex matcher works as expected.
type mockMatcherIndex struct{}
func (m mockMatcherIndex) Symbols() index.StringIter { return nil }
func (m mockMatcherIndex) Close() error { return nil }
// SortedLabelValues will return error if it is called.
func (m mockMatcherIndex) SortedLabelValues(name string) ([]string, error) {
return []string{}, errors.New("sorted label values called")
}
// LabelValues will return error if it is called.
func (m mockMatcherIndex) LabelValues(name string) ([]string, error) {
return []string{}, errors.New("label values called")
}
func (m mockMatcherIndex) Postings(name string, values ...string) (index.Postings, error) {
return index.EmptyPostings(), nil
}
func (m mockMatcherIndex) SortedPostings(p index.Postings) index.Postings {
return index.EmptyPostings()
}
func (m mockMatcherIndex) Series(ref uint64, lset *labels.Labels, chks *[]chunks.Meta) error {
return nil
}
func (m mockMatcherIndex) LabelNames() ([]string, error) { return []string{}, nil }
func TestPostingsForMatcher(t *testing.T) {
cases := []struct {
matcher *labels.Matcher
hasError bool
}{
{
// Equal label matcher will just return.
matcher: labels.MustNewMatcher(labels.MatchEqual, "test", "test"),
hasError: false,
},
{
// Regex matcher which doesn't have '|' will call Labelvalues()
matcher: labels.MustNewMatcher(labels.MatchRegexp, "test", ".*"),
hasError: true,
},
{
matcher: labels.MustNewMatcher(labels.MatchRegexp, "test", "a|b"),
hasError: false,
},
{
// Test case for double quoted regex matcher
matcher: labels.MustNewMatcher(labels.MatchRegexp, "test", "^(?:a|b)$"),
hasError: true,
},
}
for _, tc := range cases {
ir := &mockMatcherIndex{}
_, err := postingsForMatcher(ir, tc.matcher)
if tc.hasError {
testutil.NotOk(t, err)
} else {
testutil.Ok(t, err)
}
}
}