prometheus/tsdb/querier_bench_test.go

// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb

import (
	"context"
	"fmt"
	"io/ioutil"
	"os"
	"strconv"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/hashcache"
)
// Make entries ~50B in size, to emulate real-world high cardinality.
const (
	postingsBenchSuffix = "aaaaaaaaaabbbbbbbbbbccccccccccdddddddddd"
)
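
// BenchmarkPostingsForMatchers benchmarks PostingsForMatchers against both the
// in-memory Head index and a persisted Block index built from the same series.
// Example invocation: go test -run='^$' -bench=BenchmarkPostingsForMatchers ./tsdb/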
func BenchmarkPostingsForMatchers(b *testing.B) {
	chunkDir, err := ioutil.TempDir("", "chunk_dir")
	require.NoError(b, err)
	defer func() {
		require.NoError(b, os.RemoveAll(chunkDir))
	}()

	opts := DefaultHeadOptions()
	opts.ChunkRange = 1000
	opts.ChunkDirRoot = chunkDir
	h, err := NewHead(nil, nil, nil, opts, nil)
	require.NoError(b, err)
	defer func() {
		require.NoError(b, h.Close())
	}()

	app := h.Appender(context.Background())
	addSeries := func(l labels.Labels) {
		app.Append(0, l, 0, 0)
	}
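
	// 10 values of "n" × 100k values of "i" × 5 calls below = 5M unique series.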
	for n := 0; n < 10; n++ {
		for i := 0; i < 100000; i++ {
			addSeries(labels.FromStrings("i", strconv.Itoa(i)+postingsBenchSuffix, "n", strconv.Itoa(n)+postingsBenchSuffix, "j", "foo"))
			// Have some series that won't be matched, to properly test inverted matches.
			addSeries(labels.FromStrings("i", strconv.Itoa(i)+postingsBenchSuffix, "n", strconv.Itoa(n)+postingsBenchSuffix, "j", "bar"))
			addSeries(labels.FromStrings("i", strconv.Itoa(i)+postingsBenchSuffix, "n", "0_"+strconv.Itoa(n)+postingsBenchSuffix, "j", "bar"))
			addSeries(labels.FromStrings("i", strconv.Itoa(i)+postingsBenchSuffix, "n", "1_"+strconv.Itoa(n)+postingsBenchSuffix, "j", "bar"))
			addSeries(labels.FromStrings("i", strconv.Itoa(i)+postingsBenchSuffix, "n", "2_"+strconv.Itoa(n)+postingsBenchSuffix, "j", "foo"))
		}
	}
	require.NoError(b, app.Commit())

	ir, err := h.Index()
	require.NoError(b, err)
	b.Run("Head", func(b *testing.B) {
		benchmarkPostingsForMatchers(b, ir)
	})

	tmpdir, err := ioutil.TempDir("", "test_benchpostingsformatchers")
	require.NoError(b, err)
	defer func() {
		require.NoError(b, os.RemoveAll(tmpdir))
	}()

	blockdir := createBlockFromHead(b, tmpdir, h)
	block, err := OpenBlock(nil, blockdir, nil)
	require.NoError(b, err)
	defer func() {
		require.NoError(b, block.Close())
	}()
	ir, err = block.Index()
	require.NoError(b, err)
	defer ir.Close()
	b.Run("Block", func(b *testing.B) {
		benchmarkPostingsForMatchers(b, ir)
	})
}
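
// benchmarkPostingsForMatchers runs PostingsForMatchers b.N times for every
// matcher combination in the cases table against the given IndexReader.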
func benchmarkPostingsForMatchers(b *testing.B, ir IndexReader) {
	n1 := labels.MustNewMatcher(labels.MatchEqual, "n", "1"+postingsBenchSuffix)

	jFoo := labels.MustNewMatcher(labels.MatchEqual, "j", "foo")
	jNotFoo := labels.MustNewMatcher(labels.MatchNotEqual, "j", "foo")

	iStar := labels.MustNewMatcher(labels.MatchRegexp, "i", "^.*$")
	i1Star := labels.MustNewMatcher(labels.MatchRegexp, "i", "^1.*$")
	iStar1 := labels.MustNewMatcher(labels.MatchRegexp, "i", "^.*1$")
	iStar1Star := labels.MustNewMatcher(labels.MatchRegexp, "i", "^.*1.*$")
	iPlus := labels.MustNewMatcher(labels.MatchRegexp, "i", "^.+$")
	i1Plus := labels.MustNewMatcher(labels.MatchRegexp, "i", "^1.+$")
	iEmptyRe := labels.MustNewMatcher(labels.MatchRegexp, "i", "^$")
	iNotEmpty := labels.MustNewMatcher(labels.MatchNotEqual, "i", "")
	iNot2 := labels.MustNewMatcher(labels.MatchNotEqual, "n", "2"+postingsBenchSuffix)
	iNot2Star := labels.MustNewMatcher(labels.MatchNotRegexp, "i", "^2.*$")
	iNotStar2Star := labels.MustNewMatcher(labels.MatchNotRegexp, "i", "^.*2.*$")
	jFooBar := labels.MustNewMatcher(labels.MatchRegexp, "j", "foo|bar")
	iCharSet := labels.MustNewMatcher(labels.MatchRegexp, "i", "1[0-9]")
	iAlternate := labels.MustNewMatcher(labels.MatchRegexp, "i", "(1|2|3|4|5|6|20|55)")
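
	// One sub-benchmark per matcher set; names use PromQL selector syntax.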
	cases := []struct {
		name     string
		matchers []*labels.Matcher
	}{
		{`n="1"`, []*labels.Matcher{n1}},
		{`n="1",j="foo"`, []*labels.Matcher{n1, jFoo}},
		{`j="foo",n="1"`, []*labels.Matcher{jFoo, n1}},
		{`n="1",j!="foo"`, []*labels.Matcher{n1, jNotFoo}},
		{`i=~"1[0-9]",j=~"foo|bar"`, []*labels.Matcher{iCharSet, jFooBar}},
		{`j=~"foo|bar"`, []*labels.Matcher{jFooBar}},
		{`i=~"(1|2|3|4|5|6|20|55)"`, []*labels.Matcher{iAlternate}},
		{`i=~".*"`, []*labels.Matcher{iStar}},
		{`i=~"1.*"`, []*labels.Matcher{i1Star}},
		{`i=~".*1"`, []*labels.Matcher{iStar1}},
		{`i=~".+"`, []*labels.Matcher{iPlus}},
		{`i=~""`, []*labels.Matcher{iEmptyRe}},
		{`i!=""`, []*labels.Matcher{iNotEmpty}},
		{`n="1",i=~".*",j="foo"`, []*labels.Matcher{n1, iStar, jFoo}},
		{`n="1",i=~".*",i!="2",j="foo"`, []*labels.Matcher{n1, iStar, iNot2, jFoo}},
		{`n="1",i!=""`, []*labels.Matcher{n1, iNotEmpty}},
		{`n="1",i!="",j="foo"`, []*labels.Matcher{n1, iNotEmpty, jFoo}},
		{`n="1",i=~".+",j="foo"`, []*labels.Matcher{n1, iPlus, jFoo}},
		{`n="1",i=~"1.+",j="foo"`, []*labels.Matcher{n1, i1Plus, jFoo}},
		{`n="1",i=~".*1.*",j="foo"`, []*labels.Matcher{n1, iStar1Star, jFoo}},
		{`n="1",i=~".+",i!="2",j="foo"`, []*labels.Matcher{n1, iPlus, iNot2, jFoo}},
		{`n="1",i=~".+",i!~"2.*",j="foo"`, []*labels.Matcher{n1, iPlus, iNot2Star, jFoo}},
		{`n="1",i=~".+",i!~".*2.*",j="foo"`, []*labels.Matcher{n1, iPlus, iNotStar2Star, jFoo}},
	}

	for _, c := range cases {
		b.Run(c.name, func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				_, err := PostingsForMatchers(ir, c.matchers...)
				require.NoError(b, err)
			}
		})
	}
}
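
// BenchmarkQuerierSelect measures Select over a Head, a sorted Head, and a
// persisted Block, with and without shard hints, at increasing selectivity.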
func BenchmarkQuerierSelect(b *testing.B) {
	chunkDir, err := ioutil.TempDir("", "chunk_dir")
	require.NoError(b, err)
	defer func() {
		require.NoError(b, os.RemoveAll(chunkDir))
	}()

	opts := DefaultHeadOptions()
	opts.ChunkRange = 1000
	opts.ChunkDirRoot = chunkDir
	h, err := NewHead(nil, nil, nil, opts, nil)
	require.NoError(b, err)
	defer h.Close()

	app := h.Appender(context.Background())
	numSeries := 1000000
	for i := 0; i < numSeries; i++ {
		app.Append(0, labels.FromStrings("foo", "bar", "i", fmt.Sprintf("%d%s", i, postingsBenchSuffix)), int64(i), 0)
	}
	require.NoError(b, app.Commit())
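
	// bench sweeps the queried time range so that roughly s of the numSeries
	// series fall inside it (series i has its only sample at timestamp i).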
	bench := func(b *testing.B, br BlockReader, sorted, sharding bool) {
		matcher := labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")

		for s := 1; s <= numSeries; s *= 10 {
			b.Run(fmt.Sprintf("%dof%d", s, numSeries), func(b *testing.B) {
				mint := int64(0)
				maxt := int64(s - 1)
				q, err := NewBlockQuerier(br, mint, maxt)
				require.NoError(b, err)

				b.ResetTimer()
				for i := 0; i < b.N; i++ {
					var hints *storage.SelectHints
					if sharding {
						hints = &storage.SelectHints{
							Start:      mint,
							End:        maxt,
							ShardIndex: uint64(i % 16),
							ShardCount: 16,
						}
					}

					ss := q.Select(sorted, hints, matcher)
					for ss.Next() {
					}
					require.NoError(b, ss.Err())
				}
				q.Close()
			})
		}
	}
	b.Run("Head", func(b *testing.B) {
		b.Run("without sharding", func(b *testing.B) {
			bench(b, h, false, false)
		})
		b.Run("with sharding", func(b *testing.B) {
			bench(b, h, false, true)
		})
	})
	b.Run("SortedHead", func(b *testing.B) {
		b.Run("without sharding", func(b *testing.B) {
			bench(b, h, true, false)
		})
		b.Run("with sharding", func(b *testing.B) {
			bench(b, h, true, true)
		})
	})

	tmpdir, err := ioutil.TempDir("", "test_benchquerierselect")
	require.NoError(b, err)
	defer func() {
		require.NoError(b, os.RemoveAll(tmpdir))
	}()
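
	// The block is opened with a series hash cache so that sharded Select
	// calls can avoid recomputing per-series label-set hashes.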
	seriesHashCache := hashcache.NewSeriesHashCache(1024 * 1024 * 1024)
	blockdir := createBlockFromHead(b, tmpdir, h)
	block, err := OpenBlockWithCache(nil, blockdir, nil, seriesHashCache.GetBlockCacheProvider("test"))
	require.NoError(b, err)
	defer func() {
		require.NoError(b, block.Close())
	}()

	b.Run("Block", func(b *testing.B) {
		b.Run("without sharding", func(b *testing.B) {
			bench(b, block, false, false)
		})
		b.Run("with sharding", func(b *testing.B) {
			bench(b, block, false, true)
		})
	})
}