Mirror of https://github.com/prometheus/prometheus.git

Commit c1b669bf9b: Introduce out-of-order TSDB support

This implementation is based on this design doc:
https://docs.google.com/document/d/1Kppm7qL9C-BJB1j6yb6-9ObG3AbdZnFUBYPNNWwDBYM/edit?usp=sharing

This commit adds support for accepting out-of-order ("OOO") samples into the TSDB, up to a configurable time allowance. If OOO is enabled, overlapping querying is automatically enabled.

Most of the additions have been borrowed from https://github.com/grafana/mimir-prometheus/. The original commits cherry-picked from mimir-prometheus into this branch are:
- 4b2198d7ec
- 2836e5513f
- 00b379c3a5
- ff0dc75758
- a632c73352
- c6f3d4ab33
- 5e8406a1d4
- abde1e0ba1
- e70e769889
- df59320886

Follow-up changes squashed into this commit:
* gofumpt files
* Add license header to missing files
* Fix OOO tests due to existing chunk disk mapper implementation
* Fix truncate int overflow
* Add Sync method to the WAL and update tests
* Remove useless sync
* Update minOOOTime after truncating Head
* Fix lint
* Add a unit test
* Load OutOfOrderTimeWindow only once per appender
* Fix OOO Head LabelValues and PostingsForMatchers
* Fix replay of OOO mmap chunks
* Remove unnecessary err check
* Prevent panic with ApplyConfig
* Run OOO compaction after restart if there is OOO data from WBL
* Apply Bartek's suggestions
* Refactor OOO compaction
* Address comments and TODOs:
  - Added a comment explaining why we need the allow-overlapping-compaction toggle
  - Clarified the TSDBConfig OutOfOrderTimeWindow doc
  - Added an owner to all the TODOs in the code
* Run go format
* Fix remaining review comments
* Fix tests
* Change wbl reference when truncating ooo in TestHeadMinOOOTimeUpdate
* Fix TestWBLAndMmapReplay test failure on Windows
* Address most of the feedback
* Refactor the block meta for out of order
* Fix Windows error
* Fix review comments

Co-authored-by: Ganesh Vernekar <ganeshvern@gmail.com>
Co-authored-by: Jesus Vazquez <jesus.vazquez@grafana.com>
Co-authored-by: Dieter Plaetinck <dieter@grafana.com>
Co-authored-by: Oleg Zaytsev <mail@olegzaytsev.com>
Co-authored-by: Bartlomiej Plotka <bwplotka@gmail.com>
Signed-off-by: Jesus Vazquez <jesus.vazquez@grafana.com>
Signed-off-by: Ganesh Vernekar <ganeshvern@gmail.com>
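For context, a minimal sketch of how the out-of-order window described above might be enabled through the tsdb package. This is not part of the benchmark file shown below, and the field and parameter names (OutOfOrderTimeWindow in milliseconds, the tsdb.Open signature) are assumptions based on this commit's description; check them against the released code.

// Hypothetical usage sketch; names are assumptions, not the confirmed API.
package main

import (
	"log"
	"time"

	"github.com/prometheus/prometheus/tsdb"
)

func main() {
	opts := tsdb.DefaultOptions()
	// Accept samples up to 30 minutes out of order (value is in
	// milliseconds); 0 leaves out-of-order ingestion disabled.
	opts.OutOfOrderTimeWindow = int64(30 * time.Minute / time.Millisecond)

	db, err := tsdb.Open("./data", nil, nil, opts, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	// Appenders obtained from db now accept samples within the window;
	// per the commit message, overlapping querying is enabled automatically.
}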
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package tsdb

import (
	"context"
	"fmt"
	"strconv"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/prometheus/prometheus/model/labels"
)

// Make entries ~50B in size, to emulate real-world high cardinality.
const (
	postingsBenchSuffix = "aaaaaaaaaabbbbbbbbbbccccccccccdddddddddd"
)

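// BenchmarkQuerier populates a Head with millions of high-cardinality series
// and benchmarks PostingsForMatchers and labelValuesWithMatchers against both
// the in-memory Head index and the index of a block persisted from that Head.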
func BenchmarkQuerier(b *testing.B) {
	chunkDir := b.TempDir()
	opts := DefaultHeadOptions()
	opts.ChunkRange = 1000
	opts.ChunkDirRoot = chunkDir
	h, err := NewHead(nil, nil, nil, nil, opts, nil)
	require.NoError(b, err)
	defer func() {
		require.NoError(b, h.Close())
	}()

	app := h.Appender(context.Background())
	addSeries := func(l labels.Labels) {
		app.Append(0, l, 0, 0)
	}

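	// 10 values of "n" x 100k values of "i" x 5 label-set variants = 5M distinct series.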
	for n := 0; n < 10; n++ {
		for i := 0; i < 100000; i++ {
			addSeries(labels.FromStrings("i", strconv.Itoa(i)+postingsBenchSuffix, "n", strconv.Itoa(n)+postingsBenchSuffix, "j", "foo"))
			// Have some series that won't be matched, to properly test inverted matches.
			addSeries(labels.FromStrings("i", strconv.Itoa(i)+postingsBenchSuffix, "n", strconv.Itoa(n)+postingsBenchSuffix, "j", "bar"))
			addSeries(labels.FromStrings("i", strconv.Itoa(i)+postingsBenchSuffix, "n", "0_"+strconv.Itoa(n)+postingsBenchSuffix, "j", "bar"))
			addSeries(labels.FromStrings("i", strconv.Itoa(i)+postingsBenchSuffix, "n", "1_"+strconv.Itoa(n)+postingsBenchSuffix, "j", "bar"))
			addSeries(labels.FromStrings("i", strconv.Itoa(i)+postingsBenchSuffix, "n", "2_"+strconv.Itoa(n)+postingsBenchSuffix, "j", "foo"))
		}
	}
	require.NoError(b, app.Commit())

	ir, err := h.Index()
	require.NoError(b, err)
	b.Run("Head", func(b *testing.B) {
		b.Run("PostingsForMatchers", func(b *testing.B) {
			benchmarkPostingsForMatchers(b, ir)
		})
		b.Run("labelValuesWithMatchers", func(b *testing.B) {
			benchmarkLabelValuesWithMatchers(b, ir)
		})
	})

	tmpdir := b.TempDir()

	blockdir := createBlockFromHead(b, tmpdir, h)
	block, err := OpenBlock(nil, blockdir, nil)
	require.NoError(b, err)
	defer func() {
		require.NoError(b, block.Close())
	}()
	ir, err = block.Index()
	require.NoError(b, err)
	defer ir.Close()
	b.Run("Block", func(b *testing.B) {
		b.Run("PostingsForMatchers", func(b *testing.B) {
			benchmarkPostingsForMatchers(b, ir)
		})
		b.Run("labelValuesWithMatchers", func(b *testing.B) {
			benchmarkLabelValuesWithMatchers(b, ir)
		})
	})
}

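// benchmarkPostingsForMatchers runs PostingsForMatchers on the given
// IndexReader for a range of selectors, from cheap equality matchers to
// expensive regexp and negated matchers.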
func benchmarkPostingsForMatchers(b *testing.B, ir IndexReader) {
	n1 := labels.MustNewMatcher(labels.MatchEqual, "n", "1"+postingsBenchSuffix)

	jFoo := labels.MustNewMatcher(labels.MatchEqual, "j", "foo")
	jNotFoo := labels.MustNewMatcher(labels.MatchNotEqual, "j", "foo")

	iStar := labels.MustNewMatcher(labels.MatchRegexp, "i", "^.*$")
	i1Star := labels.MustNewMatcher(labels.MatchRegexp, "i", "^1.*$")
	iStar1 := labels.MustNewMatcher(labels.MatchRegexp, "i", "^.*1$")
	iStar1Star := labels.MustNewMatcher(labels.MatchRegexp, "i", "^.*1.*$")
	iPlus := labels.MustNewMatcher(labels.MatchRegexp, "i", "^.+$")
	i1Plus := labels.MustNewMatcher(labels.MatchRegexp, "i", "^1.+$")
	iEmptyRe := labels.MustNewMatcher(labels.MatchRegexp, "i", "^$")
	iNotEmpty := labels.MustNewMatcher(labels.MatchNotEqual, "i", "")
	iNot2 := labels.MustNewMatcher(labels.MatchNotEqual, "i", "2"+postingsBenchSuffix)
	iNot2Star := labels.MustNewMatcher(labels.MatchNotRegexp, "i", "^2.*$")
	iNotStar2Star := labels.MustNewMatcher(labels.MatchNotRegexp, "i", "^.*2.*$")

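	// Each case name is the selector whose matchers are benchmarked.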
	cases := []struct {
		name     string
		matchers []*labels.Matcher
	}{
		{`n="1"`, []*labels.Matcher{n1}},
		{`n="1",j="foo"`, []*labels.Matcher{n1, jFoo}},
		{`j="foo",n="1"`, []*labels.Matcher{jFoo, n1}},
		{`n="1",j!="foo"`, []*labels.Matcher{n1, jNotFoo}},
		{`i=~".*"`, []*labels.Matcher{iStar}},
		{`i=~"1.*"`, []*labels.Matcher{i1Star}},
		{`i=~".*1"`, []*labels.Matcher{iStar1}},
		{`i=~".+"`, []*labels.Matcher{iPlus}},
		{`i=~""`, []*labels.Matcher{iEmptyRe}},
		{`i!=""`, []*labels.Matcher{iNotEmpty}},
		{`n="1",i=~".*",j="foo"`, []*labels.Matcher{n1, iStar, jFoo}},
		{`n="1",i=~".*",i!="2",j="foo"`, []*labels.Matcher{n1, iStar, iNot2, jFoo}},
		{`n="1",i!=""`, []*labels.Matcher{n1, iNotEmpty}},
		{`n="1",i!="",j="foo"`, []*labels.Matcher{n1, iNotEmpty, jFoo}},
		{`n="1",i=~".+",j="foo"`, []*labels.Matcher{n1, iPlus, jFoo}},
		{`n="1",i=~"1.+",j="foo"`, []*labels.Matcher{n1, i1Plus, jFoo}},
		{`n="1",i=~".*1.*",j="foo"`, []*labels.Matcher{n1, iStar1Star, jFoo}},
		{`n="1",i=~".+",i!="2",j="foo"`, []*labels.Matcher{n1, iPlus, iNot2, jFoo}},
		{`n="1",i=~".+",i!~"2.*",j="foo"`, []*labels.Matcher{n1, iPlus, iNot2Star, jFoo}},
		{`n="1",i=~".+",i!~".*2.*",j="foo"`, []*labels.Matcher{n1, iPlus, iNotStar2Star, jFoo}},
	}

	for _, c := range cases {
		b.Run(c.name, func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				_, err := PostingsForMatchers(ir, c.matchers...)
				require.NoError(b, err)
			}
		})
	}
}

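// benchmarkLabelValuesWithMatchers measures labelValuesWithMatchers on the
// given IndexReader for both a high-cardinality and a low-cardinality label,
// restricted by various matcher combinations.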
func benchmarkLabelValuesWithMatchers(b *testing.B, ir IndexReader) {
	i1 := labels.MustNewMatcher(labels.MatchEqual, "i", "1")
	iStar := labels.MustNewMatcher(labels.MatchRegexp, "i", "^.*$")
	jNotFoo := labels.MustNewMatcher(labels.MatchNotEqual, "j", "foo")
	n1 := labels.MustNewMatcher(labels.MatchEqual, "n", "1"+postingsBenchSuffix)
	nPlus := labels.MustNewMatcher(labels.MatchRegexp, "n", "^.+$")

	cases := []struct {
		name      string
		labelName string
		matchers  []*labels.Matcher
	}{
		// i has 100k values.
		{`i with n="1"`, "i", []*labels.Matcher{n1}},
		{`i with n="^.+$"`, "i", []*labels.Matcher{nPlus}},
		{`i with n="1",j!="foo"`, "i", []*labels.Matcher{n1, jNotFoo}},
		{`i with n="1",i=~"^.*$",j!="foo"`, "i", []*labels.Matcher{n1, iStar, jNotFoo}},
		// n has 10 values.
		{`n with j!="foo"`, "n", []*labels.Matcher{jNotFoo}},
		{`n with i="1"`, "n", []*labels.Matcher{i1}},
	}

	for _, c := range cases {
		b.Run(c.name, func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				_, err := labelValuesWithMatchers(ir, c.labelName, c.matchers...)
				require.NoError(b, err)
			}
		})
	}
}

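// BenchmarkQuerierSelect appends one million single-sample series and
// benchmarks Select over time ranges matching from 1 series up to all of
// them, against the unsorted Head, the sorted Head, and a persisted block.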
func BenchmarkQuerierSelect(b *testing.B) {
	chunkDir := b.TempDir()
	opts := DefaultHeadOptions()
	opts.ChunkRange = 1000
	opts.ChunkDirRoot = chunkDir
	h, err := NewHead(nil, nil, nil, nil, opts, nil)
	require.NoError(b, err)
	defer h.Close()
	app := h.Appender(context.Background())
	numSeries := 1000000
	for i := 0; i < numSeries; i++ {
		app.Append(0, labels.FromStrings("foo", "bar", "i", fmt.Sprintf("%d%s", i, postingsBenchSuffix)), int64(i), 0)
	}
	require.NoError(b, app.Commit())

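	// bench runs Select with foo="bar" over a time range covering s of the
	// numSeries single-sample series and drains each returned SeriesSet.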
	bench := func(b *testing.B, br BlockReader, sorted bool) {
		matcher := labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")
		for s := 1; s <= numSeries; s *= 10 {
			b.Run(fmt.Sprintf("%dof%d", s, numSeries), func(b *testing.B) {
				q, err := NewBlockQuerier(br, 0, int64(s-1))
				require.NoError(b, err)

				b.ResetTimer()
				for i := 0; i < b.N; i++ {
					ss := q.Select(sorted, nil, matcher)
					for ss.Next() {
					}
					require.NoError(b, ss.Err())
				}
				q.Close()
			})
		}
	}

	b.Run("Head", func(b *testing.B) {
		bench(b, h, false)
	})
	b.Run("SortedHead", func(b *testing.B) {
		bench(b, h, true)
	})

	tmpdir := b.TempDir()

	blockdir := createBlockFromHead(b, tmpdir, h)
	block, err := OpenBlock(nil, blockdir, nil)
	require.NoError(b, err)
	defer func() {
		require.NoError(b, block.Close())
	}()

	b.Run("Block", func(b *testing.B) {
		bench(b, block, false)
	})
}