// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package tsdb

import (
	"bufio"
	"context"
	"encoding/binary"
	"fmt"
	"hash/crc32"
	"io/ioutil"
	"math"
	"math/rand"
	"os"
	"path"
	"path/filepath"
	"sort"
	"strconv"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/go-kit/log"
	"github.com/oklog/ulid"
	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
	prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
	"github.com/stretchr/testify/require"
	"go.uber.org/goleak"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
	"github.com/prometheus/prometheus/tsdb/chunks"
	"github.com/prometheus/prometheus/tsdb/fileutil"
	"github.com/prometheus/prometheus/tsdb/index"
	"github.com/prometheus/prometheus/tsdb/record"
	"github.com/prometheus/prometheus/tsdb/tombstones"
	"github.com/prometheus/prometheus/tsdb/tsdbutil"
	"github.com/prometheus/prometheus/tsdb/wal"
	"github.com/prometheus/prometheus/util/testutil"
)
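
// TestMain wraps the test binary with goleak to fail tests that leak
// goroutines. The two ignored top functions are, presumably, the
// segment-finalizing goroutines started by the (deprecated) SegmentWAL
// when cutting a segment, which may legitimately outlive a single test.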
func TestMain(m *testing.M) {
	goleak.VerifyTestMain(m, goleak.IgnoreTopFunction("github.com/prometheus/prometheus/tsdb.(*SegmentWAL).cut.func1"), goleak.IgnoreTopFunction("github.com/prometheus/prometheus/tsdb.(*SegmentWAL).cut.func2"))
}
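
// openTestDB opens a DB in a fresh temporary directory. When rngs is empty,
// the exported Open is used with the given opts; otherwise the options are
// validated against rngs and the unexported open is used so tests can set
// custom block ranges.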
func openTestDB(t testing.TB, opts *Options, rngs []int64) (db *DB) {
	tmpdir, err := ioutil.TempDir("", "test")
	require.NoError(t, err)

	if len(rngs) == 0 {
		db, err = Open(tmpdir, nil, nil, opts, nil)
	} else {
		opts, rngs = validateOpts(opts, rngs)
		db, err = open(tmpdir, nil, nil, opts, rngs, nil)
	}
	require.NoError(t, err)

	// Do not Close() the test database by default as it will deadlock on test failures.
	t.Cleanup(func() {
		require.NoError(t, os.RemoveAll(tmpdir))
	})
	return db
}
// query runs a matcher query against the querier and fully expands its data.
func query(t testing.TB, q storage.Querier, matchers ...*labels.Matcher) map[string][]tsdbutil.Sample {
	ss := q.Select(false, nil, matchers...)
	defer func() {
		require.NoError(t, q.Close())
	}()

	result := map[string][]tsdbutil.Sample{}
	for ss.Next() {
		series := ss.At()

		samples := []tsdbutil.Sample{}
		it := series.Iterator()
		for it.Next() {
			t, v := it.At()
			samples = append(samples, sample{t: t, v: v})
		}
		require.NoError(t, it.Err())

		if len(samples) == 0 {
			continue
		}

		name := series.Labels().String()
		result[name] = samples
	}
	require.NoError(t, ss.Err())
	require.Equal(t, 0, len(ss.Warnings()))

	return result
}
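
// A typical call site in this file constructs a querier over the window
// under test and hands it straight to query, which takes ownership and
// closes it:
//
//	q, err := db.Querier(context.TODO(), 0, 1)
//	require.NoError(t, err)
//	seriesSet := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))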
// queryChunks runs a matcher query against the querier and fully expands its chunks.
func queryChunks(t testing.TB, q storage.ChunkQuerier, matchers ...*labels.Matcher) map[string][]chunks.Meta {
	ss := q.Select(false, nil, matchers...)
	defer func() {
		require.NoError(t, q.Close())
	}()

	result := map[string][]chunks.Meta{}
	for ss.Next() {
		series := ss.At()

		chks := []chunks.Meta{}
		it := series.Iterator()
		for it.Next() {
			chks = append(chks, it.At())
		}
		require.NoError(t, it.Err())

		if len(chks) == 0 {
			continue
		}

		name := series.Labels().String()
		result[name] = chks
	}
	require.NoError(t, ss.Err())
	require.Equal(t, 0, len(ss.Warnings()))

	return result
}
// Ensure that blocks are held in memory in their time order
// and not in ULID order as they are read from the directory.
func TestDB_reloadOrder(t *testing.T) {
	db := openTestDB(t, nil, nil)
	defer func() {
		require.NoError(t, db.Close())
	}()

	metas := []BlockMeta{
		{MinTime: 90, MaxTime: 100},
		{MinTime: 70, MaxTime: 80},
		{MinTime: 100, MaxTime: 110},
	}
	for _, m := range metas {
		createBlock(t, db.Dir(), genSeries(1, 1, m.MinTime, m.MaxTime))
	}

	require.NoError(t, db.reloadBlocks())
	blocks := db.Blocks()
	require.Equal(t, 3, len(blocks))
	require.Equal(t, metas[1].MinTime, blocks[0].Meta().MinTime)
	require.Equal(t, metas[1].MaxTime, blocks[0].Meta().MaxTime)
	require.Equal(t, metas[0].MinTime, blocks[1].Meta().MinTime)
	require.Equal(t, metas[0].MaxTime, blocks[1].Meta().MaxTime)
	require.Equal(t, metas[2].MinTime, blocks[2].Meta().MinTime)
	require.Equal(t, metas[2].MaxTime, blocks[2].Meta().MaxTime)
}
func TestDataAvailableOnlyAfterCommit(t *testing.T) {
	db := openTestDB(t, nil, nil)
	defer func() {
		require.NoError(t, db.Close())
	}()

	ctx := context.Background()
	app := db.Appender(ctx)

	_, err := app.Append(0, labels.FromStrings("foo", "bar"), 0, 0)
	require.NoError(t, err)

	querier, err := db.Querier(context.TODO(), 0, 1)
	require.NoError(t, err)
	seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
	require.Equal(t, map[string][]tsdbutil.Sample{}, seriesSet)

	err = app.Commit()
	require.NoError(t, err)

	querier, err = db.Querier(context.TODO(), 0, 1)
	require.NoError(t, err)
	defer querier.Close()

	seriesSet = query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))

	require.Equal(t, map[string][]tsdbutil.Sample{`{foo="bar"}`: {sample{t: 0, v: 0}}}, seriesSet)
}
// TestNoPanicAfterWALCorruption ensures that querying the db after a WAL corruption doesn't cause a panic.
// https://github.com/prometheus/prometheus/issues/7548
func TestNoPanicAfterWALCorruption(t *testing.T) {
	db := openTestDB(t, &Options{WALSegmentSize: 32 * 1024}, nil)

	// Append until the first mmapped head chunk.
	// This is to ensure that all samples can be read from the mmapped chunks when the WAL is corrupted.
	var expSamples []tsdbutil.Sample
	var maxt int64
	ctx := context.Background()
	{
		for {
			app := db.Appender(ctx)
			_, err := app.Append(0, labels.FromStrings("foo", "bar"), maxt, 0)
			expSamples = append(expSamples, sample{t: maxt, v: 0})
			require.NoError(t, err)
			require.NoError(t, app.Commit())
			mmapedChunks, err := ioutil.ReadDir(mmappedChunksDir(db.Dir()))
			require.NoError(t, err)
			if len(mmapedChunks) > 0 {
				break
			}
			maxt++
		}
		require.NoError(t, db.Close())
	}

	// Corrupt the WAL after the first sample of the series so that it has at least one sample and
	// it is not garbage collected.
	// The repair deletes all WAL records after the corrupted record and these are read from the mmapped chunk.
	{
		walFiles, err := ioutil.ReadDir(path.Join(db.Dir(), "wal"))
		require.NoError(t, err)
		f, err := os.OpenFile(path.Join(db.Dir(), "wal", walFiles[0].Name()), os.O_RDWR, 0o666)
		require.NoError(t, err)
		r := wal.NewReader(bufio.NewReader(f))
		require.True(t, r.Next(), "reading the series record")
		require.True(t, r.Next(), "reading the first sample record")
		// Write an invalid record header to corrupt everything after the first WAL sample.
		_, err = f.WriteAt([]byte{99}, r.Offset())
		require.NoError(t, err)
		f.Close()
	}

	// Query the data.
	{
		db, err := Open(db.Dir(), nil, nil, nil, nil)
		require.NoError(t, err)
		defer func() {
			require.NoError(t, db.Close())
		}()
		require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.walCorruptionsTotal), "WAL corruption count mismatch")

		querier, err := db.Querier(context.TODO(), 0, maxt)
		require.NoError(t, err)
		seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "", ""))
		// The last sample should be missing as it was after the WAL segment corruption.
		require.Equal(t, map[string][]tsdbutil.Sample{`{foo="bar"}`: expSamples[0 : len(expSamples)-1]}, seriesSet)
	}
}
func TestDataNotAvailableAfterRollback(t *testing.T) {
	db := openTestDB(t, nil, nil)
	defer func() {
		require.NoError(t, db.Close())
	}()

	app := db.Appender(context.Background())
	_, err := app.Append(0, labels.FromStrings("foo", "bar"), 0, 0)
	require.NoError(t, err)

	err = app.Rollback()
	require.NoError(t, err)

	querier, err := db.Querier(context.TODO(), 0, 1)
	require.NoError(t, err)
	defer querier.Close()

	seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))

	require.Equal(t, map[string][]tsdbutil.Sample{}, seriesSet)
}
func TestDBAppenderAddRef(t *testing.T) {
	db := openTestDB(t, nil, nil)
	defer func() {
		require.NoError(t, db.Close())
	}()

	ctx := context.Background()
	app1 := db.Appender(ctx)

	ref1, err := app1.Append(0, labels.FromStrings("a", "b"), 123, 0)
	require.NoError(t, err)

	// Reference should already work before commit.
	ref2, err := app1.Append(ref1, nil, 124, 1)
	require.NoError(t, err)
	require.Equal(t, ref1, ref2)

	err = app1.Commit()
	require.NoError(t, err)

	app2 := db.Appender(ctx)

	// First ref should already work in the next transaction.
	ref3, err := app2.Append(ref1, nil, 125, 0)
	require.NoError(t, err)
	require.Equal(t, ref1, ref3)

	ref4, err := app2.Append(ref1, labels.FromStrings("a", "b"), 133, 1)
	require.NoError(t, err)
	require.Equal(t, ref1, ref4)

	// Reference must be valid to add another sample.
	ref5, err := app2.Append(ref2, nil, 143, 2)
	require.NoError(t, err)
	require.Equal(t, ref1, ref5)

	// Missing labels & invalid refs should fail.
	_, err = app2.Append(9999999, nil, 1, 1)
	require.Equal(t, ErrInvalidSample, errors.Cause(err))

	require.NoError(t, app2.Commit())

	q, err := db.Querier(context.TODO(), 0, 200)
	require.NoError(t, err)

	res := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))

	require.Equal(t, map[string][]tsdbutil.Sample{
		labels.FromStrings("a", "b").String(): {
			sample{t: 123, v: 0},
			sample{t: 124, v: 1},
			sample{t: 125, v: 0},
			sample{t: 133, v: 1},
			sample{t: 143, v: 2},
		},
	}, res)
}
func TestAppendEmptyLabelsIgnored(t *testing.T) {
	db := openTestDB(t, nil, nil)
	defer func() {
		require.NoError(t, db.Close())
	}()

	ctx := context.Background()
	app1 := db.Appender(ctx)

	ref1, err := app1.Append(0, labels.FromStrings("a", "b"), 123, 0)
	require.NoError(t, err)

	// Construct labels manually so there is an empty label.
	ref2, err := app1.Append(0, labels.Labels{labels.Label{Name: "a", Value: "b"}, labels.Label{Name: "c", Value: ""}}, 124, 0)
	require.NoError(t, err)

	// Should be the same series.
	require.Equal(t, ref1, ref2)

	err = app1.Commit()
	require.NoError(t, err)
}
func TestDeleteSimple(t *testing.T) {
	numSamples := int64(10)

	cases := []struct {
		Intervals tombstones.Intervals
		remaint   []int64
	}{
		{
			Intervals: tombstones.Intervals{{Mint: 0, Maxt: 3}},
			remaint:   []int64{4, 5, 6, 7, 8, 9},
		},
		{
			Intervals: tombstones.Intervals{{Mint: 1, Maxt: 3}},
			remaint:   []int64{0, 4, 5, 6, 7, 8, 9},
		},
		{
			Intervals: tombstones.Intervals{{Mint: 1, Maxt: 3}, {Mint: 4, Maxt: 7}},
			remaint:   []int64{0, 8, 9},
		},
		{
			Intervals: tombstones.Intervals{{Mint: 1, Maxt: 3}, {Mint: 4, Maxt: 700}},
			remaint:   []int64{0},
		},
		{ // This case is to ensure that labels and symbols are deleted.
			Intervals: tombstones.Intervals{{Mint: 0, Maxt: 9}},
			remaint:   []int64{},
		},
	}

Outer:
	for _, c := range cases {
		db := openTestDB(t, nil, nil)
		defer func() {
			require.NoError(t, db.Close())
		}()

		ctx := context.Background()
		app := db.Appender(ctx)

		smpls := make([]float64, numSamples)
		for i := int64(0); i < numSamples; i++ {
			smpls[i] = rand.Float64()
			app.Append(0, labels.Labels{{Name: "a", Value: "b"}}, i, smpls[i])
		}

		require.NoError(t, app.Commit())

		// TODO(gouthamve): Reset the tombstones somehow.
		// Delete the ranges.
		for _, r := range c.Intervals {
			require.NoError(t, db.Delete(r.Mint, r.Maxt, labels.MustNewMatcher(labels.MatchEqual, "a", "b")))
		}

		// Compare the result.
		q, err := db.Querier(context.TODO(), 0, numSamples)
		require.NoError(t, err)

		res := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))

		expSamples := make([]tsdbutil.Sample, 0, len(c.remaint))
		for _, ts := range c.remaint {
			expSamples = append(expSamples, sample{ts, smpls[ts]})
		}

		expss := newMockSeriesSet([]storage.Series{
			storage.NewListSeries(labels.FromStrings("a", "b"), expSamples),
		})

		for {
			eok, rok := expss.Next(), res.Next()
			require.Equal(t, eok, rok)

			if !eok {
				require.Equal(t, 0, len(res.Warnings()))
				continue Outer
			}
			sexp := expss.At()
			sres := res.At()

			require.Equal(t, sexp.Labels(), sres.Labels())

			smplExp, errExp := storage.ExpandSamples(sexp.Iterator(), nil)
			smplRes, errRes := storage.ExpandSamples(sres.Iterator(), nil)

			require.Equal(t, errExp, errRes)
			require.Equal(t, smplExp, smplRes)
		}
	}
}
func TestAmendDatapointCausesError(t *testing.T) {
	db := openTestDB(t, nil, nil)
	defer func() {
		require.NoError(t, db.Close())
	}()

	ctx := context.Background()
	app := db.Appender(ctx)
	_, err := app.Append(0, labels.Labels{{Name: "a", Value: "b"}}, 0, 0)
	require.NoError(t, err)
	require.NoError(t, app.Commit())

	app = db.Appender(ctx)
	_, err = app.Append(0, labels.Labels{{Name: "a", Value: "b"}}, 0, 1)
	require.Equal(t, storage.ErrDuplicateSampleForTimestamp, err)
	require.NoError(t, app.Rollback())
}

func TestDuplicateNaNDatapointNoAmendError(t *testing.T) {
	db := openTestDB(t, nil, nil)
	defer func() {
		require.NoError(t, db.Close())
	}()

	ctx := context.Background()
	app := db.Appender(ctx)
	_, err := app.Append(0, labels.Labels{{Name: "a", Value: "b"}}, 0, math.NaN())
	require.NoError(t, err)
	require.NoError(t, app.Commit())

	app = db.Appender(ctx)
	_, err = app.Append(0, labels.Labels{{Name: "a", Value: "b"}}, 0, math.NaN())
	require.NoError(t, err)
}

func TestNonDuplicateNaNDatapointsCausesAmendError(t *testing.T) {
	db := openTestDB(t, nil, nil)
	defer func() {
		require.NoError(t, db.Close())
	}()

	ctx := context.Background()
	app := db.Appender(ctx)
	_, err := app.Append(0, labels.Labels{{Name: "a", Value: "b"}}, 0, math.Float64frombits(0x7ff0000000000001))
	require.NoError(t, err)
	require.NoError(t, app.Commit())

	app = db.Appender(ctx)
	_, err = app.Append(0, labels.Labels{{Name: "a", Value: "b"}}, 0, math.Float64frombits(0x7ff0000000000002))
	require.Equal(t, storage.ErrDuplicateSampleForTimestamp, err)
}
func TestEmptyLabelsetCausesError(t *testing.T) {
	db := openTestDB(t, nil, nil)
	defer func() {
		require.NoError(t, db.Close())
	}()

	ctx := context.Background()
	app := db.Appender(ctx)
	_, err := app.Append(0, labels.Labels{}, 0, 0)
	require.Error(t, err)
	require.Equal(t, "empty labelset: invalid sample", err.Error())
}
func TestSkippingInvalidValuesInSameTxn(t *testing.T) {
	db := openTestDB(t, nil, nil)
	defer func() {
		require.NoError(t, db.Close())
	}()

	// Append AmendedValue.
	ctx := context.Background()
	app := db.Appender(ctx)
	_, err := app.Append(0, labels.Labels{{Name: "a", Value: "b"}}, 0, 1)
	require.NoError(t, err)
	_, err = app.Append(0, labels.Labels{{Name: "a", Value: "b"}}, 0, 2)
	require.NoError(t, err)
	require.NoError(t, app.Commit())

	// Make sure the right value is stored.
	q, err := db.Querier(context.TODO(), 0, 10)
	require.NoError(t, err)

	ssMap := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))

	require.Equal(t, map[string][]tsdbutil.Sample{
		labels.New(labels.Label{Name: "a", Value: "b"}).String(): {sample{0, 1}},
	}, ssMap)

	// Append Out of Order Value.
	app = db.Appender(ctx)
	_, err = app.Append(0, labels.Labels{{Name: "a", Value: "b"}}, 10, 3)
	require.NoError(t, err)
	_, err = app.Append(0, labels.Labels{{Name: "a", Value: "b"}}, 7, 5)
	require.NoError(t, err)
	require.NoError(t, app.Commit())

	q, err = db.Querier(context.TODO(), 0, 10)
	require.NoError(t, err)

	ssMap = query(t, q, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))

	require.Equal(t, map[string][]tsdbutil.Sample{
		labels.New(labels.Label{Name: "a", Value: "b"}).String(): {sample{0, 1}, sample{10, 3}},
	}, ssMap)
}
func TestDB_Snapshot(t *testing.T) {
	db := openTestDB(t, nil, nil)

	// append data
	ctx := context.Background()
	app := db.Appender(ctx)
	mint := int64(1414141414000)
	for i := 0; i < 1000; i++ {
		_, err := app.Append(0, labels.FromStrings("foo", "bar"), mint+int64(i), 1.0)
		require.NoError(t, err)
	}
	require.NoError(t, app.Commit())

	// create snapshot
	snap, err := ioutil.TempDir("", "snap")
	require.NoError(t, err)

	defer func() {
		require.NoError(t, os.RemoveAll(snap))
	}()
	require.NoError(t, db.Snapshot(snap, true))
	require.NoError(t, db.Close())

	// reopen DB from snapshot
	db, err = Open(snap, nil, nil, nil, nil)
	require.NoError(t, err)
	defer func() { require.NoError(t, db.Close()) }()

	querier, err := db.Querier(context.TODO(), mint, mint+1000)
	require.NoError(t, err)
	defer func() { require.NoError(t, querier.Close()) }()

	// sum values
	seriesSet := querier.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
	sum := 0.0
	for seriesSet.Next() {
		series := seriesSet.At().Iterator()
		for series.Next() {
			_, v := series.At()
			sum += v
		}
		require.NoError(t, series.Err())
	}
	require.NoError(t, seriesSet.Err())
	require.Equal(t, 0, len(seriesSet.Warnings()))
	require.Equal(t, 1000.0, sum)
}
// TestDB_Snapshot_ChunksOutsideOfCompactedRange ensures that a snapshot removes chunk samples
// that are outside the set block time range.
// See https://github.com/prometheus/prometheus/issues/5105
func TestDB_Snapshot_ChunksOutsideOfCompactedRange(t *testing.T) {
	db := openTestDB(t, nil, nil)

	ctx := context.Background()
	app := db.Appender(ctx)
	mint := int64(1414141414000)
	for i := 0; i < 1000; i++ {
		_, err := app.Append(0, labels.FromStrings("foo", "bar"), mint+int64(i), 1.0)
		require.NoError(t, err)
	}
	require.NoError(t, app.Commit())

	snap, err := ioutil.TempDir("", "snap")
	require.NoError(t, err)

	// Hack: introduce the "race" by making the head's max time lower than the maxTime of the last chunk.
	db.head.maxTime.Sub(10)

	defer func() {
		require.NoError(t, os.RemoveAll(snap))
	}()
	require.NoError(t, db.Snapshot(snap, true))
	require.NoError(t, db.Close())

	// Reopen DB from snapshot.
	db, err = Open(snap, nil, nil, nil, nil)
	require.NoError(t, err)
	defer func() { require.NoError(t, db.Close()) }()

	querier, err := db.Querier(context.TODO(), mint, mint+1000)
	require.NoError(t, err)
	defer func() { require.NoError(t, querier.Close()) }()

	// Sum values.
	seriesSet := querier.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
	sum := 0.0
	for seriesSet.Next() {
		series := seriesSet.At().Iterator()
		for series.Next() {
			_, v := series.At()
			sum += v
		}
		require.NoError(t, series.Err())
	}
	require.NoError(t, seriesSet.Err())
	require.Equal(t, 0, len(seriesSet.Warnings()))

	// Since we snapshotted with MaxTime - 10, we expect 10 fewer samples.
	require.Equal(t, 1000.0-10, sum)
}
func TestDB_SnapshotWithDelete(t *testing.T) {
	numSamples := int64(10)

	db := openTestDB(t, nil, nil)
	defer func() { require.NoError(t, db.Close()) }()

	ctx := context.Background()
	app := db.Appender(ctx)

	smpls := make([]float64, numSamples)
	for i := int64(0); i < numSamples; i++ {
		smpls[i] = rand.Float64()
		app.Append(0, labels.Labels{{Name: "a", Value: "b"}}, i, smpls[i])
	}

	require.NoError(t, app.Commit())

	cases := []struct {
		intervals tombstones.Intervals
		remaint   []int64
	}{
		{
			intervals: tombstones.Intervals{{Mint: 1, Maxt: 3}, {Mint: 4, Maxt: 7}},
			remaint:   []int64{0, 8, 9},
		},
	}

Outer:
	for _, c := range cases {
		// TODO(gouthamve): Reset the tombstones somehow.
		// Delete the ranges.
		for _, r := range c.intervals {
			require.NoError(t, db.Delete(r.Mint, r.Maxt, labels.MustNewMatcher(labels.MatchEqual, "a", "b")))
		}

		// create snapshot
		snap, err := ioutil.TempDir("", "snap")
		require.NoError(t, err)

		defer func() {
			require.NoError(t, os.RemoveAll(snap))
		}()
		require.NoError(t, db.Snapshot(snap, true))

		// reopen DB from snapshot
		newDB, err := Open(snap, nil, nil, nil, nil)
		require.NoError(t, err)
		defer func() { require.NoError(t, newDB.Close()) }()

		// Compare the result.
		q, err := newDB.Querier(context.TODO(), 0, numSamples)
		require.NoError(t, err)
		defer func() { require.NoError(t, q.Close()) }()

		res := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))

		expSamples := make([]tsdbutil.Sample, 0, len(c.remaint))
		for _, ts := range c.remaint {
			expSamples = append(expSamples, sample{ts, smpls[ts]})
		}

		expss := newMockSeriesSet([]storage.Series{
			storage.NewListSeries(labels.FromStrings("a", "b"), expSamples),
		})

		if len(expSamples) == 0 {
			require.False(t, res.Next())
			continue
		}

		for {
			eok, rok := expss.Next(), res.Next()
			require.Equal(t, eok, rok)

			if !eok {
				require.Equal(t, 0, len(res.Warnings()))
				continue Outer
			}
			sexp := expss.At()
			sres := res.At()

			require.Equal(t, sexp.Labels(), sres.Labels())

			smplExp, errExp := storage.ExpandSamples(sexp.Iterator(), nil)
			smplRes, errRes := storage.ExpandSamples(sres.Iterator(), nil)

			require.Equal(t, errExp, errRes)
			require.Equal(t, smplExp, smplRes)
		}
	}
}
func TestDB_e2e(t *testing.T) {
	const (
		numDatapoints = 1000
		numRanges     = 1000
		timeInterval  = int64(3)
	)
	// Create 8 series with 1000 data-points of different ranges and run queries.
	lbls := []labels.Labels{
		{
			{Name: "a", Value: "b"},
			{Name: "instance", Value: "localhost:9090"},
			{Name: "job", Value: "prometheus"},
		},
		{
			{Name: "a", Value: "b"},
			{Name: "instance", Value: "127.0.0.1:9090"},
			{Name: "job", Value: "prometheus"},
		},
		{
			{Name: "a", Value: "b"},
			{Name: "instance", Value: "127.0.0.1:9090"},
			{Name: "job", Value: "prom-k8s"},
		},
		{
			{Name: "a", Value: "b"},
			{Name: "instance", Value: "localhost:9090"},
			{Name: "job", Value: "prom-k8s"},
		},
		{
			{Name: "a", Value: "c"},
			{Name: "instance", Value: "localhost:9090"},
			{Name: "job", Value: "prometheus"},
		},
		{
			{Name: "a", Value: "c"},
			{Name: "instance", Value: "127.0.0.1:9090"},
			{Name: "job", Value: "prometheus"},
		},
		{
			{Name: "a", Value: "c"},
			{Name: "instance", Value: "127.0.0.1:9090"},
			{Name: "job", Value: "prom-k8s"},
		},
		{
			{Name: "a", Value: "c"},
			{Name: "instance", Value: "localhost:9090"},
			{Name: "job", Value: "prom-k8s"},
		},
	}

	seriesMap := map[string][]tsdbutil.Sample{}
	for _, l := range lbls {
		seriesMap[labels.New(l...).String()] = []tsdbutil.Sample{}
	}

	db := openTestDB(t, nil, nil)
	defer func() {
		require.NoError(t, db.Close())
	}()

	ctx := context.Background()
	app := db.Appender(ctx)

	for _, l := range lbls {
		lset := labels.New(l...)
		series := []tsdbutil.Sample{}

		ts := rand.Int63n(300)
		for i := 0; i < numDatapoints; i++ {
			v := rand.Float64()

			series = append(series, sample{ts, v})

			_, err := app.Append(0, lset, ts, v)
			require.NoError(t, err)

			ts += rand.Int63n(timeInterval) + 1
		}

		seriesMap[lset.String()] = series
	}

	require.NoError(t, app.Commit())

	// Query each selector on 1000 random time-ranges.
	queries := []struct {
		ms []*labels.Matcher
	}{
		{
			ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "b")},
		},
		{
			ms: []*labels.Matcher{
				labels.MustNewMatcher(labels.MatchEqual, "a", "b"),
				labels.MustNewMatcher(labels.MatchEqual, "job", "prom-k8s"),
			},
		},
		{
			ms: []*labels.Matcher{
				labels.MustNewMatcher(labels.MatchEqual, "a", "c"),
				labels.MustNewMatcher(labels.MatchEqual, "instance", "localhost:9090"),
				labels.MustNewMatcher(labels.MatchEqual, "job", "prometheus"),
			},
		},
		// TODO: Add Regexp Matchers.
	}

	for _, qry := range queries {
		matched := labels.Slice{}
		for _, ls := range lbls {
			s := labels.Selector(qry.ms)
			if s.Matches(ls) {
				matched = append(matched, ls)
			}
		}

		sort.Sort(matched)

		for i := 0; i < numRanges; i++ {
			mint := rand.Int63n(300)
			maxt := mint + rand.Int63n(timeInterval*int64(numDatapoints))

			expected := map[string][]tsdbutil.Sample{}

			// Build the mockSeriesSet.
			for _, m := range matched {
				smpls := boundedSamples(seriesMap[m.String()], mint, maxt)
				if len(smpls) > 0 {
					expected[m.String()] = smpls
				}
			}

			q, err := db.Querier(context.TODO(), mint, maxt)
			require.NoError(t, err)

			ss := q.Select(false, nil, qry.ms...)
			result := map[string][]tsdbutil.Sample{}

			for ss.Next() {
				x := ss.At()

				smpls, err := storage.ExpandSamples(x.Iterator(), newSample)
				require.NoError(t, err)

				if len(smpls) > 0 {
					result[x.Labels().String()] = smpls
				}
			}
			require.NoError(t, ss.Err())
			require.Equal(t, 0, len(ss.Warnings()))
			require.Equal(t, expected, result)

			q.Close()
		}
	}
}
func TestWALFlushedOnDBClose(t *testing.T) {
	db := openTestDB(t, nil, nil)

	dirDb := db.Dir()

	lbls := labels.Labels{labels.Label{Name: "labelname", Value: "labelvalue"}}

	ctx := context.Background()
	app := db.Appender(ctx)
	_, err := app.Append(0, lbls, 0, 1)
	require.NoError(t, err)
	require.NoError(t, app.Commit())

	require.NoError(t, db.Close())

	db, err = Open(dirDb, nil, nil, nil, nil)
	require.NoError(t, err)
	defer func() { require.NoError(t, db.Close()) }()

	q, err := db.Querier(context.TODO(), 0, 1)
	require.NoError(t, err)

	values, ws, err := q.LabelValues("labelname")
	require.NoError(t, err)
	require.Equal(t, 0, len(ws))
	require.Equal(t, []string{"labelvalue"}, values)
}
func TestWALSegmentSizeOptions(t *testing.T) {
	tests := map[int]func(dbdir string, segmentSize int){
		// Default Wal Size.
		0: func(dbDir string, segmentSize int) {
			filesAndDir, err := ioutil.ReadDir(filepath.Join(dbDir, "wal"))
			require.NoError(t, err)
			files := []os.FileInfo{}
			for _, f := range filesAndDir {
				if !f.IsDir() {
					files = append(files, f)
				}
			}
			// All the full segment files (all but the last) should match the segment size option.
			for _, f := range files[:len(files)-1] {
				require.Equal(t, int64(DefaultOptions().WALSegmentSize), f.Size(), "WAL file size doesn't match WALSegmentSize option, filename: %v", f.Name())
			}
			lastFile := files[len(files)-1]
			require.Greater(t, int64(DefaultOptions().WALSegmentSize), lastFile.Size(), "last WAL file size is not smaller than the WALSegmentSize option, filename: %v", lastFile.Name())
		},
		// Custom Wal Size.
		2 * 32 * 1024: func(dbDir string, segmentSize int) {
			filesAndDir, err := ioutil.ReadDir(filepath.Join(dbDir, "wal"))
			require.NoError(t, err)
			files := []os.FileInfo{}
			for _, f := range filesAndDir {
				if !f.IsDir() {
					files = append(files, f)
				}
			}
			require.Greater(t, len(files), 1, "current WALSegmentSize should result in more than a single WAL file.")
			// All the full segment files (all but the last) should match the segment size option.
			for _, f := range files[:len(files)-1] {
				require.Equal(t, int64(segmentSize), f.Size(), "WAL file size doesn't match WALSegmentSize option, filename: %v", f.Name())
			}
			lastFile := files[len(files)-1]
			require.Greater(t, int64(segmentSize), lastFile.Size(), "last WAL file size is not smaller than the WALSegmentSize option, filename: %v", lastFile.Name())
		},
		// Wal disabled.
		-1: func(dbDir string, segmentSize int) {
			// Check that WAL dir is not there.
			_, err := os.Stat(filepath.Join(dbDir, "wal"))
			require.Error(t, err)
			// Check that there is chunks dir.
			_, err = os.Stat(mmappedChunksDir(dbDir))
			require.NoError(t, err)
		},
	}
	for segmentSize, testFunc := range tests {
		t.Run(fmt.Sprintf("WALSegmentSize %d test", segmentSize), func(t *testing.T) {
			opts := DefaultOptions()
			opts.WALSegmentSize = segmentSize
			db := openTestDB(t, opts, nil)

			for i := int64(0); i < 155; i++ {
				app := db.Appender(context.Background())
				ref, err := app.Append(0, labels.Labels{labels.Label{Name: "wal" + fmt.Sprintf("%d", i), Value: "size"}}, i, rand.Float64())
				require.NoError(t, err)
				for j := int64(1); j <= 78; j++ {
					_, err := app.Append(ref, nil, i+j, rand.Float64())
					require.NoError(t, err)
				}
				require.NoError(t, app.Commit())
			}

			dbDir := db.Dir()
			require.NoError(t, db.Close())
			testFunc(dbDir, int(opts.WALSegmentSize))
		})
	}
}
func TestTombstoneClean(t *testing.T) {
	numSamples := int64(10)

	db := openTestDB(t, nil, nil)

	ctx := context.Background()
	app := db.Appender(ctx)

	smpls := make([]float64, numSamples)
	for i := int64(0); i < numSamples; i++ {
		smpls[i] = rand.Float64()
		app.Append(0, labels.Labels{{Name: "a", Value: "b"}}, i, smpls[i])
	}

	require.NoError(t, app.Commit())

	cases := []struct {
		intervals tombstones.Intervals
		remaint   []int64
	}{
		{
			intervals: tombstones.Intervals{{Mint: 1, Maxt: 3}, {Mint: 4, Maxt: 7}},
			remaint:   []int64{0, 8, 9},
		},
	}

	for _, c := range cases {
		// Delete the ranges.

		// Create snapshot.
		snap, err := ioutil.TempDir("", "snap")
		require.NoError(t, err)

		defer func() {
			require.NoError(t, os.RemoveAll(snap))
		}()
		require.NoError(t, db.Snapshot(snap, true))
		require.NoError(t, db.Close())

		// Reopen DB from snapshot.
		db, err = Open(snap, nil, nil, nil, nil)
		require.NoError(t, err)
		defer db.Close()

		for _, r := range c.intervals {
			require.NoError(t, db.Delete(r.Mint, r.Maxt, labels.MustNewMatcher(labels.MatchEqual, "a", "b")))
		}

		// All of the setup for THIS line.
		require.NoError(t, db.CleanTombstones())

		// Compare the result.
		q, err := db.Querier(context.TODO(), 0, numSamples)
		require.NoError(t, err)
		defer q.Close()

		res := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))

		expSamples := make([]tsdbutil.Sample, 0, len(c.remaint))
		for _, ts := range c.remaint {
			expSamples = append(expSamples, sample{ts, smpls[ts]})
		}

		expss := newMockSeriesSet([]storage.Series{
			storage.NewListSeries(labels.FromStrings("a", "b"), expSamples),
		})

		if len(expSamples) == 0 {
			require.False(t, res.Next())
			continue
		}

		for {
			eok, rok := expss.Next(), res.Next()
			require.Equal(t, eok, rok)

			if !eok {
				break
			}
			sexp := expss.At()
			sres := res.At()

			require.Equal(t, sexp.Labels(), sres.Labels())

			smplExp, errExp := storage.ExpandSamples(sexp.Iterator(), nil)
			smplRes, errRes := storage.ExpandSamples(sres.Iterator(), nil)

			require.Equal(t, errExp, errRes)
			require.Equal(t, smplExp, smplRes)
		}
		require.Equal(t, 0, len(res.Warnings()))

		for _, b := range db.Blocks() {
			require.Equal(t, tombstones.NewMemTombstones(), b.tombstones)
		}
	}
}
// TestTombstoneCleanResultEmptyBlock tests that a TombstoneClean that results in empty blocks (no timeseries)
// will also delete the resultant block.
func TestTombstoneCleanResultEmptyBlock(t *testing.T) {
	numSamples := int64(10)

	db := openTestDB(t, nil, nil)

	ctx := context.Background()
	app := db.Appender(ctx)

	smpls := make([]float64, numSamples)
	for i := int64(0); i < numSamples; i++ {
		smpls[i] = rand.Float64()
		app.Append(0, labels.Labels{{Name: "a", Value: "b"}}, i, smpls[i])
	}
	require.NoError(t, app.Commit())
	// Interval should cover the whole block.
	intervals := tombstones.Intervals{{Mint: 0, Maxt: numSamples}}

	// Create snapshot.
	snap, err := ioutil.TempDir("", "snap")
	require.NoError(t, err)
	defer func() {
		require.NoError(t, os.RemoveAll(snap))
	}()
	require.NoError(t, db.Snapshot(snap, true))
	require.NoError(t, db.Close())

	// Reopen DB from snapshot.
	db, err = Open(snap, nil, nil, nil, nil)
	require.NoError(t, err)
	defer db.Close()

	// Create tombstones by deleting all samples.
	for _, r := range intervals {
		require.NoError(t, db.Delete(r.Mint, r.Maxt, labels.MustNewMatcher(labels.MatchEqual, "a", "b")))
	}

	require.NoError(t, db.CleanTombstones())

	// After cleaning tombstones that cover the entire block, no blocks should be left behind.
	actualBlockDirs, err := blockDirs(db.dir)
	require.NoError(t, err)
	require.Equal(t, 0, len(actualBlockDirs))
}
// TestTombstoneCleanFail tests that a failing TombstoneClean doesn't leave any blocks behind.
// When TombstoneClean errors, the original block that should be rebuilt doesn't get deleted, so
// if TombstoneClean leaves any blocks behind, these will overlap.
func TestTombstoneCleanFail(t *testing.T) {
	db := openTestDB(t, nil, nil)
	defer func() {
		require.NoError(t, db.Close())
	}()

	var oldBlockDirs []string

	// Create some blocks pending for compaction.
	// totalBlocks should be >=2 so we have enough blocks to trigger compaction failure.
	totalBlocks := 2
	for i := 0; i < totalBlocks; i++ {
		blockDir := createBlock(t, db.Dir(), genSeries(1, 1, int64(i), int64(i)+1))
		block, err := OpenBlock(nil, blockDir, nil)
		require.NoError(t, err)
		// Add some fake tombstones to trigger the compaction.
		tomb := tombstones.NewMemTombstones()
		tomb.AddInterval(0, tombstones.Interval{Mint: int64(i), Maxt: int64(i) + 1})
		block.tombstones = tomb

		db.blocks = append(db.blocks, block)
		oldBlockDirs = append(oldBlockDirs, blockDir)
	}

	// Initialize the mockCompactorFailing with a room for a single compaction iteration.
	// mockCompactorFailing will fail on the second iteration so we can check if the cleanup works as expected.
	db.compactor = &mockCompactorFailing{
		t:      t,
		blocks: db.blocks,
		max:    totalBlocks + 1,
	}

	// The compactor should trigger a failure here.
	require.Error(t, db.CleanTombstones())

	// Now check that CleanTombstones replaced the old block even after a failure.
	actualBlockDirs, err := blockDirs(db.dir)
	require.NoError(t, err)
	// Only one block should have been replaced by a new block.
	require.Equal(t, len(oldBlockDirs), len(actualBlockDirs))
	require.Equal(t, len(intersection(oldBlockDirs, actualBlockDirs)), len(actualBlockDirs)-1)
}
// TestTombstoneCleanRetentionLimitsRace tests that a CleanTombstones operation
// and retention limit policies, when triggered at the same time,
// won't race against each other.
func TestTombstoneCleanRetentionLimitsRace ( t * testing . T ) {
2021-10-15 03:23:48 -07:00
if testing . Short ( ) {
t . Skip ( "skipping test in short mode." )
}
opts := DefaultOptions ( )
var wg sync . WaitGroup
// We want to make sure that a race doesn't happen when a normal reload and a CleanTombstones()
// reload try to delete the same block. Without the correct lock placement, it can happen if a
// block is marked for deletion due to retention limits and also has tombstones to be cleaned at
// the same time.
//
// That is something tricky to trigger, so let's try several times just to make sure.
for i := 0 ; i < 20 ; i ++ {
db := openTestDB ( t , opts , nil )
totalBlocks := 20
dbDir := db . Dir ( )
// Generate some blocks with old mint (near epoch).
for j := 0 ; j < totalBlocks ; j ++ {
blockDir := createBlock ( t , dbDir , genSeries ( 10 , 1 , int64 ( j ) , int64 ( j ) + 1 ) )
block , err := OpenBlock ( nil , blockDir , nil )
require . NoError ( t , err )
// Cover block with tombstones so it can be deleted with CleanTombstones() as well.
tomb := tombstones . NewMemTombstones ( )
tomb . AddInterval ( 0 , tombstones . Interval { Mint : int64 ( j ) , Maxt : int64 ( j ) + 1 } )
block . tombstones = tomb
db . blocks = append ( db . blocks , block )
}
wg . Add ( 2 )
// Run reloadBlocks and CleanTombstones together, with a small randomized delay in each goroutine.
go func ( ) {
defer wg . Done ( )
time . Sleep ( time . Duration ( rand . Float64 ( ) * 100 * float64 ( time . Millisecond ) ) )
require . NoError ( t , db . reloadBlocks ( ) )
} ( )
go func ( ) {
defer wg . Done ( )
time . Sleep ( time . Duration ( rand . Float64 ( ) * 100 * float64 ( time . Millisecond ) ) )
require . NoError ( t , db . CleanTombstones ( ) )
} ( )
wg . Wait ( )
require . NoError ( t , db . Close ( ) )
}
}
func intersection ( oldBlocks , actualBlocks [ ] string ) ( intersection [ ] string ) {
hash := make ( map [ string ] bool )
for _ , e := range oldBlocks {
hash [ e ] = true
}
for _ , e := range actualBlocks {
// If the block is present in the hash map, append it to the intersection list.
if hash [ e ] {
intersection = append ( intersection , e )
}
}
return
}
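
// For illustration: intersection([]string{"b1", "b2"}, []string{"b2", "b3"})
// returns []string{"b2"}; the helper treats both arguments as sets of block
// directory paths.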
// mockCompactorFailing creates a new empty block on every write and fails once it reaches the maximum allowed total.
type mockCompactorFailing struct {
t * testing . T
blocks [ ] * Block
max int
}
func ( * mockCompactorFailing ) Plan ( dir string ) ( [ ] string , error ) {
return nil , nil
}
func ( c * mockCompactorFailing ) Write ( dest string , b BlockReader , mint , maxt int64 , parent * BlockMeta ) ( ulid . ULID , error ) {
if len ( c . blocks ) >= c . max {
return ulid . ULID { } , fmt . Errorf ( "the compactor already did the maximum allowed blocks so it is time to fail" )
}
block , err := OpenBlock ( nil , createBlock ( c . t , dest , genSeries ( 1 , 1 , 0 , 1 ) ) , nil )
require . NoError ( c . t , err )
require . NoError ( c . t , block . Close ( ) ) // Close the block as we won't be using it anywhere.
c . blocks = append ( c . blocks , block )
// Now check that all expected blocks are actually persisted on disk.
// This way we make sure that we have some blocks that are supposed to be removed.
var expectedBlocks [ ] string
for _ , b := range c . blocks {
expectedBlocks = append ( expectedBlocks , filepath . Join ( dest , b . Meta ( ) . ULID . String ( ) ) )
}
actualBlockDirs , err := blockDirs ( dest )
require . NoError ( c . t , err )
require . Equal ( c . t , expectedBlocks , actualBlockDirs )
return block . Meta ( ) . ULID , nil
}
func ( * mockCompactorFailing ) Compact ( string , [ ] string , [ ] * Block ) ( ulid . ULID , error ) {
return ulid . ULID { } , nil
}
func TestTimeRetention ( t * testing . T ) {
db := openTestDB ( t , nil , [ ] int64 { 1000 } )
defer func ( ) {
require . NoError ( t , db . Close ( ) )
} ( )
blocks := [ ] * BlockMeta {
{ MinTime : 500 , MaxTime : 900 } , // Oldest block
{ MinTime : 1000 , MaxTime : 1500 } ,
{ MinTime : 1500 , MaxTime : 2000 } , // Newest Block
}
for _ , m := range blocks {
createBlock ( t , db . Dir ( ) , genSeries ( 10 , 10 , m . MinTime , m . MaxTime ) )
}
require . NoError ( t , db . reloadBlocks ( ) ) // Reload the db to register the new blocks.
require . Equal ( t , len ( blocks ) , len ( db . Blocks ( ) ) ) // Ensure all blocks are registered.
db . opts . RetentionDuration = blocks [ 2 ] . MaxTime - blocks [ 1 ] . MinTime
require . NoError ( t , db . reloadBlocks ( ) )
expBlocks := blocks [ 1 : ]
actBlocks := db . Blocks ( )
require . Equal ( t , 1 , int ( prom_testutil . ToFloat64 ( db . metrics . timeRetentionCount ) ) , "metric retention count mismatch" )
require . Equal ( t , len ( expBlocks ) , len ( actBlocks ) )
require . Equal ( t , expBlocks [ 0 ] . MaxTime , actBlocks [ 0 ] . meta . MaxTime )
require . Equal ( t , expBlocks [ len ( expBlocks ) - 1 ] . MaxTime , actBlocks [ len ( actBlocks ) - 1 ] . meta . MaxTime )
}
func TestSizeRetention ( t * testing . T ) {
db := openTestDB ( t , nil , [ ] int64 { 100 } )
defer func ( ) {
require . NoError ( t , db . Close ( ) )
} ( )
blocks := [ ] * BlockMeta {
{ MinTime : 100 , MaxTime : 200 } , // Oldest block
{ MinTime : 200 , MaxTime : 300 } ,
{ MinTime : 300 , MaxTime : 400 } ,
{ MinTime : 400 , MaxTime : 500 } ,
{ MinTime : 500 , MaxTime : 600 } , // Newest Block
}
for _ , m := range blocks {
createBlock ( t , db . Dir ( ) , genSeries ( 100 , 10 , m . MinTime , m . MaxTime ) )
}
headBlocks := [ ] * BlockMeta {
{ MinTime : 700 , MaxTime : 800 } ,
}
// Add some data to the WAL.
headApp := db . Head ( ) . Appender ( context . Background ( ) )
for _ , m := range headBlocks {
series := genSeries ( 100 , 10 , m . MinTime , m . MaxTime + 1 )
for _ , s := range series {
it := s . Iterator ( )
for it . Next ( ) {
tim , v := it . At ( )
_ , err := headApp . Append ( 0 , s . Labels ( ) , tim , v )
require . NoError ( t , err )
}
require . NoError ( t , it . Err ( ) )
}
}
require . NoError ( t , headApp . Commit ( ) )
// Test that registered size matches the actual disk size.
require . NoError ( t , db . reloadBlocks ( ) ) // Reload the db to register the new db size.
require . Equal ( t , len ( blocks ) , len ( db . Blocks ( ) ) ) // Ensure all blocks are registered.
blockSize := int64 ( prom_testutil . ToFloat64 ( db . metrics . blocksBytes ) ) // Use the actual internal metrics.
walSize , err := db . Head ( ) . wal . Size ( )
require . NoError ( t , err )
cdmSize , err := db . Head ( ) . chunkDiskMapper . Size ( )
require . NoError ( t , err )
require . NotZero ( t , cdmSize )
// Expected size should take into account block size + WAL size + Head
// chunks size
expSize := blockSize + walSize + cdmSize
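// As a worked example: with 4096 bytes of blocks, a 512 byte WAL and 256
// bytes of head chunk files, expSize is 4864, and the assertion below holds
// because every byte under db.Dir() belongs to exactly one of the three.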
actSize , err := fileutil . DirSize ( db . Dir ( ) )
require . NoError ( t , err )
require . Equal ( t , expSize , actSize , "registered size doesn't match actual disk size" )
// Create a WAL checkpoint, and compare sizes.
first , last , err := wal . Segments ( db . Head ( ) . wal . Dir ( ) )
require . NoError ( t , err )
_ , err = wal . Checkpoint ( log . NewNopLogger ( ) , db . Head ( ) . wal , first , last - 1 , func ( x chunks . HeadSeriesRef ) bool { return false } , 0 )
require . NoError ( t , err )
blockSize = int64 ( prom_testutil . ToFloat64 ( db . metrics . blocksBytes ) ) // Use the actual internal metrics.
walSize , err = db . Head ( ) . wal . Size ( )
require . NoError ( t , err )
cdmSize , err = db . Head ( ) . chunkDiskMapper . Size ( )
require . NoError ( t , err )
require . NotZero ( t , cdmSize )
expSize = blockSize + walSize + cdmSize
actSize , err = fileutil . DirSize ( db . Dir ( ) )
require . NoError ( t , err )
require . Equal ( t , expSize , actSize , "registered size doesn't match actual disk size" )
// Truncate Chunk Disk Mapper and compare sizes.
require . NoError ( t , db . Head ( ) . chunkDiskMapper . Truncate ( 900 ) )
cdmSize , err = db . Head ( ) . chunkDiskMapper . Size ( )
require . NoError ( t , err )
require . NotZero ( t , cdmSize )
expSize = blockSize + walSize + cdmSize
actSize , err = fileutil . DirSize ( db . Dir ( ) )
require . NoError ( t , err )
require . Equal ( t , expSize , actSize , "registered size doesn't match actual disk size" )
// Decrease the max bytes limit so that a delete is triggered.
// Check total size, total count and check that the oldest block was deleted.
firstBlockSize := db . Blocks ( ) [ 0 ] . Size ( )
sizeLimit := actSize - firstBlockSize
db . opts . MaxBytes = sizeLimit // Set the new db size limit one block smaller than the actual size.
require . NoError ( t , db . reloadBlocks ( ) ) // Reload the db to register the new db size.
expBlocks := blocks [ 1 : ]
actBlocks := db . Blocks ( )
blockSize = int64 ( prom_testutil . ToFloat64 ( db . metrics . blocksBytes ) )
walSize , err = db . Head ( ) . wal . Size ( )
require . NoError ( t , err )
cdmSize , err = db . Head ( ) . chunkDiskMapper . Size ( )
require . NoError ( t , err )
require . NotZero ( t , cdmSize )
// Expected size should take into account block size + WAL size + Head chunks size.
expSize = blockSize + walSize + cdmSize
actRetentionCount := int ( prom_testutil . ToFloat64 ( db . metrics . sizeRetentionCount ) )
actSize , err = fileutil . DirSize ( db . Dir ( ) )
require . NoError ( t , err )
require . Equal ( t , 1 , actRetentionCount , "metric retention count mismatch" )
require . Equal ( t , actSize , expSize , "metric db size doesn't match actual disk size" )
require . LessOrEqual ( t , expSize , sizeLimit , "actual size (%v) is expected to be less than or equal to limit (%v)" , expSize , sizeLimit )
require . Equal ( t , len ( blocks ) - 1 , len ( actBlocks ) , "new block count should be decreased from:%v to:%v" , len ( blocks ) , len ( blocks ) - 1 )
require . Equal ( t , expBlocks [ 0 ] . MaxTime , actBlocks [ 0 ] . meta . MaxTime , "maxT mismatch of the first block" )
require . Equal ( t , expBlocks [ len ( expBlocks ) - 1 ] . MaxTime , actBlocks [ len ( actBlocks ) - 1 ] . meta . MaxTime , "maxT mismatch of the last block" )
}
func TestSizeRetentionMetric ( t * testing . T ) {
cases := [ ] struct {
maxBytes int64
expMaxBytes int64
} {
{ maxBytes : 1000 , expMaxBytes : 1000 } ,
{ maxBytes : 0 , expMaxBytes : 0 } ,
{ maxBytes : - 1000 , expMaxBytes : 0 } ,
}
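// A non-positive MaxBytes disables size-based retention, which is why the
// metric is expected to report 0 for both the 0 and -1000 configurations above.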
for _ , c := range cases {
db := openTestDB ( t , & Options {
MaxBytes : c . maxBytes ,
} , [ ] int64 { 100 } )
defer func ( ) {
require . NoError ( t , db . Close ( ) )
} ( )
actMaxBytes := int64 ( prom_testutil . ToFloat64 ( db . metrics . maxBytes ) )
require . Equal ( t , actMaxBytes , c . expMaxBytes , "metric retention limit bytes mismatch" )
}
}
func TestNotMatcherSelectsLabelsUnsetSeries ( t * testing . T ) {
db := openTestDB ( t , nil , nil )
defer func ( ) {
require . NoError ( t , db . Close ( ) )
} ( )
labelpairs := [ ] labels . Labels {
labels . FromStrings ( "a" , "abcd" , "b" , "abcde" ) ,
labels . FromStrings ( "labelname" , "labelvalue" ) ,
}
ctx := context . Background ( )
app := db . Appender ( ctx )
for _ , lbls := range labelpairs {
_ , err := app . Append ( 0 , lbls , 0 , 1 )
require . NoError ( t , err )
}
require . NoError ( t , app . Commit ( ) )
cases := [ ] struct {
selector labels . Selector
series [ ] labels . Labels
} { {
selector : labels . Selector {
labels . MustNewMatcher ( labels . MatchNotEqual , "lname" , "lvalue" ) ,
} ,
series : labelpairs ,
} , {
selector : labels . Selector {
labels . MustNewMatcher ( labels . MatchEqual , "a" , "abcd" ) ,
labels . MustNewMatcher ( labels . MatchNotEqual , "b" , "abcde" ) ,
} ,
series : [ ] labels . Labels { } ,
} , {
selector : labels . Selector {
labels . MustNewMatcher ( labels . MatchEqual , "a" , "abcd" ) ,
labels . MustNewMatcher ( labels . MatchNotEqual , "b" , "abc" ) ,
} ,
series : [ ] labels . Labels { labelpairs [ 0 ] } ,
} , {
selector : labels . Selector {
labels . MustNewMatcher ( labels . MatchNotRegexp , "a" , "abd.*" ) ,
} ,
series : labelpairs ,
} , {
selector : labels . Selector {
labels . MustNewMatcher ( labels . MatchNotRegexp , "a" , "abc.*" ) ,
} ,
series : labelpairs [ 1 : ] ,
} , {
selector : labels . Selector {
labels . MustNewMatcher ( labels . MatchNotRegexp , "c" , "abd.*" ) ,
} ,
series : labelpairs ,
} , {
selector : labels . Selector {
labels . MustNewMatcher ( labels . MatchNotRegexp , "labelname" , "labelvalue" ) ,
} ,
series : labelpairs [ : 1 ] ,
} }
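// Note the semantics exercised above: a negative matcher such as c!~"abd.*"
// also selects series where the label "c" is not set at all, which is why
// that case expects both of the appended series.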
q , err := db . Querier ( context . TODO ( ) , 0 , 10 )
require . NoError ( t , err )
defer func ( ) { require . NoError ( t , q . Close ( ) ) } ( )
for _ , c := range cases {
ss := q . Select ( false , nil , c . selector ... )
lres , _ , ws , err := expandSeriesSet ( ss )
require . NoError ( t , err )
require . Equal ( t , 0 , len ( ws ) )
require . Equal ( t , c . series , lres )
}
}
// expandSeriesSet returns the raw labels in the order they are retrieved from
// the series set and the samples keyed by Labels().String().
func expandSeriesSet ( ss storage . SeriesSet ) ( [ ] labels . Labels , map [ string ] [ ] sample , storage . Warnings , error ) {
resultLabels := [ ] labels . Labels { }
resultSamples := map [ string ] [ ] sample { }
for ss . Next ( ) {
series := ss . At ( )
samples := [ ] sample { }
it := series . Iterator ( )
for it . Next ( ) {
t , v := it . At ( )
samples = append ( samples , sample { t : t , v : v } )
}
resultLabels = append ( resultLabels , series . Labels ( ) )
resultSamples [ series . Labels ( ) . String ( ) ] = samples
}
return resultLabels , resultSamples , ss . Warnings ( ) , ss . Err ( )
}
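
// A typical call site, as in the test above:
//
//	lres, _, ws, err := expandSeriesSet(q.Select(false, nil, c.selector...))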
func TestOverlappingBlocksDetectsAllOverlaps ( t * testing . T ) {
// Create 11 blocks that do not overlap (0-10, 10-20, ..., 100-110) but in reverse order to ensure our algorithm
// will handle that.
metas := make ( [ ] BlockMeta , 11 )
for i := 10 ; i >= 0 ; i -- {
metas [ i ] = BlockMeta { MinTime : int64 ( i * 10 ) , MaxTime : int64 ( ( i + 1 ) * 10 ) }
}

require . Equal ( t , 0 , len ( OverlappingBlocks ( metas ) ) , "we found unexpected overlaps" )

// Add overlapping blocks. We have to establish order again since we aren't interested
// in trivial overlaps caused by unordered input.
add := func ( ms ... BlockMeta ) [ ] BlockMeta {
repl := append ( append ( [ ] BlockMeta { } , metas ... ) , ms ... )
sort . Slice ( repl , func ( i , j int ) bool {
return repl [ i ] . MinTime < repl [ j ] . MinTime
} )
return repl
}
// o1 overlaps with 10-20.
o1 := BlockMeta { MinTime : 15 , MaxTime : 17 }
require . Equal ( t , Overlaps {
{ Min : 15 , Max : 17 } : { metas [ 1 ] , o1 } ,
} , OverlappingBlocks ( add ( o1 ) ) )
// o2 overlaps with 20-30 and 30-40.
o2 := BlockMeta { MinTime : 21 , MaxTime : 31 }
require . Equal ( t , Overlaps {
{ Min : 21 , Max : 30 } : { metas [ 2 ] , o2 } ,
{ Min : 30 , Max : 31 } : { o2 , metas [ 3 ] } ,
} , OverlappingBlocks ( add ( o2 ) ) )
// o3a and o3b overlaps with 30-40 and each other.
o3a := BlockMeta { MinTime : 33 , MaxTime : 39 }
o3b := BlockMeta { MinTime : 34 , MaxTime : 36 }
require . Equal ( t , Overlaps {
{ Min : 34 , Max : 36 } : { metas [ 3 ] , o3a , o3b } ,
} , OverlappingBlocks ( add ( o3a , o3b ) ) )
// o4 is 1:1 overlap with 50-60.
o4 := BlockMeta { MinTime : 50 , MaxTime : 60 }
require . Equal ( t , Overlaps {
{ Min : 50 , Max : 60 } : { metas [ 5 ] , o4 } ,
} , OverlappingBlocks ( add ( o4 ) ) )
// o5 overlaps with 60-70, 70-80 and 80-90.
o5 := BlockMeta { MinTime : 61 , MaxTime : 85 }
require . Equal ( t , Overlaps {
{ Min : 61 , Max : 70 } : { metas [ 6 ] , o5 } ,
{ Min : 70 , Max : 80 } : { o5 , metas [ 7 ] } ,
{ Min : 80 , Max : 85 } : { o5 , metas [ 8 ] } ,
} , OverlappingBlocks ( add ( o5 ) ) )
// o6a overlaps with 90-100, 100-110 and o6b, o6b overlaps with 90-100 and o6a.
o6a := BlockMeta { MinTime : 92 , MaxTime : 105 }
o6b := BlockMeta { MinTime : 94 , MaxTime : 99 }
require . Equal ( t , Overlaps {
{ Min : 94 , Max : 99 } : { metas [ 9 ] , o6a , o6b } ,
{ Min : 100 , Max : 105 } : { o6a , metas [ 10 ] } ,
} , OverlappingBlocks ( add ( o6a , o6b ) ) )
// All together.
require . Equal ( t , Overlaps {
{ Min : 15 , Max : 17 } : { metas [ 1 ] , o1 } ,
{ Min : 21 , Max : 30 } : { metas [ 2 ] , o2 } , { Min : 30 , Max : 31 } : { o2 , metas [ 3 ] } ,
{ Min : 34 , Max : 36 } : { metas [ 3 ] , o3a , o3b } ,
{ Min : 50 , Max : 60 } : { metas [ 5 ] , o4 } ,
{ Min : 61 , Max : 70 } : { metas [ 6 ] , o5 } , { Min : 70 , Max : 80 } : { o5 , metas [ 7 ] } , { Min : 80 , Max : 85 } : { o5 , metas [ 8 ] } ,
{ Min : 94 , Max : 99 } : { metas [ 9 ] , o6a , o6b } , { Min : 100 , Max : 105 } : { o6a , metas [ 10 ] } ,
} , OverlappingBlocks ( add ( o1 , o2 , o3a , o3b , o4 , o5 , o6a , o6b ) ) )
// Additional case.
var nc1 [ ] BlockMeta
nc1 = append ( nc1 , BlockMeta { MinTime : 1 , MaxTime : 5 } )
nc1 = append ( nc1 , BlockMeta { MinTime : 2 , MaxTime : 3 } )
nc1 = append ( nc1 , BlockMeta { MinTime : 2 , MaxTime : 3 } )
nc1 = append ( nc1 , BlockMeta { MinTime : 2 , MaxTime : 3 } )
nc1 = append ( nc1 , BlockMeta { MinTime : 2 , MaxTime : 3 } )
nc1 = append ( nc1 , BlockMeta { MinTime : 2 , MaxTime : 6 } )
nc1 = append ( nc1 , BlockMeta { MinTime : 3 , MaxTime : 5 } )
nc1 = append ( nc1 , BlockMeta { MinTime : 5 , MaxTime : 7 } )
nc1 = append ( nc1 , BlockMeta { MinTime : 7 , MaxTime : 10 } )
nc1 = append ( nc1 , BlockMeta { MinTime : 8 , MaxTime : 9 } )
require . Equal ( t , Overlaps {
{ Min : 2 , Max : 3 } : { nc1 [ 0 ] , nc1 [ 1 ] , nc1 [ 2 ] , nc1 [ 3 ] , nc1 [ 4 ] , nc1 [ 5 ] } , // 1-5, 2-3, 2-3, 2-3, 2-3, 2-6
{ Min : 3 , Max : 5 } : { nc1 [ 0 ] , nc1 [ 5 ] , nc1 [ 6 ] } , // 1-5, 2-6, 3-5
{ Min : 5 , Max : 6 } : { nc1 [ 5 ] , nc1 [ 7 ] } , // 2-6, 5-7
{ Min : 8 , Max : 9 } : { nc1 [ 8 ] , nc1 [ 9 ] } , // 7-10, 8-9
} , OverlappingBlocks ( nc1 ) )
}
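
// The assertions above rely on a simple property: after sorting blocks by
// MinTime, some pair of blocks overlaps iff some consecutive pair does.
// Below is a minimal, hypothetical helper (not part of the package API)
// expressing just that predicate; OverlappingBlocks additionally groups the
// blocks by the exact time ranges where they overlap.
func anyOverlap(metas []BlockMeta) bool {
	sorted := append([]BlockMeta{}, metas...)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i].MinTime < sorted[j].MinTime })
	for i := 1; i < len(sorted); i++ {
		// Block ranges are half-open [MinTime, MaxTime), so touching
		// blocks like [10, 20) and [20, 30) do not count as overlapping.
		if sorted[i].MinTime < sorted[i-1].MaxTime {
			return true
		}
	}
	return false
}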
// Regression test for https://github.com/prometheus/tsdb/issues/347
func TestChunkAtBlockBoundary ( t * testing . T ) {
db := openTestDB ( t , nil , nil )
defer func ( ) {
require . NoError ( t , db . Close ( ) )
} ( )
ctx := context . Background ( )
app := db . Appender ( ctx )
blockRange := db . compactor . ( * LeveledCompactor ) . ranges [ 0 ]
label := labels . FromStrings ( "foo" , "bar" )
for i := int64 ( 0 ) ; i < 3 ; i ++ {
_ , err := app . Append ( 0 , label , i * blockRange , 0 )
require . NoError ( t , err )
_ , err = app . Append ( 0 , label , i * blockRange + 1000 , 0 )
require . NoError ( t , err )
}
err := app . Commit ( )
require . NoError ( t , err )
err = db . Compact ( )
require . NoError ( t , err )
for _ , block := range db . Blocks ( ) {
r , err := block . Index ( )
require . NoError ( t , err )
defer r . Close ( )
meta := block . Meta ( )
k , v := index . AllPostingsKey ( )
p , err := r . Postings ( k , v )
require . NoError ( t , err )
var (
lset labels . Labels
chks [ ] chunks . Meta
)
chunkCount := 0
for p . Next ( ) {
err = r . Series ( p . At ( ) , & lset , & chks )
require . NoError ( t , err )
for _ , c := range chks {
require . True ( t , meta . MinTime <= c . MinTime && c . MaxTime <= meta . MaxTime ,
"chunk spans beyond block boundaries: [block.MinTime=%d, block.MaxTime=%d]; [chunk.MinTime=%d, chunk.MaxTime=%d]" ,
meta . MinTime , meta . MaxTime , c . MinTime , c . MaxTime )
chunkCount ++
}
}
require . Equal ( t , 1 , chunkCount , "expected 1 chunk in block %s, got %d" , meta . ULID , chunkCount )
}
}
func TestQuerierWithBoundaryChunks ( t * testing . T ) {
db := openTestDB ( t , nil , nil )
defer func ( ) {
require . NoError ( t , db . Close ( ) )
} ( )
ctx := context . Background ( )
app := db . Appender ( ctx )
blockRange := db . compactor . ( * LeveledCompactor ) . ranges [ 0 ]
label := labels . FromStrings ( "foo" , "bar" )
for i := int64 ( 0 ) ; i < 5 ; i ++ {
_ , err := app . Append ( 0 , label , i * blockRange , 0 )
require . NoError ( t , err )
_ , err = app . Append ( 0 , labels . FromStrings ( "blockID" , strconv . FormatInt ( i , 10 ) ) , i * blockRange , 0 )
require . NoError ( t , err )
}
err := app . Commit ( )
require . NoError ( t , err )
err = db . Compact ( )
require . NoError ( t , err )
require . GreaterOrEqual ( t , len ( db . blocks ) , 3 , "invalid test, less than three blocks in DB" )
q , err := db . Querier ( context . TODO ( ) , blockRange , 2 * blockRange )
require . NoError ( t , err )
defer q . Close ( )
// The requested interval covers 2 blocks, so the querier's label values for blockID should give us 2 values, one from each block.
b , ws , err := q . LabelValues ( "blockID" )
require . NoError ( t , err )
require . Equal ( t , storage . Warnings ( nil ) , ws )
require . Equal ( t , [ ] string { "1" , "2" } , b )
}
// TestInitializeHeadTimestamp ensures that the h.minTime is set properly.
// - no blocks no WAL: set to the time of the first appended sample
// - no blocks with WAL: set to the smallest sample from the WAL
// - with blocks no WAL: set to the last block maxT
// - with blocks with WAL: same as above
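// For example, in the existing-block-and-wal case below, a block covering
// [1000, 6000) combined with WAL samples at t=5000 and t=15000 yields
// minTime=6000 (the block maxt wins) and maxTime=15000.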
func TestInitializeHeadTimestamp ( t * testing . T ) {
t . Run ( "clean" , func ( t * testing . T ) {
dir , err := ioutil . TempDir ( "" , "test_head_init" )
require . NoError ( t , err )
defer func ( ) {
require . NoError ( t , os . RemoveAll ( dir ) )
} ( )
db , err := Open ( dir , nil , nil , nil , nil )
require . NoError ( t , err )
defer db . Close ( )
// Should be set to init values if no WAL or blocks exist so far.
require . Equal ( t , int64 ( math . MaxInt64 ) , db . head . MinTime ( ) )
require . Equal ( t , int64 ( math . MinInt64 ) , db . head . MaxTime ( ) )
// First added sample initializes the writable range.
ctx := context . Background ( )
app := db . Appender ( ctx )
_ , err = app . Append ( 0 , labels . FromStrings ( "a" , "b" ) , 1000 , 1 )
require . NoError ( t , err )
require . Equal ( t , int64 ( 1000 ) , db . head . MinTime ( ) )
require . Equal ( t , int64 ( 1000 ) , db . head . MaxTime ( ) )
} )
t . Run ( "wal-only" , func ( t * testing . T ) {
dir , err := ioutil . TempDir ( "" , "test_head_init" )
require . NoError ( t , err )
defer func ( ) {
require . NoError ( t , os . RemoveAll ( dir ) )
} ( )
require . NoError ( t , os . MkdirAll ( path . Join ( dir , "wal" ) , 0o777 ) )
w , err := wal . New ( nil , nil , path . Join ( dir , "wal" ) , false )
require . NoError ( t , err )
var enc record . Encoder
err = w . Log (
enc . Series ( [ ] record . RefSeries {
{ Ref : 123 , Labels : labels . FromStrings ( "a" , "1" ) } ,
{ Ref : 124 , Labels : labels . FromStrings ( "a" , "2" ) } ,
} , nil ) ,
enc . Samples ( [ ] record . RefSample {
{ Ref : 123 , T : 5000 , V : 1 } ,
{ Ref : 124 , T : 15000 , V : 1 } ,
} , nil ) ,
)
require . NoError ( t , err )
require . NoError ( t , w . Close ( ) )
db , err := Open ( dir , nil , nil , nil , nil )
require . NoError ( t , err )
defer db . Close ( )
require . Equal ( t , int64 ( 5000 ) , db . head . MinTime ( ) )
require . Equal ( t , int64 ( 15000 ) , db . head . MaxTime ( ) )
} )
t . Run ( "existing-block" , func ( t * testing . T ) {
dir , err := ioutil . TempDir ( "" , "test_head_init" )
require . NoError ( t , err )
defer func ( ) {
require . NoError ( t , os . RemoveAll ( dir ) )
} ( )
createBlock ( t , dir , genSeries ( 1 , 1 , 1000 , 2000 ) )
db , err := Open ( dir , nil , nil , nil , nil )
require . NoError ( t , err )
defer db . Close ( )
require . Equal ( t , int64 ( 2000 ) , db . head . MinTime ( ) )
require . Equal ( t , int64 ( 2000 ) , db . head . MaxTime ( ) )
} )
t . Run ( "existing-block-and-wal" , func ( t * testing . T ) {
dir , err := ioutil . TempDir ( "" , "test_head_init" )
require . NoError ( t , err )
defer func ( ) {
require . NoError ( t , os . RemoveAll ( dir ) )
} ( )
createBlock ( t , dir , genSeries ( 1 , 1 , 1000 , 6000 ) )
require . NoError ( t , os . MkdirAll ( path . Join ( dir , "wal" ) , 0o777 ) )
w , err := wal . New ( nil , nil , path . Join ( dir , "wal" ) , false )
require . NoError ( t , err )
var enc record . Encoder
err = w . Log (
enc . Series ( [ ] record . RefSeries {
{ Ref : 123 , Labels : labels . FromStrings ( "a" , "1" ) } ,
{ Ref : 124 , Labels : labels . FromStrings ( "a" , "2" ) } ,
} , nil ) ,
enc . Samples ( [ ] record . RefSample {
{ Ref : 123 , T : 5000 , V : 1 } ,
{ Ref : 124 , T : 15000 , V : 1 } ,
} , nil ) ,
)
require . NoError ( t , err )
require . NoError ( t , w . Close ( ) )
r := prometheus . NewRegistry ( )
db , err := Open ( dir , nil , r , nil , nil )
require . NoError ( t , err )
defer db . Close ( )
require . Equal ( t , int64 ( 6000 ) , db . head . MinTime ( ) )
require . Equal ( t , int64 ( 15000 ) , db . head . MaxTime ( ) )
// Check that old series has been GCed.
require . Equal ( t , 1.0 , prom_testutil . ToFloat64 ( db . head . metrics . series ) )
} )
}
func TestNoEmptyBlocks ( t * testing . T ) {
db := openTestDB ( t , nil , [ ] int64 { 100 } )
ctx := context . Background ( )
defer func ( ) {
require . NoError ( t , db . Close ( ) )
} ( )
db . DisableCompactions ( )
rangeToTriggerCompaction := db . compactor . ( * LeveledCompactor ) . ranges [ 0 ] / 2 * 3 - 1
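// The head is considered compactable once it spans more than 1.5x a block
// range (ranges[0]/2*3), so the appends below are spaced to carry the head
// across that threshold before each explicit db.Compact() call.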
defaultLabel := labels . FromStrings ( "foo" , "bar" )
defaultMatcher := labels . MustNewMatcher ( labels . MatchRegexp , "" , ".*" )
t . Run ( "Test no blocks after compact with empty head." , func ( t * testing . T ) {
require . NoError ( t , db . Compact ( ) )
actBlocks , err := blockDirs ( db . Dir ( ) )
require . NoError ( t , err )
require . Equal ( t , len ( db . Blocks ( ) ) , len ( actBlocks ) )
require . Equal ( t , 0 , len ( actBlocks ) )
require . Equal ( t , 0 , int ( prom_testutil . ToFloat64 ( db . compactor . ( * LeveledCompactor ) . metrics . ran ) ) , "no compaction should be triggered here" )
} )
t . Run ( "Test no blocks after deleting all samples from head." , func ( t * testing . T ) {
app := db . Appender ( ctx )
_ , err := app . Append ( 0 , defaultLabel , 1 , 0 )
require . NoError ( t , err )
_ , err = app . Append ( 0 , defaultLabel , 2 , 0 )
require . NoError ( t , err )
_ , err = app . Append ( 0 , defaultLabel , 3 + rangeToTriggerCompaction , 0 )
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
require . NoError ( t , db . Delete ( math . MinInt64 , math . MaxInt64 , defaultMatcher ) )
require . NoError ( t , db . Compact ( ) )
require . Equal ( t , 1 , int ( prom_testutil . ToFloat64 ( db . compactor . ( * LeveledCompactor ) . metrics . ran ) ) , "compaction should have been triggered here" )
actBlocks , err := blockDirs ( db . Dir ( ) )
require . NoError ( t , err )
require . Equal ( t , len ( db . Blocks ( ) ) , len ( actBlocks ) )
require . Equal ( t , 0 , len ( actBlocks ) )
app = db . Appender ( ctx )
_ , err = app . Append ( 0 , defaultLabel , 1 , 0 )
require . Equal ( t , storage . ErrOutOfBounds , err , "the head should be truncated so no samples in the past should be allowed" )
// Adding new blocks.
currentTime := db . Head ( ) . MaxTime ( )
_ , err = app . Append ( 0 , defaultLabel , currentTime , 0 )
require . NoError ( t , err )
_ , err = app . Append ( 0 , defaultLabel , currentTime + 1 , 0 )
require . NoError ( t , err )
_ , err = app . Append ( 0 , defaultLabel , currentTime + rangeToTriggerCompaction , 0 )
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
require . NoError ( t , db . Compact ( ) )
require . Equal ( t , 2 , int ( prom_testutil . ToFloat64 ( db . compactor . ( * LeveledCompactor ) . metrics . ran ) ) , "compaction should have been triggered here" )
actBlocks , err = blockDirs ( db . Dir ( ) )
require . NoError ( t , err )
require . Equal ( t , len ( db . Blocks ( ) ) , len ( actBlocks ) )
require . Equal ( t , 1 , len ( actBlocks ) , "No blocks created when compacting with >0 samples" )
} )
t . Run ( ` When no new block is created from head , and there are some blocks on disk
compaction should not run into infinite loop ( was seen during development ) . ` , func ( t * testing . T ) {
oldBlocks := db . Blocks ( )
app := db . Appender ( ctx )
currentTime := db . Head ( ) . MaxTime ( )
_ , err := app . Append ( 0 , defaultLabel , currentTime , 0 )
require . NoError ( t , err )
_ , err = app . Append ( 0 , defaultLabel , currentTime + 1 , 0 )
require . NoError ( t , err )
_ , err = app . Append ( 0 , defaultLabel , currentTime + rangeToTriggerCompaction , 0 )
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
require . NoError ( t , db . head . Delete ( math . MinInt64 , math . MaxInt64 , defaultMatcher ) )
require . NoError ( t , db . Compact ( ) )
require . Equal ( t , 3 , int ( prom_testutil . ToFloat64 ( db . compactor . ( * LeveledCompactor ) . metrics . ran ) ) , "compaction should have been triggered here" )
require . Equal ( t , oldBlocks , db . Blocks ( ) )
} )
t . Run ( "Test no blocks remaining after deleting all samples from disk." , func ( t * testing . T ) {
currentTime := db . Head ( ) . MaxTime ( )
blocks := [ ] * BlockMeta {
{ MinTime : currentTime , MaxTime : currentTime + db . compactor . ( * LeveledCompactor ) . ranges [ 0 ] } ,
{ MinTime : currentTime + 100 , MaxTime : currentTime + 100 + db . compactor . ( * LeveledCompactor ) . ranges [ 0 ] } ,
}
for _ , m := range blocks {
createBlock ( t , db . Dir ( ) , genSeries ( 2 , 2 , m . MinTime , m . MaxTime ) )
}
oldBlocks := db . Blocks ( )
require . NoError ( t , db . reloadBlocks ( ) ) // Reload the db to register the new blocks.
require . Equal ( t , len ( blocks ) + len ( oldBlocks ) , len ( db . Blocks ( ) ) ) // Ensure all blocks are registered.
require . NoError ( t , db . Delete ( math . MinInt64 , math . MaxInt64 , defaultMatcher ) )
require . NoError ( t , db . Compact ( ) )
require . Equal ( t , 5 , int ( prom_testutil . ToFloat64 ( db . compactor . ( * LeveledCompactor ) . metrics . ran ) ) , "compaction should have been triggered here once for each block that has tombstones" )
actBlocks , err := blockDirs ( db . Dir ( ) )
require . NoError ( t , err )
require . Equal ( t , len ( db . Blocks ( ) ) , len ( actBlocks ) )
require . Equal ( t , 1 , len ( actBlocks ) , "All samples are deleted. Only the most recent block should remain after compaction." )
} )
}
func TestDB_LabelNames ( t * testing . T ) {
tests := [ ] struct {
// Add 'sampleLabels1' -> Test Head -> Compact -> Test Disk ->
// -> Add 'sampleLabels2' -> Test Head+Disk
sampleLabels1 [ ] [ 2 ] string // For checking head and disk separately.
// To test Head+Disk, sampleLabels2 should have
// at least 1 unique label name which is not in sampleLabels1.
sampleLabels2 [ ] [ 2 ] string // For checking head and disk together.
exp1 [ ] string // after adding sampleLabels1.
exp2 [ ] string // after adding sampleLabels1 and sampleLabels2.
} {
{
sampleLabels1 : [ ] [ 2 ] string {
{ "name1" , "1" } ,
{ "name3" , "3" } ,
{ "name2" , "2" } ,
} ,
sampleLabels2 : [ ] [ 2 ] string {
{ "name4" , "4" } ,
{ "name1" , "1" } ,
} ,
exp1 : [ ] string { "name1" , "name2" , "name3" } ,
exp2 : [ ] string { "name1" , "name2" , "name3" , "name4" } ,
} ,
{
sampleLabels1 : [ ] [ 2 ] string {
{ "name2" , "2" } ,
{ "name1" , "1" } ,
{ "name2" , "2" } ,
} ,
sampleLabels2 : [ ] [ 2 ] string {
{ "name6" , "6" } ,
{ "name0" , "0" } ,
} ,
exp1 : [ ] string { "name1" , "name2" } ,
exp2 : [ ] string { "name0" , "name1" , "name2" , "name6" } ,
} ,
}
blockRange := int64 ( 1000 )
// Appends samples into the database.
appendSamples := func ( db * DB , mint , maxt int64 , sampleLabels [ ] [ 2 ] string ) {
t . Helper ( )
ctx := context . Background ( )
app := db . Appender ( ctx )
for i := mint ; i <= maxt ; i ++ {
for _ , tuple := range sampleLabels {
label := labels . FromStrings ( tuple [ 0 ] , tuple [ 1 ] )
_ , err := app . Append ( 0 , label , i * blockRange , 0 )
require . NoError ( t , err )
}
}
err := app . Commit ( )
require . NoError ( t , err )
}
for _ , tst := range tests {
db := openTestDB ( t , nil , nil )
defer func ( ) {
require . NoError ( t , db . Close ( ) )
} ( )
appendSamples ( db , 0 , 4 , tst . sampleLabels1 )
// Testing head.
headIndexr , err := db . head . Index ( )
require . NoError ( t , err )
labelNames , err := headIndexr . LabelNames ( )
require . NoError ( t , err )
require . Equal ( t , tst . exp1 , labelNames )
require . NoError ( t , headIndexr . Close ( ) )
// Testing disk.
err = db . Compact ( )
require . NoError ( t , err )
// All blocks have the same label names, hence check them individually.
// No need to aggregate and check.
for _ , b := range db . Blocks ( ) {
blockIndexr , err := b . Index ( )
require . NoError ( t , err )
labelNames , err = blockIndexr . LabelNames ( )
require . NoError ( t , err )
require . Equal ( t , tst . exp1 , labelNames )
require . NoError ( t , blockIndexr . Close ( ) )
}
// Adding more samples to the head with new label names
// so that we can test (head+disk).LabelNames() (the union).
appendSamples ( db , 5 , 9 , tst . sampleLabels2 )
// Testing DB (union).
q , err := db . Querier ( context . TODO ( ) , math . MinInt64 , math . MaxInt64 )
require . NoError ( t , err )
var ws storage . Warnings
labelNames , ws , err = q . LabelNames ( )
require . NoError ( t , err )
require . Equal ( t , 0 , len ( ws ) )
require . NoError ( t , q . Close ( ) )
require . Equal ( t , tst . exp2 , labelNames )
}
}
func TestCorrectNumTombstones ( t * testing . T ) {
db := openTestDB ( t , nil , nil )
defer func ( ) {
require . NoError ( t , db . Close ( ) )
} ( )
blockRange := db . compactor . ( * LeveledCompactor ) . ranges [ 0 ]
defaultLabel := labels . FromStrings ( "foo" , "bar" )
defaultMatcher := labels . MustNewMatcher ( labels . MatchEqual , defaultLabel [ 0 ] . Name , defaultLabel [ 0 ] . Value )
ctx := context . Background ( )
app := db . Appender ( ctx )
for i := int64 ( 0 ) ; i < 3 ; i ++ {
for j := int64 ( 0 ) ; j < 15 ; j ++ {
_ , err := app . Append ( 0 , defaultLabel , i * blockRange + j , 0 )
require . NoError ( t , err )
}
}
require . NoError ( t , app . Commit ( ) )
err := db . Compact ( )
require . NoError ( t , err )
require . Equal ( t , 1 , len ( db . blocks ) )
require . NoError ( t , db . Delete ( 0 , 1 , defaultMatcher ) )
require . Equal ( t , uint64 ( 1 ) , db . blocks [ 0 ] . meta . Stats . NumTombstones )
// {0, 1} and {2, 3} are merged to form 1 tombstone.
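// Tombstone intervals are inclusive and adjacent intervals are merged, so
// adding [2, 3] right after the existing [0, 1] still leaves one stored
// interval rather than two.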
require . NoError ( t , db . Delete ( 2 , 3 , defaultMatcher ) )
require . Equal ( t , uint64 ( 1 ) , db . blocks [ 0 ] . meta . Stats . NumTombstones )
require . NoError ( t , db . Delete ( 5 , 6 , defaultMatcher ) )
require . Equal ( t , uint64 ( 2 ) , db . blocks [ 0 ] . meta . Stats . NumTombstones )
require . NoError ( t , db . Delete ( 9 , 11 , defaultMatcher ) )
require . Equal ( t , uint64 ( 3 ) , db . blocks [ 0 ] . meta . Stats . NumTombstones )
}
// TestBlockRanges checks the following use cases:
// - No samples can be added with timestamps lower than the last block maxt.
// - The compactor doesn't create overlapping blocks
// even when the last block is not within the default boundaries.
// - Lower boundary is based on the smallest sample in the head and
// upper boundary is rounded to the configured block range.
//
// This ensures that a snapshot that includes the head and creates a block with a custom time range
// will not overlap with the first block created by the next compaction.
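// For example, with a 10000ms block range and a head whose smallest sample
// sits at t=4, the first compacted block may span [4, 10000) rather than
// being snapped down to a lower boundary of 0.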
func TestBlockRanges ( t * testing . T ) {
logger := log . NewLogfmtLogger ( log . NewSyncWriter ( os . Stderr ) )
ctx := context . Background ( )
2018-12-04 02:30:49 -08:00
dir , err := ioutil . TempDir ( "" , "test_storage" )
require . NoError ( t , err )
// Test that the compactor doesn't create overlapping blocks
// when a non-standard block already exists.
firstBlockMaxT := int64 ( 3 )
createBlock ( t , dir , genSeries ( 1 , 1 , 0 , firstBlockMaxT ) )
db , err := open ( dir , logger , nil , DefaultOptions ( ) , [ ] int64 { 10000 } , nil )
require . NoError ( t , err )
rangeToTriggerCompaction := db . compactor . ( * LeveledCompactor ) . ranges [ 0 ] / 2 * 3 + 1
defer func ( ) {
os . RemoveAll ( dir )
} ( )
app := db . Appender ( ctx )
lbl := labels . Labels { { Name : "a" , Value : "b" } }
_ , err = app . Append ( 0 , lbl , firstBlockMaxT - 1 , rand . Float64 ( ) )
if err == nil {
t . Fatalf ( "appending a sample with a timestamp covered by a previous block shouldn't be possible" )
}
_ , err = app . Append ( 0 , lbl , firstBlockMaxT + 1 , rand . Float64 ( ) )
require . NoError ( t , err )
_ , err = app . Append ( 0 , lbl , firstBlockMaxT + 2 , rand . Float64 ( ) )
require . NoError ( t , err )
secondBlockMaxt := firstBlockMaxT + rangeToTriggerCompaction
_ , err = app . Append ( 0 , lbl , secondBlockMaxt , rand . Float64 ( ) ) // Add a sample to trigger a new compaction.
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
for x := 0 ; x < 100 ; x ++ {
if len ( db . Blocks ( ) ) == 2 {
break
}
time . Sleep ( 100 * time . Millisecond )
}
require . Equal ( t , 2 , len ( db . Blocks ( ) ) , "no new block created after the set timeout" )
if db . Blocks ( ) [ 0 ] . Meta ( ) . MaxTime > db . Blocks ( ) [ 1 ] . Meta ( ) . MinTime {
t . Fatalf ( "new block overlaps old:%v,new:%v" , db . Blocks ( ) [ 0 ] . Meta ( ) , db . Blocks ( ) [ 1 ] . Meta ( ) )
}
// Test that wal records are skipped when an existing block covers the same time ranges
// and compaction doesn't create an overlapping block.
app = db . Appender ( ctx )
db . DisableCompactions ( )
_ , err = app . Append ( 0 , lbl , secondBlockMaxt + 1 , rand . Float64 ( ) )
require . NoError ( t , err )
_ , err = app . Append ( 0 , lbl , secondBlockMaxt + 2 , rand . Float64 ( ) )
require . NoError ( t , err )
_ , err = app . Append ( 0 , lbl , secondBlockMaxt + 3 , rand . Float64 ( ) )
require . NoError ( t , err )
_ , err = app . Append ( 0 , lbl , secondBlockMaxt + 4 , rand . Float64 ( ) )
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
require . NoError ( t , db . Close ( ) )
thirdBlockMaxt := secondBlockMaxt + 2
createBlock ( t , dir , genSeries ( 1 , 1 , secondBlockMaxt + 1 , thirdBlockMaxt ) )
db , err = open ( dir , logger , nil , DefaultOptions ( ) , [ ] int64 { 10000 } , nil )
require . NoError ( t , err )
defer db . Close ( )
require . Equal ( t , 3 , len ( db . Blocks ( ) ) , "db doesn't include expected number of blocks" )
require . Equal ( t , db . Blocks ( ) [ 2 ] . Meta ( ) . MaxTime , thirdBlockMaxt , "unexpected maxt of the last block" )
app = db . Appender ( ctx )
_ , err = app . Append ( 0 , lbl , thirdBlockMaxt + rangeToTriggerCompaction , rand . Float64 ( ) ) // Trigger a compaction
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
2018-12-12 03:49:03 -08:00
for x := 0 ; x < 100 ; x ++ {
2018-12-04 02:30:49 -08:00
if len ( db . Blocks ( ) ) == 4 {
break
}
time . Sleep ( 100 * time . Millisecond )
}
2020-10-29 02:43:23 -07:00
require . Equal ( t , 4 , len ( db . Blocks ( ) ) , "no new block created after the set timeout" )
2018-12-04 02:30:49 -08:00
if db . Blocks ( ) [ 2 ] . Meta ( ) . MaxTime > db . Blocks ( ) [ 3 ] . Meta ( ) . MinTime {
t . Fatalf ( "new block overlaps old:%v,new:%v" , db . Blocks ( ) [ 2 ] . Meta ( ) , db . Blocks ( ) [ 3 ] . Meta ( ) )
}
}
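
// The polling loops above wait for the background compactor to publish a new
// block. A minimal sketch of that wait-for-condition pattern as a reusable
// helper; the name waitForCondition and the timeout parameter are illustrative
// assumptions, not part of the original test.
func waitForCondition(t *testing.T, timeout time.Duration, cond func() bool) {
	t.Helper()
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if cond() {
			return
		}
		time.Sleep(100 * time.Millisecond) // Same poll interval as the loops above.
	}
	t.Fatalf("condition not met within %v", timeout)
}

// For example, the first loop above could be expressed as:
//	waitForCondition(t, 10*time.Second, func() bool { return len(db.Blocks()) == 2 })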

// TestDBReadOnly ensures that opening a DB in read-only mode doesn't modify any files on the disk.
// It also checks that the API calls return results equivalent to those of a DB opened normally via db.Open().
func TestDBReadOnly(t *testing.T) {
	var (
		dbDir     string
		logger    = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
		expBlocks []*Block
		expSeries map[string][]tsdbutil.Sample
		expChunks map[string][]chunks.Meta
		expDBHash []byte
		matchAll  = labels.MustNewMatcher(labels.MatchEqual, "", "")
		err       error
	)

	// Bootstrap the db.
	{
		dbDir, err = ioutil.TempDir("", "test")
		require.NoError(t, err)

		defer func() {
			require.NoError(t, os.RemoveAll(dbDir))
		}()

		dbBlocks := []*BlockMeta{
			// Create three 2-sample blocks.
			{MinTime: 10, MaxTime: 12},
			{MinTime: 12, MaxTime: 14},
			{MinTime: 14, MaxTime: 16},
		}

		for _, m := range dbBlocks {
			_ = createBlock(t, dbDir, genSeries(1, 1, m.MinTime, m.MaxTime))
		}

		// Add head to test DBReadOnly WAL reading capabilities.
		w, err := wal.New(logger, nil, filepath.Join(dbDir, "wal"), true)
		require.NoError(t, err)
		h := createHead(t, w, genSeries(1, 1, 16, 18), dbDir)
		require.NoError(t, h.Close())
	}

	// Open a normal db to use for a comparison.
	{
		dbWritable, err := Open(dbDir, logger, nil, nil, nil)
		require.NoError(t, err)
		dbWritable.DisableCompactions()

		dbSizeBeforeAppend, err := fileutil.DirSize(dbWritable.Dir())
		require.NoError(t, err)
		app := dbWritable.Appender(context.Background())
		_, err = app.Append(0, labels.FromStrings("foo", "bar"), dbWritable.Head().MaxTime()+1, 0)
		require.NoError(t, err)
		require.NoError(t, app.Commit())

		expBlocks = dbWritable.Blocks()
		expDbSize, err := fileutil.DirSize(dbWritable.Dir())
		require.NoError(t, err)
		require.Greater(t, expDbSize, dbSizeBeforeAppend, "db size didn't increase after an append")

		q, err := dbWritable.Querier(context.TODO(), math.MinInt64, math.MaxInt64)
		require.NoError(t, err)
		expSeries = query(t, q, matchAll)
		cq, err := dbWritable.ChunkQuerier(context.TODO(), math.MinInt64, math.MaxInt64)
		require.NoError(t, err)
		expChunks = queryChunks(t, cq, matchAll)

		require.NoError(t, dbWritable.Close()) // Close here to allow getting the dir hash for windows.
		expDBHash = testutil.DirHash(t, dbWritable.Dir())
	}

	// Open a read-only db and ensure that the API returns the same result as the normal DB.
	dbReadOnly, err := OpenDBReadOnly(dbDir, logger)
	require.NoError(t, err)
	defer func() { require.NoError(t, dbReadOnly.Close()) }()

	t.Run("blocks", func(t *testing.T) {
		blocks, err := dbReadOnly.Blocks()
		require.NoError(t, err)
		require.Equal(t, len(expBlocks), len(blocks))

		for i, expBlock := range expBlocks {
			require.Equal(t, expBlock.Meta(), blocks[i].Meta(), "block meta mismatch")
		}
	})

	t.Run("querier", func(t *testing.T) {
		q, err := dbReadOnly.Querier(context.TODO(), math.MinInt64, math.MaxInt64)
		require.NoError(t, err)

		readOnlySeries := query(t, q, matchAll)
		readOnlyDBHash := testutil.DirHash(t, dbDir)

		require.Equal(t, len(expSeries), len(readOnlySeries), "total series mismatch")
		require.Equal(t, expSeries, readOnlySeries, "series mismatch")
		require.Equal(t, expDBHash, readOnlyDBHash, "after all read operations the db hash should remain the same")
	})

	t.Run("chunk querier", func(t *testing.T) {
		cq, err := dbReadOnly.ChunkQuerier(context.TODO(), math.MinInt64, math.MaxInt64)
		require.NoError(t, err)

		readOnlySeries := queryChunks(t, cq, matchAll)
		readOnlyDBHash := testutil.DirHash(t, dbDir)

		require.Equal(t, len(expChunks), len(readOnlySeries), "total series mismatch")
		require.Equal(t, expChunks, readOnlySeries, "series chunks mismatch")
		require.Equal(t, expDBHash, readOnlyDBHash, "after all read operations the db hash should remain the same")
	})
}
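
// For reference, the minimal read-only read path exercised above, stripped of
// the comparison scaffolding. A sketch that assumes an existing TSDB directory
// at dir and reuses the query test helper from this file; the function name is
// an illustrative assumption.
func queryAllReadOnly(t *testing.T, dir string) map[string][]tsdbutil.Sample {
	db, err := OpenDBReadOnly(dir, log.NewNopLogger())
	require.NoError(t, err)
	defer func() { require.NoError(t, db.Close()) }()

	q, err := db.Querier(context.TODO(), math.MinInt64, math.MaxInt64)
	require.NoError(t, err)
	// An empty matcher pair matches all series, as in the test above.
	return query(t, q, labels.MustNewMatcher(labels.MatchEqual, "", ""))
}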

// TestDBReadOnlyClosing ensures that after closing the db
// all API methods return an ErrClosed.
func TestDBReadOnlyClosing(t *testing.T) {
	dbDir, err := ioutil.TempDir("", "test")
	require.NoError(t, err)

	defer func() {
		require.NoError(t, os.RemoveAll(dbDir))
	}()
	db, err := OpenDBReadOnly(dbDir, log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)))
	require.NoError(t, err)
	require.NoError(t, db.Close())
	require.Equal(t, db.Close(), ErrClosed)

	_, err = db.Blocks()
	require.Equal(t, err, ErrClosed)

	_, err = db.Querier(context.TODO(), 0, 1)
	require.Equal(t, err, ErrClosed)
}

func TestDBReadOnly_FlushWAL(t *testing.T) {
	var (
		dbDir  string
		logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
		err    error
		maxt   int
		ctx    = context.Background()
	)

	// Bootstrap the db.
	{
		dbDir, err = ioutil.TempDir("", "test")
		require.NoError(t, err)

		defer func() {
			require.NoError(t, os.RemoveAll(dbDir))
		}()

		// Append data to the WAL.
		db, err := Open(dbDir, logger, nil, nil, nil)
		require.NoError(t, err)
		db.DisableCompactions()
		app := db.Appender(ctx)
		maxt = 1000
		for i := 0; i < maxt; i++ {
			_, err := app.Append(0, labels.FromStrings(defaultLabelName, "flush"), int64(i), 1.0)
			require.NoError(t, err)
		}
		require.NoError(t, app.Commit())
		defer func() { require.NoError(t, db.Close()) }()
	}

	// Flush WAL.
	db, err := OpenDBReadOnly(dbDir, logger)
	require.NoError(t, err)

	flush, err := ioutil.TempDir("", "flush")
	require.NoError(t, err)

	defer func() {
		require.NoError(t, os.RemoveAll(flush))
	}()
	require.NoError(t, db.FlushWAL(flush))
	require.NoError(t, db.Close())

	// Reopen the DB from the flushed WAL block.
	db, err = OpenDBReadOnly(flush, logger)
	require.NoError(t, err)
	defer func() { require.NoError(t, db.Close()) }()
	blocks, err := db.Blocks()
	require.NoError(t, err)
	require.Equal(t, len(blocks), 1)

	querier, err := db.Querier(context.TODO(), 0, int64(maxt)-1)
	require.NoError(t, err)
	defer func() { require.NoError(t, querier.Close()) }()

	// Sum the values.
	seriesSet := querier.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, defaultLabelName, "flush"))
	sum := 0.0
	for seriesSet.Next() {
		series := seriesSet.At().Iterator()
		for series.Next() {
			_, v := series.At()
			sum += v
		}
		require.NoError(t, series.Err())
	}
	require.NoError(t, seriesSet.Err())
	require.Equal(t, 0, len(seriesSet.Warnings()))
	require.Equal(t, 1000.0, sum)
}
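
// The drain-and-sum loop above reappears in later tests in this file. A sketch
// of it as a standalone helper; the name sumSeriesSet is an illustrative
// assumption.
func sumSeriesSet(t *testing.T, ss storage.SeriesSet) float64 {
	t.Helper()
	sum := 0.0
	for ss.Next() {
		it := ss.At().Iterator()
		for it.Next() {
			_, v := it.At()
			sum += v
		}
		require.NoError(t, it.Err())
	}
	require.NoError(t, ss.Err())
	return sum
}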

func TestDBCannotSeePartialCommits(t *testing.T) {
	tmpdir, _ := ioutil.TempDir("", "test")
	defer func() {
		require.NoError(t, os.RemoveAll(tmpdir))
	}()

	db, err := Open(tmpdir, nil, nil, nil, nil)
	require.NoError(t, err)
	defer db.Close()

	stop := make(chan struct{})
	firstInsert := make(chan struct{})
	ctx := context.Background()

	// Insert data in batches.
	go func() {
		iter := 0
		for {
			app := db.Appender(ctx)
			for j := 0; j < 100; j++ {
				_, err := app.Append(0, labels.FromStrings("foo", "bar", "a", strconv.Itoa(j)), int64(iter), float64(iter))
				require.NoError(t, err)
			}
			err = app.Commit()
			require.NoError(t, err)

			if iter == 0 {
				close(firstInsert)
			}
			iter++

			select {
			case <-stop:
				return
			default:
			}
		}
	}()

	<-firstInsert

	// This is a race condition, so do a few tests to tickle it.
	// Usually most will fail.
	inconsistencies := 0
	for i := 0; i < 10; i++ {
		func() {
			querier, err := db.Querier(context.Background(), 0, 1000000)
			require.NoError(t, err)
			defer querier.Close()

			ss := querier.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
			_, seriesSet, ws, err := expandSeriesSet(ss)
			require.NoError(t, err)
			require.Equal(t, 0, len(ws))

			values := map[float64]struct{}{}
			for _, series := range seriesSet {
				values[series[len(series)-1].v] = struct{}{}
			}
			if len(values) != 1 {
				inconsistencies++
			}
		}()
	}
	stop <- struct{}{}

	require.Equal(t, 0, inconsistencies, "Some queries saw inconsistent results.")
}

func TestDBQueryDoesntSeeAppendsAfterCreation(t *testing.T) {
	tmpdir, _ := ioutil.TempDir("", "test")
	defer func() {
		require.NoError(t, os.RemoveAll(tmpdir))
	}()

	db, err := Open(tmpdir, nil, nil, nil, nil)
	require.NoError(t, err)
	defer db.Close()

	querierBeforeAdd, err := db.Querier(context.Background(), 0, 1000000)
	require.NoError(t, err)
	defer querierBeforeAdd.Close()

	ctx := context.Background()
	app := db.Appender(ctx)
	_, err = app.Append(0, labels.FromStrings("foo", "bar"), 0, 0)
	require.NoError(t, err)

	querierAfterAddButBeforeCommit, err := db.Querier(context.Background(), 0, 1000000)
	require.NoError(t, err)
	defer querierAfterAddButBeforeCommit.Close()

	// None of the queriers should return anything after the Add but before the commit.
	ss := querierBeforeAdd.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
	_, seriesSet, ws, err := expandSeriesSet(ss)
	require.NoError(t, err)
	require.Equal(t, 0, len(ws))
	require.Equal(t, map[string][]sample{}, seriesSet)

	ss = querierAfterAddButBeforeCommit.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
	_, seriesSet, ws, err = expandSeriesSet(ss)
	require.NoError(t, err)
	require.Equal(t, 0, len(ws))
	require.Equal(t, map[string][]sample{}, seriesSet)

	// This commit is after the queriers are created, so should not be returned.
	err = app.Commit()
	require.NoError(t, err)

	// Nothing returned for querier created before the Add.
	ss = querierBeforeAdd.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
	_, seriesSet, ws, err = expandSeriesSet(ss)
	require.NoError(t, err)
	require.Equal(t, 0, len(ws))
	require.Equal(t, map[string][]sample{}, seriesSet)

	// Series exists but has no samples for querier created after Add.
	ss = querierAfterAddButBeforeCommit.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
	_, seriesSet, ws, err = expandSeriesSet(ss)
	require.NoError(t, err)
	require.Equal(t, 0, len(ws))
	require.Equal(t, map[string][]sample{`{foo="bar"}`: {}}, seriesSet)

	querierAfterCommit, err := db.Querier(context.Background(), 0, 1000000)
	require.NoError(t, err)
	defer querierAfterCommit.Close()

	// Samples are returned for querier created after Commit.
	ss = querierAfterCommit.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
	_, seriesSet, ws, err = expandSeriesSet(ss)
	require.NoError(t, err)
	require.Equal(t, 0, len(ws))
	require.Equal(t, map[string][]sample{`{foo="bar"}`: {{t: 0, v: 0}}}, seriesSet)
}
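
// Both isolation tests above repeat the same select-expand-assert sequence. A
// hedged sketch of that sequence as a helper; the name and signature are
// illustrative assumptions built on the expandSeriesSet helper used above.
func assertQuerierSees(t *testing.T, q storage.Querier, exp map[string][]sample) {
	t.Helper()
	ss := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
	_, seriesSet, ws, err := expandSeriesSet(ss)
	require.NoError(t, err)
	require.Equal(t, 0, len(ws))
	require.Equal(t, exp, seriesSet)
}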

// TestChunkWriter_ReadAfterWrite ensures that chunk segments are cut at the set segment size and
// that the resulting segments include the expected chunk data.
func TestChunkWriter_ReadAfterWrite(t *testing.T) {
	chk1 := tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 1}})
	chk2 := tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 2}})
	chk3 := tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 3}})
	chk4 := tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 4}})
	chk5 := tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 5}})
	chunkSize := len(chk1.Chunk.Bytes()) + chunks.MaxChunkLengthFieldSize + chunks.ChunkEncodingSize + crc32.Size

	tests := []struct {
		chks [][]chunks.Meta
		segmentSize,
		expSegmentsCount int
		expSegmentSizes []int
	}{
		// 0: Last chunk ends at the segment boundary so
		// all chunks should fit in a single segment.
		{
			chks: [][]chunks.Meta{
				{
					chk1,
					chk2,
					chk3,
				},
			},
			segmentSize:      3 * chunkSize,
			expSegmentSizes:  []int{3 * chunkSize},
			expSegmentsCount: 1,
		},
		// 1: Two chunks can fit in a single segment so the last one should result in a new segment.
		{
			chks: [][]chunks.Meta{
				{
					chk1,
					chk2,
					chk3,
					chk4,
					chk5,
				},
			},
			segmentSize:      2 * chunkSize,
			expSegmentSizes:  []int{2 * chunkSize, 2 * chunkSize, chunkSize},
			expSegmentsCount: 3,
		},
		// 2: When the segment size is smaller than the size of 2 chunks
		// the last segment should still create a new segment.
		{
			chks: [][]chunks.Meta{
				{
					chk1,
					chk2,
					chk3,
				},
			},
			segmentSize:      2*chunkSize - 1,
			expSegmentSizes:  []int{chunkSize, chunkSize, chunkSize},
			expSegmentsCount: 3,
		},
		// 3: When the segment is smaller than a single chunk
		// it should still be written by ignoring the max segment size.
		{
			chks: [][]chunks.Meta{
				{
					chk1,
				},
			},
			segmentSize:      chunkSize - 1,
			expSegmentSizes:  []int{chunkSize},
			expSegmentsCount: 1,
		},
		// 4: All chunks are bigger than the max segment size, but
		// these should still be written even when this will result in a bigger segment than the set size.
		// Each segment will hold a single chunk.
		{
			chks: [][]chunks.Meta{
				{
					chk1,
					chk2,
					chk3,
				},
			},
			segmentSize:      1,
			expSegmentSizes:  []int{chunkSize, chunkSize, chunkSize},
			expSegmentsCount: 3,
		},
		// 5: Adding multiple batches of chunks.
		{
			chks: [][]chunks.Meta{
				{
					chk1,
					chk2,
					chk3,
				},
				{
					chk4,
					chk5,
				},
			},
			segmentSize:      3 * chunkSize,
			expSegmentSizes:  []int{3 * chunkSize, 2 * chunkSize},
			expSegmentsCount: 2,
		},
		// 6: Adding multiple batches of chunks.
		{
			chks: [][]chunks.Meta{
				{
					chk1,
				},
				{
					chk2,
					chk3,
				},
				{
					chk4,
				},
			},
			segmentSize:      2 * chunkSize,
			expSegmentSizes:  []int{2 * chunkSize, 2 * chunkSize},
			expSegmentsCount: 2,
		},
	}

	for i, test := range tests {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			tempDir, err := ioutil.TempDir("", "test_chunk_writer")
			require.NoError(t, err)
			defer func() { require.NoError(t, os.RemoveAll(tempDir)) }()

			chunkw, err := chunks.NewWriterWithSegSize(tempDir, chunks.SegmentHeaderSize+int64(test.segmentSize))
			require.NoError(t, err)

			for _, chks := range test.chks {
				require.NoError(t, chunkw.WriteChunks(chks...))
			}
			require.NoError(t, chunkw.Close())

			files, err := ioutil.ReadDir(tempDir)
			require.NoError(t, err)
			require.Equal(t, test.expSegmentsCount, len(files), "expected segments count mismatch")

			// Verify that all data is written to the segments.
			sizeExp := 0
			sizeAct := 0

			for _, chks := range test.chks {
				for _, chk := range chks {
					l := make([]byte, binary.MaxVarintLen32)
					sizeExp += binary.PutUvarint(l, uint64(len(chk.Chunk.Bytes()))) // The length field.
					sizeExp += chunks.ChunkEncodingSize
					sizeExp += len(chk.Chunk.Bytes()) // The data itself.
					sizeExp += crc32.Size             // The 4 bytes of crc32.
				}
			}
			sizeExp += test.expSegmentsCount * chunks.SegmentHeaderSize // The segment header bytes.

			for i, f := range files {
				size := int(f.Size())
				// Verify that the segment is the same or smaller than the expected size.
				require.GreaterOrEqual(t, chunks.SegmentHeaderSize+test.expSegmentSizes[i], size, "Segment:%v should NOT be bigger than:%v actual:%v", i, chunks.SegmentHeaderSize+test.expSegmentSizes[i], size)

				sizeAct += size
			}
			require.Equal(t, sizeExp, sizeAct)

			// Check the content of the chunks.
			r, err := chunks.NewDirReader(tempDir, nil)
			require.NoError(t, err)
			defer func() { require.NoError(t, r.Close()) }()

			for _, chks := range test.chks {
				for _, chkExp := range chks {
					chkAct, err := r.Chunk(chkExp.Ref)
					require.NoError(t, err)
					require.Equal(t, chkExp.Chunk.Bytes(), chkAct.Bytes())
				}
			}
		})
	}
}
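
// The size bookkeeping above encodes the on-disk layout of a single chunk in a
// segment file: a uvarint length field, one encoding byte, the chunk data, and
// a CRC32 checksum. The same arithmetic as a small helper, mirroring the loop
// above (the name chunkDiskSize is an illustrative assumption):
func chunkDiskSize(chk chunks.Meta) int {
	l := make([]byte, binary.MaxVarintLen32)
	size := binary.PutUvarint(l, uint64(len(chk.Chunk.Bytes()))) // The length field.
	size += chunks.ChunkEncodingSize                             // The encoding byte.
	size += len(chk.Chunk.Bytes())                               // The data itself.
	size += crc32.Size                                           // The 4 bytes of crc32.
	return size
}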

func TestRangeForTimestamp(t *testing.T) {
	type args struct {
		t     int64
		width int64
	}
	tests := []struct {
		args     args
		expected int64
	}{
		{args{0, 5}, 5},
		{args{1, 5}, 5},
		{args{5, 5}, 10},
		{args{6, 5}, 10},
		{args{13, 5}, 15},
		{args{95, 5}, 100},
	}
	for _, tt := range tests {
		got := rangeForTimestamp(tt.args.t, tt.args.width)
		require.Equal(t, tt.expected, got)
	}
}
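
// The table above pins down rangeForTimestamp's contract: it returns the
// exclusive upper bound of the width-aligned window containing t, i.e. the
// smallest multiple of width strictly greater than t. A sketch of equivalent
// arithmetic for non-negative t, matching every row of the table (the real
// implementation may differ in how it handles negative timestamps):
func rangeForTimestampSketch(t, width int64) int64 {
	return (t/width)*width + width
}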

// TestChunkReader_ConcurrentReads checks that the chunk result can be read concurrently.
// Regression test for https://github.com/prometheus/prometheus/pull/6514.
func TestChunkReader_ConcurrentReads(t *testing.T) {
	chks := []chunks.Meta{
		tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 1}}),
		tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 2}}),
		tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 3}}),
		tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 4}}),
		tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 5}}),
	}

	tempDir, err := ioutil.TempDir("", "test_chunk_writer")
	require.NoError(t, err)
	defer func() { require.NoError(t, os.RemoveAll(tempDir)) }()

	chunkw, err := chunks.NewWriter(tempDir)
	require.NoError(t, err)

	require.NoError(t, chunkw.WriteChunks(chks...))
	require.NoError(t, chunkw.Close())

	r, err := chunks.NewDirReader(tempDir, nil)
	require.NoError(t, err)

	var wg sync.WaitGroup
	for _, chk := range chks {
		for i := 0; i < 100; i++ {
			wg.Add(1)
			go func(chunk chunks.Meta) {
				defer wg.Done()

				chkAct, err := r.Chunk(chunk.Ref)
				require.NoError(t, err)
				require.Equal(t, chunk.Chunk.Bytes(), chkAct.Bytes())
			}(chk)
		}
		wg.Wait()
	}
	require.NoError(t, r.Close())
}
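
// Note that each goroutine above receives the chunk as an argument instead of
// capturing the loop variable: chk is reused across iterations, so capturing
// it directly would race with the loop. A minimal sketch of the same
// fan-out-and-wait idiom (the helper name is an illustrative assumption):
func runConcurrently(n int, f func()) {
	var wg sync.WaitGroup
	for i := 0; i < n; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			f()
		}()
	}
	wg.Wait()
}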

// TestCompactHead ensures that the head compaction
// creates a block that is ready for loading and
// does not cause data loss.
// This test:
// * opens a storage;
// * appends values;
// * compacts the head; and
// * queries the db to ensure the samples are present from the compacted head.
func TestCompactHead(t *testing.T) {
	dbDir, err := ioutil.TempDir("", "testFlush")
	require.NoError(t, err)
	defer func() { require.NoError(t, os.RemoveAll(dbDir)) }()

	// Open a DB and append data to the WAL.
	tsdbCfg := &Options{
		RetentionDuration: int64(time.Hour * 24 * 15 / time.Millisecond),
		NoLockfile:        true,
		MinBlockDuration:  int64(time.Hour * 2 / time.Millisecond),
		MaxBlockDuration:  int64(time.Hour * 2 / time.Millisecond),
		WALCompression:    true,
	}

	db, err := Open(dbDir, log.NewNopLogger(), prometheus.NewRegistry(), tsdbCfg, nil)
	require.NoError(t, err)
	ctx := context.Background()
	app := db.Appender(ctx)
	var expSamples []sample
	maxt := 100
	for i := 0; i < maxt; i++ {
		val := rand.Float64()
		_, err := app.Append(0, labels.FromStrings("a", "b"), int64(i), val)
		require.NoError(t, err)
		expSamples = append(expSamples, sample{int64(i), val})
	}
	require.NoError(t, app.Commit())

	// Compact the Head to create a new block.
	require.NoError(t, db.CompactHead(NewRangeHead(db.Head(), 0, int64(maxt)-1)))
	require.NoError(t, db.Close())

	// Delete everything but the new block and
	// reopen the db to query it to ensure it includes the head data.
	require.NoError(t, deleteNonBlocks(db.Dir()))
	db, err = Open(dbDir, log.NewNopLogger(), prometheus.NewRegistry(), tsdbCfg, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(db.Blocks()))
	require.Equal(t, int64(maxt), db.Head().MinTime())
	defer func() { require.NoError(t, db.Close()) }()
	querier, err := db.Querier(context.Background(), 0, int64(maxt)-1)
	require.NoError(t, err)
	defer func() { require.NoError(t, querier.Close()) }()

	seriesSet := querier.Select(false, nil, &labels.Matcher{Type: labels.MatchEqual, Name: "a", Value: "b"})
	var actSamples []sample

	for seriesSet.Next() {
		series := seriesSet.At().Iterator()
		for series.Next() {
			time, val := series.At()
			actSamples = append(actSamples, sample{int64(time), val})
		}
		require.NoError(t, series.Err())
	}
	require.Equal(t, expSamples, actSamples)
	require.NoError(t, seriesSet.Err())
}

// deleteNonBlocks removes everything in dbDir that is not a block directory
// and then verifies that only block directories remain.
func deleteNonBlocks(dbDir string) error {
	dirs, err := ioutil.ReadDir(dbDir)
	if err != nil {
		return err
	}
	for _, dir := range dirs {
		if ok := isBlockDir(dir); !ok {
			if err := os.RemoveAll(filepath.Join(dbDir, dir.Name())); err != nil {
				return err
			}
		}
	}
	dirs, err = ioutil.ReadDir(dbDir)
	if err != nil {
		return err
	}
	for _, dir := range dirs {
		if ok := isBlockDir(dir); !ok {
			return errors.Errorf("root folder:%v still has non block directory:%v", dbDir, dir.Name())
		}
	}
	return nil
}
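
// isBlockDir (used above) accepts only directories whose names parse as block
// ULIDs, such as "01BZJ9WJQPWHGNC2W4J9TA62KC" seen elsewhere in this file. A
// hedged sketch of an equivalent check; the actual isBlockDir implementation
// may differ in details:
func looksLikeBlockDir(fi os.FileInfo) bool {
	if !fi.IsDir() {
		return false
	}
	_, err := ulid.ParseStrict(fi.Name())
	return err == nil
}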

func TestOpen_VariousBlockStates(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "test")
	require.NoError(t, err)
	t.Cleanup(func() {
		require.NoError(t, os.RemoveAll(tmpDir))
	})

	var (
		expectedLoadedDirs  = map[string]struct{}{}
		expectedRemovedDirs = map[string]struct{}{}
		expectedIgnoredDirs = map[string]struct{}{}
	)

	{
		// Ok blocks; should be loaded.
		expectedLoadedDirs[createBlock(t, tmpDir, genSeries(10, 2, 0, 10))] = struct{}{}
		expectedLoadedDirs[createBlock(t, tmpDir, genSeries(10, 2, 10, 20))] = struct{}{}
	}
	{
		// Block to repair; should be repaired & loaded.
		dbDir := filepath.Join("testdata", "repair_index_version", "01BZJ9WJQPWHGNC2W4J9TA62KC")
		outDir := filepath.Join(tmpDir, "01BZJ9WJQPWHGNC2W4J9TA62KC")
		expectedLoadedDirs[outDir] = struct{}{}

		// Touch chunks dir in block.
		require.NoError(t, os.MkdirAll(filepath.Join(dbDir, "chunks"), 0o777))
		defer func() {
			require.NoError(t, os.RemoveAll(filepath.Join(dbDir, "chunks")))
		}()
		require.NoError(t, os.Mkdir(outDir, os.ModePerm))
		require.NoError(t, fileutil.CopyDirs(dbDir, outDir))
	}
	{
		// Missing meta.json; should be ignored and only logged.
		// TODO(bwplotka): Probably add metric.
		dir := createBlock(t, tmpDir, genSeries(10, 2, 20, 30))
		expectedIgnoredDirs[dir] = struct{}{}
		require.NoError(t, os.Remove(filepath.Join(dir, metaFilename)))
	}
	{
		// Tmp blocks during creation; those should be removed on start.
		dir := createBlock(t, tmpDir, genSeries(10, 2, 30, 40))
		require.NoError(t, fileutil.Replace(dir, dir+tmpForCreationBlockDirSuffix))
		expectedRemovedDirs[dir+tmpForCreationBlockDirSuffix] = struct{}{}

		// Tmp blocks during deletion; those should be removed on start.
		dir = createBlock(t, tmpDir, genSeries(10, 2, 40, 50))
		require.NoError(t, fileutil.Replace(dir, dir+tmpForDeletionBlockDirSuffix))
		expectedRemovedDirs[dir+tmpForDeletionBlockDirSuffix] = struct{}{}

		// Pre-2.21 tmp blocks; those should be removed on start.
		dir = createBlock(t, tmpDir, genSeries(10, 2, 50, 60))
		require.NoError(t, fileutil.Replace(dir, dir+tmpLegacy))
		expectedRemovedDirs[dir+tmpLegacy] = struct{}{}
	}
	{
		// One ok block; but two should be replaced.
		dir := createBlock(t, tmpDir, genSeries(10, 2, 50, 60))
		expectedLoadedDirs[dir] = struct{}{}
		m, _, err := readMetaFile(dir)
		require.NoError(t, err)

		compacted := createBlock(t, tmpDir, genSeries(10, 2, 50, 55))
		expectedRemovedDirs[compacted] = struct{}{}

		m.Compaction.Parents = append(m.Compaction.Parents,
			BlockDesc{ULID: ulid.MustParse(filepath.Base(compacted))},
			BlockDesc{ULID: ulid.MustNew(1, nil)},
			BlockDesc{ULID: ulid.MustNew(123, nil)},
		)
		// Regression test: Already removed parent can still be in the list, which was causing Open errors.
		m.Compaction.Parents = append(m.Compaction.Parents, BlockDesc{ULID: ulid.MustParse(filepath.Base(compacted))})
		m.Compaction.Parents = append(m.Compaction.Parents, BlockDesc{ULID: ulid.MustParse(filepath.Base(compacted))})
		_, err = writeMetaFile(log.NewLogfmtLogger(os.Stderr), dir, m)
		require.NoError(t, err)
	}

	opts := DefaultOptions()
	opts.RetentionDuration = 0
	db, err := Open(tmpDir, log.NewLogfmtLogger(os.Stderr), nil, opts, nil)
	require.NoError(t, err)

	loadedBlocks := db.Blocks()

	var loaded int
	for _, l := range loadedBlocks {
		if _, ok := expectedLoadedDirs[filepath.Join(tmpDir, l.meta.ULID.String())]; !ok {
			t.Fatal("unexpected block", l.meta.ULID, "was loaded")
		}
		loaded++
	}
	require.Equal(t, len(expectedLoadedDirs), loaded)
	require.NoError(t, db.Close())

	files, err := ioutil.ReadDir(tmpDir)
	require.NoError(t, err)

	var ignored int
	for _, f := range files {
		if _, ok := expectedRemovedDirs[filepath.Join(tmpDir, f.Name())]; ok {
			t.Fatal("expected", filepath.Join(tmpDir, f.Name()), "to be removed, but still exists")
		}
		if _, ok := expectedIgnoredDirs[filepath.Join(tmpDir, f.Name())]; ok {
			ignored++
		}
	}
	require.Equal(t, len(expectedIgnoredDirs), ignored)
}

func TestOneCheckpointPerCompactCall(t *testing.T) {
	blockRange := int64(1000)
	tsdbCfg := &Options{
		RetentionDuration: blockRange * 1000,
		NoLockfile:        true,
		MinBlockDuration:  blockRange,
		MaxBlockDuration:  blockRange,
	}

	tmpDir, err := ioutil.TempDir("", "test")
	require.NoError(t, err)
	t.Cleanup(func() {
		require.NoError(t, os.RemoveAll(tmpDir))
	})

	db, err := Open(tmpDir, log.NewNopLogger(), prometheus.NewRegistry(), tsdbCfg, nil)
	require.NoError(t, err)
	t.Cleanup(func() {
		require.NoError(t, db.Close())
	})
	db.DisableCompactions()

	// Case 1: Lots of uncompacted data in Head.

	lbls := labels.Labels{labels.Label{Name: "foo_d", Value: "choco_bar"}}
	// Append samples spanning 59 block ranges.
	app := db.Appender(context.Background())
	for i := int64(0); i < 60; i++ {
		_, err := app.Append(0, lbls, blockRange*i, rand.Float64())
		require.NoError(t, err)
		_, err = app.Append(0, lbls, (blockRange*i)+blockRange/2, rand.Float64())
		require.NoError(t, err)
		// Rotate the WAL file so that there are >3 files for a checkpoint to happen.
		require.NoError(t, db.head.wal.NextSegment())
	}
	require.NoError(t, app.Commit())

	// Check the existing WAL files.
	first, last, err := wal.Segments(db.head.wal.Dir())
	require.NoError(t, err)
	require.Equal(t, 0, first)
	require.Equal(t, 60, last)

	require.Equal(t, 0.0, prom_testutil.ToFloat64(db.head.metrics.checkpointCreationTotal))
	require.NoError(t, db.Compact())
	require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.checkpointCreationTotal))

	// As the data spans 59 block ranges, 58 go to disk and 1 remains in Head.
	require.Equal(t, 58, len(db.Blocks()))
	// Though WAL was truncated only once, head should be truncated after each compaction.
	require.Equal(t, 58.0, prom_testutil.ToFloat64(db.head.metrics.headTruncateTotal))

	// The compaction should have only truncated the first 2/3 of the WAL (while also rotating the files).
	first, last, err = wal.Segments(db.head.wal.Dir())
	require.NoError(t, err)
	require.Equal(t, 40, first)
	require.Equal(t, 61, last)

	// The first checkpoint would be for the first 2/3rd of the WAL, hence till 39.
	// That should be the last checkpoint.
	_, cno, err := wal.LastCheckpoint(db.head.wal.Dir())
	require.NoError(t, err)
	require.Equal(t, 39, cno)

	// Case 2: Old blocks on disk.
	// The above blocks will act as old blocks.

	// Creating a block to cover the data in the Head so that
	// Head will skip the data during replay and start fresh.
	blocks := db.Blocks()
	newBlockMint := blocks[len(blocks)-1].Meta().MaxTime
	newBlockMaxt := db.Head().MaxTime() + 1
	require.NoError(t, db.Close())

	createBlock(t, db.dir, genSeries(1, 1, newBlockMint, newBlockMaxt))

	db, err = Open(db.dir, log.NewNopLogger(), prometheus.NewRegistry(), tsdbCfg, nil)
	require.NoError(t, err)
	db.DisableCompactions()

	// 1 block more.
	require.Equal(t, 59, len(db.Blocks()))
	// No series in Head because of this new block.
	require.Equal(t, 0, int(db.head.NumSeries()))

	// Adding a sample way into the future.
	app = db.Appender(context.Background())
	_, err = app.Append(0, lbls, blockRange*120, rand.Float64())
	require.NoError(t, err)
	require.NoError(t, app.Commit())

	// The mint of head is the last block maxt, which means the gap between mint and maxt
	// of Head is too large. This will trigger many compactions.
	require.Equal(t, newBlockMaxt, db.head.MinTime())

	// Another WAL file was rotated.
	first, last, err = wal.Segments(db.head.wal.Dir())
	require.NoError(t, err)
	require.Equal(t, 40, first)
	require.Equal(t, 62, last)

	require.Equal(t, 0.0, prom_testutil.ToFloat64(db.head.metrics.checkpointCreationTotal))
	require.NoError(t, db.Compact())
	require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.checkpointCreationTotal))

	// No new blocks should be created as there was no data in between the new samples and the blocks.
	require.Equal(t, 59, len(db.Blocks()))

	// The compaction should have only truncated the first 2/3 of the WAL (while also rotating the files).
	first, last, err = wal.Segments(db.head.wal.Dir())
	require.NoError(t, err)
	require.Equal(t, 55, first)
	require.Equal(t, 63, last)

	// The first checkpoint would be for the first 2/3rd of the WAL, hence till 54.
	// That should be the last checkpoint.
	_, cno, err = wal.LastCheckpoint(db.head.wal.Dir())
	require.NoError(t, err)
	require.Equal(t, 54, cno)
}
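
// The segment expectations above follow from the head's WAL truncation rule:
// each Compact call truncates roughly the first two thirds of the live
// segments and writes a single checkpoint named after the last truncated
// segment. A back-of-the-envelope reading of the two cases exercised here
// (the exact rounding lives in the head truncation code and may differ):
//
//	first, last = 0, 60  -> truncate before segment 40, checkpoint 39
//	first, last = 40, 62 -> truncate before segment 55, checkpoint 54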

func TestNoPanicOnTSDBOpenError(t *testing.T) {
	tmpdir, err := ioutil.TempDir("", "test")
	require.NoError(t, err)
	t.Cleanup(func() {
		require.NoError(t, os.RemoveAll(tmpdir))
	})

	absdir, err := filepath.Abs(tmpdir)
	require.NoError(t, err)

	// Taking the file lock will cause TSDB startup error.
	lockf, _, err := fileutil.Flock(filepath.Join(absdir, "lock"))
	require.NoError(t, err)

	_, err = Open(tmpdir, nil, nil, DefaultOptions(), nil)
	require.Error(t, err)

	require.NoError(t, lockf.Release())
}

func TestLockfileMetric(t *testing.T) {
	cases := []struct {
		fileAlreadyExists bool
		lockFileDisabled  bool
		expectedValue     int
	}{
		{
			fileAlreadyExists: false,
			lockFileDisabled:  false,
			expectedValue:     lockfileCreatedCleanly,
		},
		{
			fileAlreadyExists: true,
			lockFileDisabled:  false,
			expectedValue:     lockfileReplaced,
		},
		{
			fileAlreadyExists: true,
			lockFileDisabled:  true,
			expectedValue:     lockfileDisabled,
		},
		{
			fileAlreadyExists: false,
			lockFileDisabled:  true,
			expectedValue:     lockfileDisabled,
		},
	}

	for _, c := range cases {
		t.Run(fmt.Sprintf("%+v", c), func(t *testing.T) {
			tmpdir, err := ioutil.TempDir("", "test")
			require.NoError(t, err)
			t.Cleanup(func() {
				require.NoError(t, os.RemoveAll(tmpdir))
			})
			absdir, err := filepath.Abs(tmpdir)
			require.NoError(t, err)

			// Test preconditions (file already exists + lockfile option).
			lockfilePath := filepath.Join(absdir, "lock")
			if c.fileAlreadyExists {
				err = ioutil.WriteFile(lockfilePath, []byte{}, 0o644)
				require.NoError(t, err)
			}
			opts := DefaultOptions()
			opts.NoLockfile = c.lockFileDisabled

			// Create the DB. This should create a lockfile and set the metric.
			db, err := Open(tmpdir, nil, nil, opts, nil)
			require.NoError(t, err)
			require.Equal(t, float64(c.expectedValue), prom_testutil.ToFloat64(db.metrics.lockfileCreatedCleanly))

			// Close the DB. This should delete the lockfile.
			require.NoError(t, db.Close())

			// Check that the lockfile is always deleted.
			if !c.lockFileDisabled {
				_, err = os.Stat(lockfilePath)
				require.Error(t, err, "lockfile was not deleted")
			}
		})
	}
}

func TestQuerier_ShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChunks(t *testing.T) {
	t.Skip("TODO: investigate why the process crashes in CI")

	const numRuns = 5

	for i := 1; i <= numRuns; i++ {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			testQuerierShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChunks(t)
		})
	}
}

func testQuerierShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChunks(t *testing.T) {
	const (
		numSeries                = 1000
		numStressIterations      = 10000
		minStressAllocationBytes = 128 * 1024
		maxStressAllocationBytes = 512 * 1024
	)

	db := openTestDB(t, nil, nil)
	defer func() {
		require.NoError(t, db.Close())
	}()

	// Disable compactions so we can control it.
	db.DisableCompactions()

	// Generate the metrics we're going to append.
	metrics := make([]labels.Labels, 0, numSeries)
	for i := 0; i < numSeries; i++ {
		metrics = append(metrics, labels.Labels{{Name: labels.MetricName, Value: fmt.Sprintf("test_%d", i)}})
	}

	// Push 1 sample every 15s for 2x the block duration period.
	ctx := context.Background()
	interval := int64(15 * time.Second / time.Millisecond)
	ts := int64(0)
	for ; ts < 2*DefaultBlockDuration; ts += interval {
		app := db.Appender(ctx)
		for _, metric := range metrics {
			_, err := app.Append(0, metric, ts, float64(ts))
			require.NoError(t, err)
		}
		require.NoError(t, app.Commit())
	}

	// Compact the TSDB head for the first time. We expect the head chunks file has been cut.
	require.NoError(t, db.Compact())
	require.Equal(t, float64(1), prom_testutil.ToFloat64(db.Head().metrics.headTruncateTotal))

	// Push more samples for another 1x block duration period.
	for ; ts < 3*DefaultBlockDuration; ts += interval {
		app := db.Appender(ctx)
		for _, metric := range metrics {
			_, err := app.Append(0, metric, ts, float64(ts))
			require.NoError(t, err)
		}
		require.NoError(t, app.Commit())
	}

	// At this point we expect 2 mmap-ed head chunks.

	// Get a querier and make sure it's closed only once the test is over.
	querier, err := db.Querier(ctx, 0, math.MaxInt64)
	require.NoError(t, err)
	defer func() {
		require.NoError(t, querier.Close())
	}()

	// Query back all series.
	hints := &storage.SelectHints{Start: 0, End: math.MaxInt64, Step: interval}
	seriesSet := querier.Select(true, hints, labels.MustNewMatcher(labels.MatchRegexp, labels.MetricName, ".+"))

	// Fetch samples iterators from all series.
	var iterators []chunkenc.Iterator
	actualSeries := 0
	for seriesSet.Next() {
		actualSeries++

		// Get the iterator and call Next() so that we're sure the chunk is loaded.
		it := seriesSet.At().Iterator()
		it.Next()
		it.At()

		iterators = append(iterators, it)
	}
	require.NoError(t, seriesSet.Err())
	require.Equal(t, actualSeries, numSeries)

	// Compact the TSDB head again.
	require.NoError(t, db.Compact())
	require.Equal(t, float64(2), prom_testutil.ToFloat64(db.Head().metrics.headTruncateTotal))

	// At this point we expect 1 head chunk has been deleted.

	// Stress the memory and call GC. This is required to increase the chances
	// the chunk memory area is released to the kernel.
	var buf []byte
	for i := 0; i < numStressIterations; i++ {
		//nolint:staticcheck
		buf = append(buf, make([]byte, minStressAllocationBytes+rand.Int31n(maxStressAllocationBytes-minStressAllocationBytes))...)
		if i%1000 == 0 {
			buf = nil
		}
	}

	// Iterate samples. Here we're summing them just to make sure no golang compiler
	// optimization triggers in case we discard the result of it.At().
	var sum float64
	var firstErr error
	for _, it := range iterators {
		for it.Next() {
			_, v := it.At()
			sum += v
		}

		if err := it.Err(); err != nil {
			firstErr = err
		}
	}

	// After having iterated all samples we also want to be sure no error occurred,
	// or that the only error was "cannot populate chunk XXX: not found". This error can occur
	// when the iterator tries to fetch a head chunk which has been offloaded because
	// of the head compaction in the meantime.
	if firstErr != nil && !strings.Contains(firstErr.Error(), "cannot populate chunk") {
		t.Fatalf("unexpected error: %s", firstErr.Error())
	}
}

func TestChunkQuerier_ShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChunks(t *testing.T) {
	t.Skip("TODO: investigate why the process crashes in CI")

	const numRuns = 5

	for i := 1; i <= numRuns; i++ {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			testChunkQuerierShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChunks(t)
		})
	}
}

func testChunkQuerierShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChunks(t *testing.T) {
	const (
		numSeries                = 1000
		numStressIterations      = 10000
		minStressAllocationBytes = 128 * 1024
		maxStressAllocationBytes = 512 * 1024
	)

	db := openTestDB(t, nil, nil)
	defer func() {
		require.NoError(t, db.Close())
	}()

	// Disable compactions so we can control it.
	db.DisableCompactions()

	// Generate the metrics we're going to append.
	metrics := make([]labels.Labels, 0, numSeries)
	for i := 0; i < numSeries; i++ {
		metrics = append(metrics, labels.Labels{{Name: labels.MetricName, Value: fmt.Sprintf("test_%d", i)}})
	}

	// Push 1 sample every 15s for 2x the block duration period.
	ctx := context.Background()
	interval := int64(15 * time.Second / time.Millisecond)
	ts := int64(0)
	for ; ts < 2*DefaultBlockDuration; ts += interval {
		app := db.Appender(ctx)
		for _, metric := range metrics {
			_, err := app.Append(0, metric, ts, float64(ts))
			require.NoError(t, err)
		}
		require.NoError(t, app.Commit())
	}

	// Compact the TSDB head for the first time. We expect the head chunks file has been cut.
	require.NoError(t, db.Compact())
	require.Equal(t, float64(1), prom_testutil.ToFloat64(db.Head().metrics.headTruncateTotal))

	// Push more samples for another 1x block duration period.
	for ; ts < 3*DefaultBlockDuration; ts += interval {
		app := db.Appender(ctx)
		for _, metric := range metrics {
			_, err := app.Append(0, metric, ts, float64(ts))
			require.NoError(t, err)
		}
		require.NoError(t, app.Commit())
	}

	// At this point we expect 2 mmap-ed head chunks.

	// Get a querier and make sure it's closed only once the test is over.
	querier, err := db.ChunkQuerier(ctx, 0, math.MaxInt64)
	require.NoError(t, err)
	defer func() {
		require.NoError(t, querier.Close())
	}()

	// Query back all series.
	hints := &storage.SelectHints{Start: 0, End: math.MaxInt64, Step: interval}
	seriesSet := querier.Select(true, hints, labels.MustNewMatcher(labels.MatchRegexp, labels.MetricName, ".+"))

	// Iterate all series and get their chunks.
	var chunks []chunkenc.Chunk
	actualSeries := 0
	for seriesSet.Next() {
		actualSeries++
		for it := seriesSet.At().Iterator(); it.Next(); {
			chunks = append(chunks, it.At().Chunk)
		}
	}
	require.NoError(t, seriesSet.Err())
	require.Equal(t, actualSeries, numSeries)

	// Compact the TSDB head again.
	require.NoError(t, db.Compact())
	require.Equal(t, float64(2), prom_testutil.ToFloat64(db.Head().metrics.headTruncateTotal))

	// At this point we expect 1 head chunk has been deleted.

	// Stress the memory and call GC. This is required to increase the chances
	// the chunk memory area is released to the kernel.
	var buf []byte
	for i := 0; i < numStressIterations; i++ {
		//nolint:staticcheck
		buf = append(buf, make([]byte, minStressAllocationBytes+rand.Int31n(maxStressAllocationBytes-minStressAllocationBytes))...)
		if i%1000 == 0 {
			buf = nil
		}
	}

	// Iterate chunks and read their bytes slice. Here we're computing the CRC32
	// just to iterate through the bytes slice. We don't really care why we read
	// this data; we just need to read it to make sure the memory address of the
	// []byte is still valid.
	chkCRC32 := newCRC32()
	for _, chunk := range chunks {
		chkCRC32.Reset()
		_, err := chkCRC32.Write(chunk.Bytes())
		require.NoError(t, err)
	}
}
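
// The allocation loop above is duplicated in both stress tests. A sketch of it
// as a shared helper, kept behaviorally identical to the inline versions; the
// name stressMemory is an illustrative assumption.
func stressMemory(iterations int, minAlloc, maxAlloc int32) {
	var buf []byte
	for i := 0; i < iterations; i++ {
		// Allocate a random-sized slab to churn the heap.
		buf = append(buf, make([]byte, minAlloc+rand.Int31n(maxAlloc-minAlloc))...)
		if i%1000 == 0 {
			buf = nil // Drop the reference so the GC can reclaim it.
		}
	}
	_ = buf
}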

func newTestDB(t *testing.T) *DB {
	dir, err := ioutil.TempDir("", "test")
	require.NoError(t, err)
	t.Cleanup(func() {
		require.NoError(t, os.RemoveAll(dir))
	})

	db, err := Open(dir, nil, nil, DefaultOptions(), nil)
	require.NoError(t, err)
	t.Cleanup(func() {
		require.NoError(t, db.Close())
	})
	return db
}
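
// Example usage of the helper above; cleanup is registered via t.Cleanup, so a
// caller only opens and uses the DB. An illustrative sketch, not an original
// test:
//
//	func TestSomething(t *testing.T) {
//		db := newTestDB(t)
//		app := db.Appender(context.Background())
//		_, err := app.Append(0, labels.FromStrings("foo", "bar"), 0, 1)
//		require.NoError(t, err)
//		require.NoError(t, app.Commit())
//	}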