2017-04-10 11:59:45 -07:00
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
2016-12-04 04:16:11 -08:00
package tsdb
2017-04-18 09:22:13 -07:00
import (
2018-06-04 05:35:36 -07:00
"fmt"
2017-04-18 09:22:13 -07:00
"io/ioutil"
2017-08-28 15:39:17 -07:00
"math"
2017-05-23 05:43:30 -07:00
"math/rand"
2017-04-18 09:22:13 -07:00
"os"
2018-05-25 14:19:32 -07:00
"path"
2018-05-28 13:00:36 -07:00
"path/filepath"
2017-08-28 15:39:17 -07:00
"sort"
2017-04-18 09:22:13 -07:00
"testing"
2018-06-04 05:35:36 -07:00
"time"
2017-04-18 09:22:13 -07:00
2018-12-04 02:30:49 -08:00
"github.com/go-kit/kit/log"
2018-05-28 13:00:36 -07:00
"github.com/oklog/ulid"
2017-05-17 07:43:01 -07:00
"github.com/pkg/errors"
2018-11-28 01:23:50 -08:00
"github.com/prometheus/client_golang/prometheus"
prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
2018-06-13 02:24:28 -07:00
"github.com/prometheus/tsdb/chunks"
"github.com/prometheus/tsdb/index"
2017-04-18 09:22:13 -07:00
"github.com/prometheus/tsdb/labels"
2017-12-06 17:06:14 -08:00
"github.com/prometheus/tsdb/testutil"
2019-01-28 03:24:49 -08:00
"github.com/prometheus/tsdb/tsdbutil"
2018-05-25 14:19:32 -07:00
"github.com/prometheus/tsdb/wal"
2017-04-18 09:22:13 -07:00
)
2017-08-28 15:39:17 -07:00
func openTestDB ( t testing . TB , opts * Options ) ( db * DB , close func ( ) ) {
2017-11-21 03:15:02 -08:00
tmpdir , err := ioutil . TempDir ( "" , "test" )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2017-08-28 15:39:17 -07:00
2017-11-21 03:15:02 -08:00
db , err = Open ( tmpdir , nil , nil , opts )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2017-08-28 15:39:17 -07:00
// Do not close the test database by default as it will deadlock on test failures.
2019-01-30 01:40:12 -08:00
return db , func ( ) {
testutil . Ok ( t , os . RemoveAll ( tmpdir ) )
}
2017-08-28 15:39:17 -07:00
}
2017-11-13 03:16:58 -08:00
// query runs a matcher query against the querier and fully expands its data.
2019-02-14 05:29:41 -08:00
func query ( t testing . TB , q Querier , matchers ... labels . Matcher ) map [ string ] [ ] tsdbutil . Sample {
2017-11-13 03:16:58 -08:00
ss , err := q . Select ( matchers ... )
2019-02-14 05:29:41 -08:00
defer func ( ) {
testutil . Ok ( t , q . Close ( ) )
} ( )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2017-11-13 03:16:58 -08:00
2019-02-14 05:29:41 -08:00
result := map [ string ] [ ] tsdbutil . Sample { }
2017-04-20 06:24:35 -07:00
for ss . Next ( ) {
series := ss . At ( )
2019-02-14 05:29:41 -08:00
samples := [ ] tsdbutil . Sample { }
2017-04-20 06:24:35 -07:00
it := series . Iterator ( )
for it . Next ( ) {
t , v := it . At ( )
samples = append ( samples , sample { t : t , v : v } )
}
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , it . Err ( ) )
2017-04-20 06:24:35 -07:00
name := series . Labels ( ) . String ( )
result [ name ] = samples
}
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , ss . Err ( ) )
2017-08-28 15:39:17 -07:00
return result
2017-04-20 06:24:35 -07:00
}
2018-05-28 13:00:36 -07:00
// Ensure that blocks are held in memory in their time order
// and not in ULID order as they are read from the directory.
func TestDB_reloadOrder ( t * testing . T ) {
2019-01-30 01:40:12 -08:00
db , delete := openTestDB ( t , nil )
defer func ( ) {
testutil . Ok ( t , db . Close ( ) )
delete ( )
} ( )
2018-05-28 13:00:36 -07:00
2018-12-29 03:20:51 -08:00
metas := [ ] BlockMeta {
{ MinTime : 90 , MaxTime : 100 } ,
{ MinTime : 70 , MaxTime : 80 } ,
{ MinTime : 100 , MaxTime : 110 } ,
2018-05-28 13:00:36 -07:00
}
for _ , m := range metas {
2019-01-28 03:24:49 -08:00
createBlock ( t , db . Dir ( ) , genSeries ( 1 , 1 , m . MinTime , m . MaxTime ) )
2018-05-28 13:00:36 -07:00
}
testutil . Ok ( t , db . reload ( ) )
blocks := db . Blocks ( )
2019-01-16 02:03:52 -08:00
for _ , b := range blocks {
b . meta . Stats . NumBytes = 0
}
2018-05-28 13:00:36 -07:00
testutil . Equals ( t , 3 , len ( blocks ) )
2018-12-29 03:20:51 -08:00
testutil . Equals ( t , metas [ 1 ] . MinTime , blocks [ 0 ] . Meta ( ) . MinTime )
testutil . Equals ( t , metas [ 1 ] . MaxTime , blocks [ 0 ] . Meta ( ) . MaxTime )
testutil . Equals ( t , metas [ 0 ] . MinTime , blocks [ 1 ] . Meta ( ) . MinTime )
testutil . Equals ( t , metas [ 0 ] . MaxTime , blocks [ 1 ] . Meta ( ) . MaxTime )
testutil . Equals ( t , metas [ 2 ] . MinTime , blocks [ 2 ] . Meta ( ) . MinTime )
testutil . Equals ( t , metas [ 2 ] . MaxTime , blocks [ 2 ] . Meta ( ) . MaxTime )
2018-05-28 13:00:36 -07:00
}
2017-04-18 09:22:13 -07:00
func TestDataAvailableOnlyAfterCommit ( t * testing . T ) {
2019-01-30 01:40:12 -08:00
db , delete := openTestDB ( t , nil )
defer func ( ) {
testutil . Ok ( t , db . Close ( ) )
delete ( )
} ( )
2017-04-18 09:22:13 -07:00
app := db . Appender ( )
2017-08-28 15:39:17 -07:00
_ , err := app . Add ( labels . FromStrings ( "foo" , "bar" ) , 0 , 0 )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2017-04-18 09:22:13 -07:00
2017-10-09 06:21:46 -07:00
querier , err := db . Querier ( 0 , 1 )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2017-11-13 03:16:58 -08:00
seriesSet := query ( t , querier , labels . NewEqualMatcher ( "foo" , "bar" ) )
2019-02-14 05:29:41 -08:00
testutil . Equals ( t , map [ string ] [ ] tsdbutil . Sample { } , seriesSet )
2017-04-18 09:22:13 -07:00
err = app . Commit ( )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2017-04-18 09:22:13 -07:00
2017-10-09 06:21:46 -07:00
querier , err = db . Querier ( 0 , 1 )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2017-04-18 09:22:13 -07:00
defer querier . Close ( )
2017-11-13 03:16:58 -08:00
seriesSet = query ( t , querier , labels . NewEqualMatcher ( "foo" , "bar" ) )
2017-08-28 15:39:17 -07:00
2019-02-14 05:29:41 -08:00
testutil . Equals ( t , map [ string ] [ ] tsdbutil . Sample { ` { foo="bar"} ` : { sample { t : 0 , v : 0 } } } , seriesSet )
2017-04-18 09:22:13 -07:00
}
func TestDataNotAvailableAfterRollback ( t * testing . T ) {
2019-01-30 01:40:12 -08:00
db , delete := openTestDB ( t , nil )
defer func ( ) {
testutil . Ok ( t , db . Close ( ) )
delete ( )
} ( )
2017-04-18 09:22:13 -07:00
app := db . Appender ( )
2017-08-28 15:39:17 -07:00
_ , err := app . Add ( labels . FromStrings ( "foo" , "bar" ) , 0 , 0 )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2017-04-18 09:22:13 -07:00
err = app . Rollback ( )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2017-04-18 09:22:13 -07:00
2017-10-09 06:21:46 -07:00
querier , err := db . Querier ( 0 , 1 )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2017-04-18 09:22:13 -07:00
defer querier . Close ( )
2017-11-13 03:16:58 -08:00
seriesSet := query ( t , querier , labels . NewEqualMatcher ( "foo" , "bar" ) )
2017-08-28 15:39:17 -07:00
2019-02-14 05:29:41 -08:00
testutil . Equals ( t , map [ string ] [ ] tsdbutil . Sample { } , seriesSet )
2017-04-18 09:22:13 -07:00
}
2017-04-28 06:24:28 -07:00
func TestDBAppenderAddRef ( t * testing . T ) {
2019-01-30 01:40:12 -08:00
db , delete := openTestDB ( t , nil )
defer func ( ) {
testutil . Ok ( t , db . Close ( ) )
delete ( )
} ( )
2017-04-28 06:24:28 -07:00
2017-05-18 07:09:30 -07:00
app1 := db . Appender ( )
2017-04-28 06:24:28 -07:00
2017-09-05 02:45:18 -07:00
ref1 , err := app1 . Add ( labels . FromStrings ( "a" , "b" ) , 123 , 0 )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2017-04-28 06:24:28 -07:00
2017-09-05 02:45:18 -07:00
// Reference should already work before commit.
err = app1 . AddFast ( ref1 , 124 , 1 )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2017-05-17 07:43:01 -07:00
2017-05-18 07:09:30 -07:00
err = app1 . Commit ( )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2017-05-17 07:43:01 -07:00
2017-05-18 07:09:30 -07:00
app2 := db . Appender ( )
2017-09-05 02:45:18 -07:00
// first ref should already work in next transaction.
err = app2 . AddFast ( ref1 , 125 , 0 )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2017-05-17 07:43:01 -07:00
2017-09-05 02:45:18 -07:00
ref2 , err := app2 . Add ( labels . FromStrings ( "a" , "b" ) , 133 , 1 )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2017-09-05 02:45:18 -07:00
2017-12-06 17:06:14 -08:00
testutil . Assert ( t , ref1 == ref2 , "" )
2017-09-05 02:45:18 -07:00
2017-04-28 06:24:28 -07:00
// Reference must be valid to add another sample.
2017-09-05 02:45:18 -07:00
err = app2 . AddFast ( ref2 , 143 , 2 )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2017-04-28 06:24:28 -07:00
2017-09-05 02:45:18 -07:00
err = app2 . AddFast ( 9999999 , 1 , 1 )
2018-09-20 23:23:01 -07:00
testutil . Equals ( t , ErrNotFound , errors . Cause ( err ) )
2017-08-28 15:39:17 -07:00
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , app2 . Commit ( ) )
2017-08-28 15:39:17 -07:00
2017-10-09 06:21:46 -07:00
q , err := db . Querier ( 0 , 200 )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2017-10-09 06:21:46 -07:00
2017-11-13 03:16:58 -08:00
res := query ( t , q , labels . NewEqualMatcher ( "a" , "b" ) )
2017-08-28 15:39:17 -07:00
2019-02-14 05:29:41 -08:00
testutil . Equals ( t , map [ string ] [ ] tsdbutil . Sample {
2018-05-07 05:39:54 -07:00
labels . FromStrings ( "a" , "b" ) . String ( ) : {
2019-02-14 05:29:41 -08:00
sample { t : 123 , v : 0 } ,
sample { t : 124 , v : 1 } ,
sample { t : 125 , v : 0 } ,
sample { t : 133 , v : 1 } ,
sample { t : 143 , v : 2 } ,
2017-08-28 15:39:17 -07:00
} ,
} , res )
2017-04-28 06:24:28 -07:00
}
2017-05-23 05:43:30 -07:00
func TestDeleteSimple ( t * testing . T ) {
numSamples := int64 ( 10 )
cases := [ ] struct {
2017-08-25 01:11:46 -07:00
intervals Intervals
2017-05-23 05:43:30 -07:00
remaint [ ] int64
} {
2019-01-08 09:08:41 -08:00
{
intervals : Intervals { { 0 , 3 } } ,
remaint : [ ] int64 { 4 , 5 , 6 , 7 , 8 , 9 } ,
} ,
{
intervals : Intervals { { 1 , 3 } } ,
remaint : [ ] int64 { 0 , 4 , 5 , 6 , 7 , 8 , 9 } ,
} ,
2017-05-23 05:43:30 -07:00
{
2017-08-25 01:11:46 -07:00
intervals : Intervals { { 1 , 3 } , { 4 , 7 } } ,
2017-05-23 05:43:30 -07:00
remaint : [ ] int64 { 0 , 8 , 9 } ,
} ,
2019-01-08 09:08:41 -08:00
{
intervals : Intervals { { 1 , 3 } , { 4 , 700 } } ,
remaint : [ ] int64 { 0 } ,
} ,
{ // This case is to ensure that labels and symbols are deleted.
intervals : Intervals { { 0 , 9 } } ,
remaint : [ ] int64 { } ,
} ,
2017-05-23 05:43:30 -07:00
}
Outer :
for _ , c := range cases {
2019-02-08 03:26:28 -08:00
db , delete := openTestDB ( t , nil )
defer func ( ) {
testutil . Ok ( t , db . Close ( ) )
delete ( )
} ( )
2019-01-08 09:08:41 -08:00
app := db . Appender ( )
smpls := make ( [ ] float64 , numSamples )
for i := int64 ( 0 ) ; i < numSamples ; i ++ {
smpls [ i ] = rand . Float64 ( )
app . Add ( labels . Labels { { "a" , "b" } } , i , smpls [ i ] )
}
testutil . Ok ( t , app . Commit ( ) )
2017-05-23 05:43:30 -07:00
// TODO(gouthamve): Reset the tombstones somehow.
// Delete the ranges.
for _ , r := range c . intervals {
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , db . Delete ( r . Mint , r . Maxt , labels . NewEqualMatcher ( "a" , "b" ) ) )
2017-05-23 05:43:30 -07:00
}
// Compare the result.
2017-10-09 06:21:46 -07:00
q , err := db . Querier ( 0 , numSamples )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2017-10-09 06:21:46 -07:00
2017-11-13 03:16:58 -08:00
res , err := q . Select ( labels . NewEqualMatcher ( "a" , "b" ) )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2017-05-23 05:43:30 -07:00
2019-01-28 03:24:49 -08:00
expSamples := make ( [ ] tsdbutil . Sample , 0 , len ( c . remaint ) )
2017-05-23 05:43:30 -07:00
for _ , ts := range c . remaint {
expSamples = append ( expSamples , sample { ts , smpls [ ts ] } )
}
2018-09-21 01:07:35 -07:00
expss := newMockSeriesSet ( [ ] Series {
2017-05-23 05:43:30 -07:00
newSeries ( map [ string ] string { "a" : "b" } , expSamples ) ,
} )
2019-01-08 09:08:41 -08:00
lns , err := q . LabelNames ( )
testutil . Ok ( t , err )
lvs , err := q . LabelValues ( "a" )
testutil . Ok ( t , err )
2017-05-23 05:43:30 -07:00
if len ( expSamples ) == 0 {
2019-01-08 09:08:41 -08:00
testutil . Equals ( t , 0 , len ( lns ) )
testutil . Equals ( t , 0 , len ( lvs ) )
2017-12-06 17:06:14 -08:00
testutil . Assert ( t , res . Next ( ) == false , "" )
2017-05-23 05:43:30 -07:00
continue
2019-01-08 09:08:41 -08:00
} else {
testutil . Equals ( t , 1 , len ( lns ) )
testutil . Equals ( t , 1 , len ( lvs ) )
testutil . Equals ( t , "a" , lns [ 0 ] )
testutil . Equals ( t , "b" , lvs [ 0 ] )
2017-05-23 05:43:30 -07:00
}
for {
eok , rok := expss . Next ( ) , res . Next ( )
2017-12-06 17:06:14 -08:00
testutil . Equals ( t , eok , rok )
2017-05-23 05:43:30 -07:00
if ! eok {
continue Outer
}
sexp := expss . At ( )
sres := res . At ( )
2017-12-06 17:06:14 -08:00
testutil . Equals ( t , sexp . Labels ( ) , sres . Labels ( ) )
2017-05-23 05:43:30 -07:00
smplExp , errExp := expandSeriesIterator ( sexp . Iterator ( ) )
smplRes , errRes := expandSeriesIterator ( sres . Iterator ( ) )
2017-12-06 17:06:14 -08:00
testutil . Equals ( t , errExp , errRes )
testutil . Equals ( t , smplExp , smplRes )
2017-05-23 05:43:30 -07:00
}
}
}
2017-08-28 15:39:17 -07:00
func TestAmendDatapointCausesError ( t * testing . T ) {
2019-01-30 01:40:12 -08:00
db , delete := openTestDB ( t , nil )
defer func ( ) {
testutil . Ok ( t , db . Close ( ) )
delete ( )
} ( )
2017-08-28 15:39:17 -07:00
app := db . Appender ( )
_ , err := app . Add ( labels . Labels { } , 0 , 0 )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
testutil . Ok ( t , app . Commit ( ) )
2017-08-28 15:39:17 -07:00
app = db . Appender ( )
_ , err = app . Add ( labels . Labels { } , 0 , 1 )
2017-12-06 17:06:14 -08:00
testutil . Equals ( t , ErrAmendSample , err )
testutil . Ok ( t , app . Rollback ( ) )
2017-08-28 15:39:17 -07:00
}
func TestDuplicateNaNDatapointNoAmendError ( t * testing . T ) {
2019-01-30 01:40:12 -08:00
db , delete := openTestDB ( t , nil )
defer func ( ) {
testutil . Ok ( t , db . Close ( ) )
delete ( )
} ( )
2017-08-28 15:39:17 -07:00
app := db . Appender ( )
_ , err := app . Add ( labels . Labels { } , 0 , math . NaN ( ) )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
testutil . Ok ( t , app . Commit ( ) )
2017-08-28 15:39:17 -07:00
app = db . Appender ( )
_ , err = app . Add ( labels . Labels { } , 0 , math . NaN ( ) )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2017-08-28 15:39:17 -07:00
}
func TestNonDuplicateNaNDatapointsCausesAmendError ( t * testing . T ) {
2019-01-30 01:40:12 -08:00
db , delete := openTestDB ( t , nil )
defer func ( ) {
testutil . Ok ( t , db . Close ( ) )
delete ( )
} ( )
2017-08-28 15:39:17 -07:00
app := db . Appender ( )
_ , err := app . Add ( labels . Labels { } , 0 , math . Float64frombits ( 0x7ff0000000000001 ) )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
testutil . Ok ( t , app . Commit ( ) )
2017-08-28 15:39:17 -07:00
app = db . Appender ( )
_ , err = app . Add ( labels . Labels { } , 0 , math . Float64frombits ( 0x7ff0000000000002 ) )
2017-12-06 17:06:14 -08:00
testutil . Equals ( t , ErrAmendSample , err )
2017-08-28 15:39:17 -07:00
}
func TestSkippingInvalidValuesInSameTxn ( t * testing . T ) {
2019-01-30 01:40:12 -08:00
db , delete := openTestDB ( t , nil )
defer func ( ) {
testutil . Ok ( t , db . Close ( ) )
delete ( )
} ( )
2017-08-28 15:39:17 -07:00
// Append AmendedValue.
app := db . Appender ( )
_ , err := app . Add ( labels . Labels { { "a" , "b" } } , 0 , 1 )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2017-08-28 15:39:17 -07:00
_ , err = app . Add ( labels . Labels { { "a" , "b" } } , 0 , 2 )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
testutil . Ok ( t , app . Commit ( ) )
2017-08-28 15:39:17 -07:00
// Make sure the right value is stored.
2017-10-09 06:21:46 -07:00
q , err := db . Querier ( 0 , 10 )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2017-10-09 06:21:46 -07:00
2017-11-13 03:16:58 -08:00
ssMap := query ( t , q , labels . NewEqualMatcher ( "a" , "b" ) )
2017-08-28 15:39:17 -07:00
2019-02-14 05:29:41 -08:00
testutil . Equals ( t , map [ string ] [ ] tsdbutil . Sample {
labels . New ( labels . Label { "a" , "b" } ) . String ( ) : { sample { 0 , 1 } } ,
2017-08-28 15:39:17 -07:00
} , ssMap )
// Append Out of Order Value.
app = db . Appender ( )
_ , err = app . Add ( labels . Labels { { "a" , "b" } } , 10 , 3 )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2017-08-28 15:39:17 -07:00
_ , err = app . Add ( labels . Labels { { "a" , "b" } } , 7 , 5 )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
testutil . Ok ( t , app . Commit ( ) )
2017-08-28 15:39:17 -07:00
2017-10-09 06:21:46 -07:00
q , err = db . Querier ( 0 , 10 )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2017-10-09 06:21:46 -07:00
2017-11-13 03:16:58 -08:00
ssMap = query ( t , q , labels . NewEqualMatcher ( "a" , "b" ) )
2017-08-28 15:39:17 -07:00
2019-02-14 05:29:41 -08:00
testutil . Equals ( t , map [ string ] [ ] tsdbutil . Sample {
labels . New ( labels . Label { "a" , "b" } ) . String ( ) : { sample { 0 , 1 } , sample { 10 , 3 } } ,
2017-08-28 15:39:17 -07:00
} , ssMap )
}
2017-10-03 05:06:26 -07:00
func TestDB_Snapshot ( t * testing . T ) {
2019-01-30 01:40:12 -08:00
db , delete := openTestDB ( t , nil )
defer delete ( )
2017-10-03 05:06:26 -07:00
// append data
app := db . Appender ( )
mint := int64 ( 1414141414000 )
for i := 0 ; i < 1000 ; i ++ {
_ , err := app . Add ( labels . FromStrings ( "foo" , "bar" ) , mint + int64 ( i ) , 1.0 )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2017-10-03 05:06:26 -07:00
}
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , app . Commit ( ) )
testutil . Ok ( t , app . Rollback ( ) )
2017-10-03 05:06:26 -07:00
// create snapshot
snap , err := ioutil . TempDir ( "" , "snap" )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2018-02-28 03:04:55 -08:00
2019-03-19 06:31:57 -07:00
defer func ( ) {
testutil . Ok ( t , os . RemoveAll ( snap ) )
} ( )
2018-02-28 03:04:55 -08:00
testutil . Ok ( t , db . Snapshot ( snap , true ) )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , db . Close ( ) )
2017-10-03 05:06:26 -07:00
// reopen DB from snapshot
db , err = Open ( snap , nil , nil , nil )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2019-01-30 01:40:12 -08:00
defer func ( ) { testutil . Ok ( t , db . Close ( ) ) } ( )
2017-10-03 05:06:26 -07:00
2017-10-09 06:21:46 -07:00
querier , err := db . Querier ( mint , mint + 1000 )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2019-02-11 01:25:57 -08:00
defer func ( ) { testutil . Ok ( t , querier . Close ( ) ) } ( )
2017-10-03 05:06:26 -07:00
// sum values
2017-11-13 03:16:58 -08:00
seriesSet , err := querier . Select ( labels . NewEqualMatcher ( "foo" , "bar" ) )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2017-11-13 03:16:58 -08:00
2017-10-03 05:06:26 -07:00
sum := 0.0
for seriesSet . Next ( ) {
series := seriesSet . At ( ) . Iterator ( )
for series . Next ( ) {
_ , v := series . At ( )
sum += v
}
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , series . Err ( ) )
2017-10-03 05:06:26 -07:00
}
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , seriesSet . Err ( ) )
2018-09-20 23:23:01 -07:00
testutil . Equals ( t , 1000.0 , sum )
2017-10-03 05:06:26 -07:00
}
2017-11-22 04:28:06 -08:00
func TestDB_SnapshotWithDelete ( t * testing . T ) {
numSamples := int64 ( 10 )
2019-01-30 01:40:12 -08:00
db , delete := openTestDB ( t , nil )
defer delete ( )
2017-11-22 04:28:06 -08:00
app := db . Appender ( )
smpls := make ( [ ] float64 , numSamples )
for i := int64 ( 0 ) ; i < numSamples ; i ++ {
smpls [ i ] = rand . Float64 ( )
app . Add ( labels . Labels { { "a" , "b" } } , i , smpls [ i ] )
}
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , app . Commit ( ) )
2017-11-22 04:28:06 -08:00
cases := [ ] struct {
intervals Intervals
remaint [ ] int64
} {
{
intervals : Intervals { { 1 , 3 } , { 4 , 7 } } ,
remaint : [ ] int64 { 0 , 8 , 9 } ,
} ,
}
Outer :
for _ , c := range cases {
// TODO(gouthamve): Reset the tombstones somehow.
// Delete the ranges.
for _ , r := range c . intervals {
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , db . Delete ( r . Mint , r . Maxt , labels . NewEqualMatcher ( "a" , "b" ) ) )
2017-11-22 04:28:06 -08:00
}
// create snapshot
snap , err := ioutil . TempDir ( "" , "snap" )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2018-02-28 03:04:55 -08:00
2019-03-19 06:31:57 -07:00
defer func ( ) {
testutil . Ok ( t , os . RemoveAll ( snap ) )
} ( )
2018-02-28 03:04:55 -08:00
testutil . Ok ( t , db . Snapshot ( snap , true ) )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , db . Close ( ) )
2017-11-22 04:28:06 -08:00
// reopen DB from snapshot
db , err = Open ( snap , nil , nil , nil )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2019-01-30 01:40:12 -08:00
defer func ( ) { testutil . Ok ( t , db . Close ( ) ) } ( )
2017-11-22 04:28:06 -08:00
// Compare the result.
q , err := db . Querier ( 0 , numSamples )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2019-01-30 01:40:12 -08:00
defer func ( ) { testutil . Ok ( t , q . Close ( ) ) } ( )
2017-11-22 04:28:06 -08:00
2017-11-29 23:58:52 -08:00
res , err := q . Select ( labels . NewEqualMatcher ( "a" , "b" ) )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2017-11-22 04:28:06 -08:00
2019-01-28 03:24:49 -08:00
expSamples := make ( [ ] tsdbutil . Sample , 0 , len ( c . remaint ) )
2017-11-22 04:28:06 -08:00
for _ , ts := range c . remaint {
expSamples = append ( expSamples , sample { ts , smpls [ ts ] } )
}
2018-09-21 01:07:35 -07:00
expss := newMockSeriesSet ( [ ] Series {
2017-11-22 04:28:06 -08:00
newSeries ( map [ string ] string { "a" : "b" } , expSamples ) ,
} )
if len ( expSamples ) == 0 {
2017-12-06 17:06:14 -08:00
testutil . Assert ( t , res . Next ( ) == false , "" )
2017-11-22 04:28:06 -08:00
continue
}
for {
eok , rok := expss . Next ( ) , res . Next ( )
2017-12-06 17:06:14 -08:00
testutil . Equals ( t , eok , rok )
2017-11-22 04:28:06 -08:00
if ! eok {
continue Outer
}
sexp := expss . At ( )
sres := res . At ( )
2017-12-06 17:06:14 -08:00
testutil . Equals ( t , sexp . Labels ( ) , sres . Labels ( ) )
2017-11-22 04:28:06 -08:00
smplExp , errExp := expandSeriesIterator ( sexp . Iterator ( ) )
smplRes , errRes := expandSeriesIterator ( sres . Iterator ( ) )
2017-12-06 17:06:14 -08:00
testutil . Equals ( t , errExp , errRes )
testutil . Equals ( t , smplExp , smplRes )
2017-11-22 04:28:06 -08:00
}
}
}
2017-08-28 15:39:17 -07:00
// TestDB_e2e is an end-to-end test: it appends randomized samples for 8
// series, then runs each selector over 1000 random time ranges and
// checks the query results against an in-memory map of the appended
// data. NOTE: the sequence of global math/rand calls determines the
// data, so the statement order of the rand calls below is significant.
func TestDB_e2e(t *testing.T) {
	const (
		numDatapoints = 1000
		numRanges     = 1000
		timeInterval  = int64(3)
	)
	// Create 8 series with 1000 data-points of different ranges and run queries.
	lbls := [][]labels.Label{
		{
			{"a", "b"},
			{"instance", "localhost:9090"},
			{"job", "prometheus"},
		},
		{
			{"a", "b"},
			{"instance", "127.0.0.1:9090"},
			{"job", "prometheus"},
		},
		{
			{"a", "b"},
			{"instance", "127.0.0.1:9090"},
			{"job", "prom-k8s"},
		},
		{
			{"a", "b"},
			{"instance", "localhost:9090"},
			{"job", "prom-k8s"},
		},
		{
			{"a", "c"},
			{"instance", "localhost:9090"},
			{"job", "prometheus"},
		},
		{
			{"a", "c"},
			{"instance", "127.0.0.1:9090"},
			{"job", "prometheus"},
		},
		{
			{"a", "c"},
			{"instance", "127.0.0.1:9090"},
			{"job", "prom-k8s"},
		},
		{
			{"a", "c"},
			{"instance", "localhost:9090"},
			{"job", "prom-k8s"},
		},
	}

	// seriesMap records, per label-set string, every sample we append —
	// this is the ground truth the queries are checked against.
	seriesMap := map[string][]tsdbutil.Sample{}
	for _, l := range lbls {
		seriesMap[labels.New(l...).String()] = []tsdbutil.Sample{}
	}

	db, delete := openTestDB(t, nil)
	defer func() {
		testutil.Ok(t, db.Close())
		delete()
	}()

	app := db.Appender()

	for _, l := range lbls {
		lset := labels.New(l...)
		series := []tsdbutil.Sample{}

		// Start each series at a random timestamp in [0, 300) and step
		// forward by a random 1..timeInterval increment per sample.
		ts := rand.Int63n(300)
		for i := 0; i < numDatapoints; i++ {
			v := rand.Float64()

			series = append(series, sample{ts, v})

			_, err := app.Add(lset, ts, v)
			testutil.Ok(t, err)

			ts += rand.Int63n(timeInterval) + 1
		}

		seriesMap[lset.String()] = series
	}

	testutil.Ok(t, app.Commit())

	// Query each selector on 1000 random time-ranges.
	queries := []struct {
		ms []labels.Matcher
	}{
		{
			ms: []labels.Matcher{labels.NewEqualMatcher("a", "b")},
		},
		{
			ms: []labels.Matcher{
				labels.NewEqualMatcher("a", "b"),
				labels.NewEqualMatcher("job", "prom-k8s"),
			},
		},
		{
			ms: []labels.Matcher{
				labels.NewEqualMatcher("a", "c"),
				labels.NewEqualMatcher("instance", "localhost:9090"),
				labels.NewEqualMatcher("job", "prometheus"),
			},
		},
		// TODO: Add Regexp Matchers.
	}

	for _, qry := range queries {
		// Determine which of the 8 label sets this selector matches.
		matched := labels.Slice{}
		for _, ls := range lbls {
			s := labels.Selector(qry.ms)
			if s.Matches(ls) {
				matched = append(matched, ls)
			}
		}

		sort.Sort(matched)

		for i := 0; i < numRanges; i++ {
			mint := rand.Int63n(300)
			maxt := mint + rand.Int63n(timeInterval*int64(numDatapoints))

			expected := map[string][]tsdbutil.Sample{}

			// Build the mockSeriesSet.
			for _, m := range matched {
				smpls := boundedSamples(seriesMap[m.String()], mint, maxt)
				if len(smpls) > 0 {
					expected[m.String()] = smpls
				}
			}

			q, err := db.Querier(mint, maxt)
			testutil.Ok(t, err)

			ss, err := q.Select(qry.ms...)
			testutil.Ok(t, err)

			result := map[string][]tsdbutil.Sample{}

			for ss.Next() {
				x := ss.At()

				smpls, err := expandSeriesIterator(x.Iterator())
				testutil.Ok(t, err)

				if len(smpls) > 0 {
					result[x.Labels().String()] = smpls
				}
			}

			testutil.Ok(t, ss.Err())
			testutil.Equals(t, expected, result)

			q.Close()
		}
	}
}
2017-11-10 12:19:39 -08:00
func TestWALFlushedOnDBClose ( t * testing . T ) {
2019-01-30 01:40:12 -08:00
db , delete := openTestDB ( t , nil )
defer delete ( )
2017-11-10 12:19:39 -08:00
2018-02-23 07:04:50 -08:00
dirDb := db . Dir ( )
2017-11-10 12:19:39 -08:00
lbls := labels . Labels { labels . Label { Name : "labelname" , Value : "labelvalue" } }
app := db . Appender ( )
2018-02-23 07:04:50 -08:00
_ , err := app . Add ( lbls , 0 , 1 )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
testutil . Ok ( t , app . Commit ( ) )
2017-11-10 12:19:39 -08:00
2017-12-10 10:02:01 -08:00
testutil . Ok ( t , db . Close ( ) )
2017-11-10 12:19:39 -08:00
2018-02-23 07:04:50 -08:00
db , err = Open ( dirDb , nil , nil , nil )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2019-01-30 01:40:12 -08:00
defer func ( ) { testutil . Ok ( t , db . Close ( ) ) } ( )
2017-11-10 12:19:39 -08:00
q , err := db . Querier ( 0 , 1 )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2017-11-10 12:19:39 -08:00
values , err := q . LabelValues ( "labelname" )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2018-09-20 23:23:01 -07:00
testutil . Equals ( t , [ ] string { "labelvalue" } , values )
2017-11-10 12:19:39 -08:00
}
2017-11-23 05:27:10 -08:00
2019-03-25 16:38:12 -07:00
func TestWALSegmentSizeOptions ( t * testing . T ) {
tests := map [ int ] func ( dbdir string , segmentSize int ) {
// Default Wal Size.
0 : func ( dbDir string , segmentSize int ) {
files , err := ioutil . ReadDir ( filepath . Join ( dbDir , "wal" ) )
testutil . Ok ( t , err )
for _ , f := range files [ : len ( files ) - 1 ] {
testutil . Equals ( t , int64 ( DefaultOptions . WALSegmentSize ) , f . Size ( ) , "WAL file size doesn't match WALSegmentSize option, filename: %v" , f . Name ( ) )
}
lastFile := files [ len ( files ) - 1 ]
testutil . Assert ( t , int64 ( DefaultOptions . WALSegmentSize ) > lastFile . Size ( ) , "last WAL file size is not smaller than the WALSegmentSize option, filename: %v" , lastFile . Name ( ) )
} ,
// Custom Wal Size.
2 * 32 * 1024 : func ( dbDir string , segmentSize int ) {
files , err := ioutil . ReadDir ( filepath . Join ( dbDir , "wal" ) )
testutil . Assert ( t , len ( files ) > 1 , "current WALSegmentSize should result in more than a single WAL file." )
testutil . Ok ( t , err )
for _ , f := range files [ : len ( files ) - 1 ] {
testutil . Equals ( t , int64 ( segmentSize ) , f . Size ( ) , "WAL file size doesn't match WALSegmentSize option, filename: %v" , f . Name ( ) )
}
lastFile := files [ len ( files ) - 1 ]
testutil . Assert ( t , int64 ( segmentSize ) > lastFile . Size ( ) , "last WAL file size is not smaller than the WALSegmentSize option, filename: %v" , lastFile . Name ( ) )
} ,
// Wal disabled.
- 1 : func ( dbDir string , segmentSize int ) {
if _ , err := os . Stat ( filepath . Join ( dbDir , "wal" ) ) ; ! os . IsNotExist ( err ) {
t . Fatal ( "wal directory is present when the wal is disabled" )
}
} ,
2018-12-18 10:56:51 -08:00
}
2019-03-25 16:38:12 -07:00
for segmentSize , testFunc := range tests {
t . Run ( fmt . Sprintf ( "WALSegmentSize %d test" , segmentSize ) , func ( t * testing . T ) {
options := * DefaultOptions
options . WALSegmentSize = segmentSize
db , delete := openTestDB ( t , & options )
defer delete ( )
app := db . Appender ( )
for i := int64 ( 0 ) ; i < 155 ; i ++ {
_ , err := app . Add ( labels . Labels { labels . Label { Name : "wal" , Value : "size" } } , i , rand . Float64 ( ) )
testutil . Ok ( t , err )
testutil . Ok ( t , app . Commit ( ) )
}
2018-12-18 10:56:51 -08:00
2019-03-25 16:38:12 -07:00
dbDir := db . Dir ( )
db . Close ( )
testFunc ( dbDir , options . WALSegmentSize )
} )
2018-12-18 10:56:51 -08:00
}
}
2017-11-22 04:34:50 -08:00
func TestTombstoneClean ( t * testing . T ) {
numSamples := int64 ( 10 )
2019-01-30 01:40:12 -08:00
db , delete := openTestDB ( t , nil )
defer delete ( )
2017-11-22 04:34:50 -08:00
app := db . Appender ( )
smpls := make ( [ ] float64 , numSamples )
for i := int64 ( 0 ) ; i < numSamples ; i ++ {
smpls [ i ] = rand . Float64 ( )
app . Add ( labels . Labels { { "a" , "b" } } , i , smpls [ i ] )
}
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , app . Commit ( ) )
2017-11-22 04:34:50 -08:00
cases := [ ] struct {
intervals Intervals
remaint [ ] int64
} {
{
intervals : Intervals { { 1 , 3 } , { 4 , 7 } } ,
remaint : [ ] int64 { 0 , 8 , 9 } ,
} ,
}
for _ , c := range cases {
// Delete the ranges.
// create snapshot
snap , err := ioutil . TempDir ( "" , "snap" )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2018-02-28 03:04:55 -08:00
2019-03-19 06:31:57 -07:00
defer func ( ) {
testutil . Ok ( t , os . RemoveAll ( snap ) )
} ( )
2018-02-28 03:04:55 -08:00
testutil . Ok ( t , db . Snapshot ( snap , true ) )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , db . Close ( ) )
2017-11-22 04:34:50 -08:00
// reopen DB from snapshot
db , err = Open ( snap , nil , nil , nil )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2017-12-10 10:02:01 -08:00
defer db . Close ( )
2017-11-22 04:34:50 -08:00
for _ , r := range c . intervals {
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , db . Delete ( r . Mint , r . Maxt , labels . NewEqualMatcher ( "a" , "b" ) ) )
2017-11-22 04:34:50 -08:00
}
// All of the setup for THIS line.
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , db . CleanTombstones ( ) )
2017-11-22 04:34:50 -08:00
// Compare the result.
q , err := db . Querier ( 0 , numSamples )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2017-12-10 10:02:01 -08:00
defer q . Close ( )
2017-11-22 04:34:50 -08:00
2017-11-29 23:45:23 -08:00
res , err := q . Select ( labels . NewEqualMatcher ( "a" , "b" ) )
2017-12-06 17:06:14 -08:00
testutil . Ok ( t , err )
2017-11-22 04:34:50 -08:00
2019-01-28 03:24:49 -08:00
expSamples := make ( [ ] tsdbutil . Sample , 0 , len ( c . remaint ) )
2017-11-22 04:34:50 -08:00
for _ , ts := range c . remaint {
expSamples = append ( expSamples , sample { ts , smpls [ ts ] } )
}
2018-09-21 01:07:35 -07:00
expss := newMockSeriesSet ( [ ] Series {
2017-11-22 04:34:50 -08:00
newSeries ( map [ string ] string { "a" : "b" } , expSamples ) ,
} )
if len ( expSamples ) == 0 {
2017-12-06 17:06:14 -08:00
testutil . Assert ( t , res . Next ( ) == false , "" )
2017-11-22 04:34:50 -08:00
continue
}
for {
eok , rok := expss . Next ( ) , res . Next ( )
2017-12-06 17:06:14 -08:00
testutil . Equals ( t , eok , rok )
2017-11-22 04:34:50 -08:00
if ! eok {
break
}
sexp := expss . At ( )
sres := res . At ( )
2017-12-06 17:06:14 -08:00
testutil . Equals ( t , sexp . Labels ( ) , sres . Labels ( ) )
2017-11-22 04:34:50 -08:00
smplExp , errExp := expandSeriesIterator ( sexp . Iterator ( ) )
smplRes , errRes := expandSeriesIterator ( sres . Iterator ( ) )
2017-12-06 17:06:14 -08:00
testutil . Equals ( t , errExp , errRes )
testutil . Equals ( t , smplExp , smplRes )
2017-11-22 04:34:50 -08:00
}
2018-11-07 07:52:41 -08:00
for _ , b := range db . Blocks ( ) {
2018-11-14 08:40:01 -08:00
testutil . Equals ( t , newMemTombstones ( ) , b . tombstones )
2017-11-22 04:34:50 -08:00
}
}
}
2017-11-29 23:45:23 -08:00
2018-06-04 12:18:44 -07:00
// TestTombstoneCleanFail tests that a failing TombstoneClean doesn't leave any blocks behind.
// When TombstoneClean errors the original block that should be rebuilt doesn't get deleted so
// if TombstoneClean leaves any blocks behind these will overlap.
func TestTombstoneCleanFail(t *testing.T) {
	db, delete := openTestDB(t, nil)
	defer func() {
		testutil.Ok(t, db.Close())
		delete()
	}()

	// Directories of the blocks that must still exist after the failed clean.
	var expectedBlockDirs []string

	// Create some empty blocks pending for compaction.
	// totalBlocks should be >=2 so we have enough blocks to trigger compaction failure.
	totalBlocks := 2
	for i := 0; i < totalBlocks; i++ {
		blockDir := createBlock(t, db.Dir(), genSeries(1, 1, 0, 0))
		block, err := OpenBlock(nil, blockDir, nil)
		testutil.Ok(t, err)
		// Add some some fake tombstones to trigger the compaction.
		tomb := newMemTombstones()
		tomb.addInterval(0, Interval{0, 1})
		block.tombstones = tomb

		// Register the block directly on the DB, bypassing reload.
		db.blocks = append(db.blocks, block)
		expectedBlockDirs = append(expectedBlockDirs, blockDir)
	}

	// Initialize the mockCompactorFailing with a room for a single compaction iteration.
	// mockCompactorFailing will fail on the second iteration so we can check if the cleanup works as expected.
	db.compactor = &mockCompactorFailing{
		t:      t,
		blocks: db.blocks,
		max:    totalBlocks + 1,
	}

	// The compactor should trigger a failure here.
	testutil.NotOk(t, db.CleanTombstones())

	// Now check that the CleanTombstones didn't leave any blocks behind after a failure.
	actualBlockDirs, err := blockDirs(db.dir)
	testutil.Ok(t, err)
	testutil.Equals(t, expectedBlockDirs, actualBlockDirs)
}
2018-06-04 12:18:44 -07:00
// mockCompactorFailing creates a new empty block on every write and fails when reached the max allowed total.
type mockCompactorFailing struct {
	t      *testing.T
	blocks []*Block // Blocks created so far (including any pre-seeded by the test).
	max    int      // Once len(blocks) reaches max, Write returns an error.
}
// Plan implements the Compactor interface; it never proposes any compaction.
func (*mockCompactorFailing) Plan(dir string) ([]string, error) {
	return nil, nil
}
2018-06-27 06:47:11 -07:00
// Write creates one empty block per call and verifies that every block
// created so far is persisted on disk. Once c.max blocks exist it returns an
// error to simulate a compaction failure.
func (c *mockCompactorFailing) Write(dest string, b BlockReader, mint, maxt int64, parent *BlockMeta) (ulid.ULID, error) {
	if len(c.blocks) >= c.max {
		return ulid.ULID{}, fmt.Errorf("the compactor already did the maximum allowed blocks so it is time to fail")
	}

	block, err := OpenBlock(nil, createBlock(c.t, dest, genSeries(1, 1, 0, 0)), nil)
	testutil.Ok(c.t, err)
	testutil.Ok(c.t, block.Close()) // Close block as we won't be using anywhere.
	c.blocks = append(c.blocks, block)

	// Now check that all expected blocks are actually persisted on disk.
	// This way we make sure that the we have some blocks that are supposed to be removed.
	var expectedBlocks []string
	for _, b := range c.blocks {
		expectedBlocks = append(expectedBlocks, filepath.Join(dest, b.Meta().ULID.String()))
	}
	actualBlockDirs, err := blockDirs(dest)
	testutil.Ok(c.t, err)

	testutil.Equals(c.t, expectedBlocks, actualBlockDirs)

	return block.Meta().ULID, nil
}
2018-11-15 04:20:54 -08:00
// Compact implements the Compactor interface; it is a no-op in this mock.
func (*mockCompactorFailing) Compact(dest string, dirs []string, open []*Block) (ulid.ULID, error) {
	return ulid.ULID{}, nil
}
2019-01-16 02:03:52 -08:00
func TestTimeRetention ( t * testing . T ) {
2019-01-30 01:40:12 -08:00
db , delete := openTestDB ( t , & Options {
2019-01-16 02:03:52 -08:00
BlockRanges : [ ] int64 { 1000 } ,
} )
2019-01-30 01:40:12 -08:00
defer func ( ) {
testutil . Ok ( t , db . Close ( ) )
delete ( )
} ( )
2017-11-23 05:27:10 -08:00
2019-01-16 02:03:52 -08:00
blocks := [ ] * BlockMeta {
{ MinTime : 500 , MaxTime : 900 } , // Oldest block
{ MinTime : 1000 , MaxTime : 1500 } ,
{ MinTime : 1500 , MaxTime : 2000 } , // Newest Block
}
2017-11-23 05:27:10 -08:00
2019-01-16 02:03:52 -08:00
for _ , m := range blocks {
2019-01-28 03:24:49 -08:00
createBlock ( t , db . Dir ( ) , genSeries ( 10 , 10 , m . MinTime , m . MaxTime ) )
2019-01-16 02:03:52 -08:00
}
2017-11-23 05:27:10 -08:00
2019-01-16 02:03:52 -08:00
testutil . Ok ( t , db . reload ( ) ) // Reload the db to register the new blocks.
testutil . Equals ( t , len ( blocks ) , len ( db . Blocks ( ) ) ) // Ensure all blocks are registered.
2018-02-28 03:04:55 -08:00
2019-01-16 02:03:52 -08:00
db . opts . RetentionDuration = uint64 ( blocks [ 2 ] . MaxTime - blocks [ 1 ] . MinTime )
testutil . Ok ( t , db . reload ( ) )
2017-11-23 05:27:10 -08:00
2019-01-16 02:03:52 -08:00
expBlocks := blocks [ 1 : ]
actBlocks := db . Blocks ( )
2017-11-23 05:27:10 -08:00
2019-01-16 02:03:52 -08:00
testutil . Equals ( t , 1 , int ( prom_testutil . ToFloat64 ( db . metrics . timeRetentionCount ) ) , "metric retention count mismatch" )
testutil . Equals ( t , len ( expBlocks ) , len ( actBlocks ) )
testutil . Equals ( t , expBlocks [ 0 ] . MaxTime , actBlocks [ 0 ] . meta . MaxTime )
testutil . Equals ( t , expBlocks [ len ( expBlocks ) - 1 ] . MaxTime , actBlocks [ len ( actBlocks ) - 1 ] . meta . MaxTime )
}
2017-11-23 05:27:10 -08:00
2019-01-16 02:03:52 -08:00
// TestSizeRetention checks that when the total block size exceeds
// Options.MaxBytes, a reload deletes the oldest block and that the
// blocksBytes metric tracks the actual on-disk size throughout.
func TestSizeRetention(t *testing.T) {
	db, delete := openTestDB(t, &Options{
		BlockRanges: []int64{100},
	})
	defer func() {
		testutil.Ok(t, db.Close())
		delete()
	}()

	blocks := []*BlockMeta{
		{MinTime: 100, MaxTime: 200}, // Oldest block
		{MinTime: 200, MaxTime: 300},
		{MinTime: 300, MaxTime: 400},
		{MinTime: 400, MaxTime: 500},
		{MinTime: 500, MaxTime: 600}, // Newest Block
	}

	for _, m := range blocks {
		createBlock(t, db.Dir(), genSeries(100, 10, m.MinTime, m.MaxTime))
	}

	// Test that registered size matches the actual disk size.
	testutil.Ok(t, db.reload())                                       // Reload the db to register the new db size.
	testutil.Equals(t, len(blocks), len(db.Blocks()))                 // Ensure all blocks are registered.
	expSize := int64(prom_testutil.ToFloat64(db.metrics.blocksBytes)) // Use the the actual internal metrics.
	actSize := dbDiskSize(db.Dir())
	testutil.Equals(t, expSize, actSize, "registered size doesn't match actual disk size")

	// Decrease the max bytes limit so that a delete is triggered.
	// Check total size, total count and check that the oldest block was deleted.
	firstBlockSize := db.Blocks()[0].Size()
	sizeLimit := actSize - firstBlockSize
	db.opts.MaxBytes = sizeLimit // Set the new db size limit one block smaller that the actual size.
	testutil.Ok(t, db.reload())  // Reload the db to register the new db size.

	expBlocks := blocks[1:]
	actBlocks := db.Blocks()
	expSize = int64(prom_testutil.ToFloat64(db.metrics.blocksBytes))
	actRetentCount := int(prom_testutil.ToFloat64(db.metrics.sizeRetentionCount))
	actSize = dbDiskSize(db.Dir())

	testutil.Equals(t, 1, actRetentCount, "metric retention count mismatch")
	testutil.Equals(t, actSize, expSize, "metric db size doesn't match actual disk size")
	testutil.Assert(t, expSize <= sizeLimit, "actual size (%v) is expected to be less than or equal to limit (%v)", expSize, sizeLimit)
	testutil.Equals(t, len(blocks)-1, len(actBlocks), "new block count should be decreased from:%v to:%v", len(blocks), len(blocks)-1)
	testutil.Equals(t, expBlocks[0].MaxTime, actBlocks[0].meta.MaxTime, "maxT mismatch of the first block")
	testutil.Equals(t, expBlocks[len(expBlocks)-1].MaxTime, actBlocks[len(actBlocks)-1].meta.MaxTime, "maxT mismatch of the last block")

}
2017-11-23 05:27:10 -08:00
2019-01-16 02:03:52 -08:00
func dbDiskSize ( dir string ) int64 {
var statSize int64
filepath . Walk ( dir , func ( path string , info os . FileInfo , err error ) error {
// Include only index,tombstone and chunks.
if filepath . Dir ( path ) == chunkDir ( filepath . Dir ( filepath . Dir ( path ) ) ) ||
info . Name ( ) == indexFilename ||
info . Name ( ) == tombstoneFilename {
statSize += info . Size ( )
}
return nil
} )
return statSize
2017-11-23 05:27:10 -08:00
}
2017-12-17 10:08:21 -08:00
// TestNotMatcherSelectsLabelsUnsetSeries checks that negated matchers also
// select series on which the matched label is not set at all.
func TestNotMatcherSelectsLabelsUnsetSeries(t *testing.T) {
	db, delete := openTestDB(t, nil)
	defer func() {
		testutil.Ok(t, db.Close())
		delete()
	}()

	labelpairs := []labels.Labels{
		labels.FromStrings("a", "abcd", "b", "abcde"),
		labels.FromStrings("labelname", "labelvalue"),
	}

	app := db.Appender()
	for _, lbls := range labelpairs {
		_, err := app.Add(lbls, 0, 1)
		testutil.Ok(t, err)
	}
	testutil.Ok(t, app.Commit())

	cases := []struct {
		selector labels.Selector
		series   []labels.Labels
	}{{
		// "lname" is unset on every series, so the negation matches all.
		selector: labels.Selector{
			labels.Not(labels.NewEqualMatcher("lname", "lvalue")),
		},
		series: labelpairs,
	}, {
		selector: labels.Selector{
			labels.NewEqualMatcher("a", "abcd"),
			labels.Not(labels.NewEqualMatcher("b", "abcde")),
		},
		series: []labels.Labels{},
	}, {
		selector: labels.Selector{
			labels.NewEqualMatcher("a", "abcd"),
			labels.Not(labels.NewEqualMatcher("b", "abc")),
		},
		series: []labels.Labels{labelpairs[0]},
	}, {
		selector: labels.Selector{
			labels.Not(labels.NewMustRegexpMatcher("a", "abd.*")),
		},
		series: labelpairs,
	}, {
		selector: labels.Selector{
			labels.Not(labels.NewMustRegexpMatcher("a", "abc.*")),
		},
		series: labelpairs[1:],
	}, {
		// "c" is unset everywhere, so the negated regexp matches all series.
		selector: labels.Selector{
			labels.Not(labels.NewMustRegexpMatcher("c", "abd.*")),
		},
		series: labelpairs,
	}, {
		selector: labels.Selector{
			labels.Not(labels.NewMustRegexpMatcher("labelname", "labelvalue")),
		},
		series: labelpairs[:1],
	}}

	q, err := db.Querier(0, 10)
	testutil.Ok(t, err)

	defer func() { testutil.Ok(t, q.Close()) }()

	for _, c := range cases {
		ss, err := q.Select(c.selector...)
		testutil.Ok(t, err)

		lres, err := expandSeriesSet(ss)
		testutil.Ok(t, err)

		testutil.Equals(t, c.series, lres)
	}
}
// expandSeriesSet drains ss and returns the label sets of every series it
// contains, together with any error the set reports.
func expandSeriesSet(ss SeriesSet) ([]labels.Labels, error) {
	lsets := []labels.Labels{}
	for ss.Next() {
		lsets = append(lsets, ss.At().Labels())
	}
	return lsets, ss.Err()
}
2018-03-28 07:50:52 -07:00
2018-03-28 10:33:41 -07:00
// TestOverlappingBlocksDetectsAllOverlaps exercises OverlappingBlocks with a
// base set of non-overlapping block metas plus various injected overlap
// shapes (partial, multi-block, 1:1, nested) and checks the exact overlap
// groups reported for each.
func TestOverlappingBlocksDetectsAllOverlaps(t *testing.T) {
	// Create 10 blocks that does not overlap (0-10, 10-20, ..., 100-110) but in reverse order to ensure our algorithm
	// will handle that.
	var metas = make([]BlockMeta, 11)
	for i := 10; i >= 0; i-- {
		metas[i] = BlockMeta{MinTime: int64(i * 10), MaxTime: int64((i + 1) * 10)}
	}

	testutil.Assert(t, len(OverlappingBlocks(metas)) == 0, "we found unexpected overlaps")

	// Add overlapping blocks. We've to establish order again since we aren't interested
	// in trivial overlaps caused by unorderedness.
	add := func(ms ...BlockMeta) []BlockMeta {
		repl := append(append([]BlockMeta{}, metas...), ms...)
		sort.Slice(repl, func(i, j int) bool {
			return repl[i].MinTime < repl[j].MinTime
		})
		return repl
	}

	// o1 overlaps with 10-20.
	o1 := BlockMeta{MinTime: 15, MaxTime: 17}
	testutil.Equals(t, Overlaps{
		{Min: 15, Max: 17}: {metas[1], o1},
	}, OverlappingBlocks(add(o1)))

	// o2 overlaps with 20-30 and 30-40.
	o2 := BlockMeta{MinTime: 21, MaxTime: 31}
	testutil.Equals(t, Overlaps{
		{Min: 21, Max: 30}: {metas[2], o2},
		{Min: 30, Max: 31}: {o2, metas[3]},
	}, OverlappingBlocks(add(o2)))

	// o3a and o3b overlaps with 30-40 and each other.
	o3a := BlockMeta{MinTime: 33, MaxTime: 39}
	o3b := BlockMeta{MinTime: 34, MaxTime: 36}
	testutil.Equals(t, Overlaps{
		{Min: 34, Max: 36}: {metas[3], o3a, o3b},
	}, OverlappingBlocks(add(o3a, o3b)))

	// o4 is 1:1 overlap with 50-60.
	o4 := BlockMeta{MinTime: 50, MaxTime: 60}
	testutil.Equals(t, Overlaps{
		{Min: 50, Max: 60}: {metas[5], o4},
	}, OverlappingBlocks(add(o4)))

	// o5 overlaps with 60-70, 70-80 and 80-90.
	o5 := BlockMeta{MinTime: 61, MaxTime: 85}
	testutil.Equals(t, Overlaps{
		{Min: 61, Max: 70}: {metas[6], o5},
		{Min: 70, Max: 80}: {o5, metas[7]},
		{Min: 80, Max: 85}: {o5, metas[8]},
	}, OverlappingBlocks(add(o5)))

	// o6a overlaps with 90-100, 100-110 and o6b, o6b overlaps with 90-100 and o6a.
	o6a := BlockMeta{MinTime: 92, MaxTime: 105}
	o6b := BlockMeta{MinTime: 94, MaxTime: 99}
	testutil.Equals(t, Overlaps{
		{Min: 94, Max: 99}:   {metas[9], o6a, o6b},
		{Min: 100, Max: 105}: {o6a, metas[10]},
	}, OverlappingBlocks(add(o6a, o6b)))

	// All together.
	testutil.Equals(t, Overlaps{
		{Min: 15, Max: 17}: {metas[1], o1},
		{Min: 21, Max: 30}: {metas[2], o2}, {Min: 30, Max: 31}: {o2, metas[3]},
		{Min: 34, Max: 36}: {metas[3], o3a, o3b},
		{Min: 50, Max: 60}: {metas[5], o4},
		{Min: 61, Max: 70}: {metas[6], o5}, {Min: 70, Max: 80}: {o5, metas[7]}, {Min: 80, Max: 85}: {o5, metas[8]},
		{Min: 94, Max: 99}: {metas[9], o6a, o6b}, {Min: 100, Max: 105}: {o6a, metas[10]},
	}, OverlappingBlocks(add(o1, o2, o3a, o3b, o4, o5, o6a, o6b)))

	// Additional case.
	var nc1 []BlockMeta
	nc1 = append(nc1, BlockMeta{MinTime: 1, MaxTime: 5})
	nc1 = append(nc1, BlockMeta{MinTime: 2, MaxTime: 3})
	nc1 = append(nc1, BlockMeta{MinTime: 2, MaxTime: 3})
	nc1 = append(nc1, BlockMeta{MinTime: 2, MaxTime: 3})
	nc1 = append(nc1, BlockMeta{MinTime: 2, MaxTime: 3})
	nc1 = append(nc1, BlockMeta{MinTime: 2, MaxTime: 6})
	nc1 = append(nc1, BlockMeta{MinTime: 3, MaxTime: 5})
	nc1 = append(nc1, BlockMeta{MinTime: 5, MaxTime: 7})
	nc1 = append(nc1, BlockMeta{MinTime: 7, MaxTime: 10})
	nc1 = append(nc1, BlockMeta{MinTime: 8, MaxTime: 9})
	testutil.Equals(t, Overlaps{
		{Min: 2, Max: 3}: {nc1[0], nc1[1], nc1[2], nc1[3], nc1[4], nc1[5]}, // 1-5, 2-3, 2-3, 2-3, 2-3, 2,6
		{Min: 3, Max: 5}: {nc1[0], nc1[5], nc1[6]},                         // 1-5, 2-6, 3-5
		{Min: 5, Max: 6}: {nc1[5], nc1[7]},                                 // 2-6, 5-7
		{Min: 8, Max: 9}: {nc1[8], nc1[9]},                                 // 7-10, 8-9
	}, OverlappingBlocks(nc1))
}
2018-06-13 02:24:28 -07:00
// Regression test for https://github.com/prometheus/tsdb/issues/347
// It verifies that head compaction never produces chunks whose time range
// spans beyond the boundaries of the block they end up in, and that each
// block contains exactly one chunk for the single appended series.
func TestChunkAtBlockBoundary(t *testing.T) {
	db, delete := openTestDB(t, nil)
	defer func() {
		testutil.Ok(t, db.Close())
		delete()
	}()

	app := db.Appender()

	blockRange := DefaultOptions.BlockRanges[0]
	label := labels.FromStrings("foo", "bar")

	// Two samples per block range; both must land in one chunk per block.
	for i := int64(0); i < 3; i++ {
		_, err := app.Add(label, i*blockRange, 0)
		testutil.Ok(t, err)
		_, err = app.Add(label, i*blockRange+1000, 0)
		testutil.Ok(t, err)
	}

	err := app.Commit()
	testutil.Ok(t, err)

	err = db.compact()
	testutil.Ok(t, err)

	for _, block := range db.Blocks() {
		r, err := block.Index()
		testutil.Ok(t, err)
		// NOTE(review): deferred inside a loop, so all readers stay open
		// until the test returns; harmless with only a few blocks here.
		defer r.Close()

		meta := block.Meta()

		p, err := r.Postings(index.AllPostingsKey())
		testutil.Ok(t, err)

		var (
			lset labels.Labels
			chks []chunks.Meta
		)

		chunkCount := 0

		for p.Next() {
			err = r.Series(p.At(), &lset, &chks)
			testutil.Ok(t, err)
			for _, c := range chks {
				testutil.Assert(t, meta.MinTime <= c.MinTime && c.MaxTime <= meta.MaxTime,
					"chunk spans beyond block boundaries: [block.MinTime=%d, block.MaxTime=%d]; [chunk.MinTime=%d, chunk.MaxTime=%d]",
					meta.MinTime, meta.MaxTime, c.MinTime, c.MaxTime)
				chunkCount++
			}
		}

		testutil.Assert(t, chunkCount == 1, "expected 1 chunk in block %s, got %d", meta.ULID, chunkCount)
	}
}
2018-06-14 06:29:32 -07:00
func TestQuerierWithBoundaryChunks ( t * testing . T ) {
2019-01-30 01:40:12 -08:00
db , delete := openTestDB ( t , nil )
defer func ( ) {
testutil . Ok ( t , db . Close ( ) )
delete ( )
} ( )
2018-06-14 06:29:32 -07:00
app := db . Appender ( )
blockRange := DefaultOptions . BlockRanges [ 0 ]
label := labels . FromStrings ( "foo" , "bar" )
for i := int64 ( 0 ) ; i < 5 ; i ++ {
_ , err := app . Add ( label , i * blockRange , 0 )
testutil . Ok ( t , err )
}
err := app . Commit ( )
testutil . Ok ( t , err )
2018-09-20 23:24:01 -07:00
err = db . compact ( )
2018-06-14 06:29:32 -07:00
testutil . Ok ( t , err )
testutil . Assert ( t , len ( db . blocks ) >= 3 , "invalid test, less than three blocks in DB" )
q , err := db . Querier ( blockRange , 2 * blockRange )
testutil . Ok ( t , err )
defer q . Close ( )
// The requested interval covers 2 blocks, so the querier should contain 2 blocks.
count := len ( q . ( * querier ) . blocks )
testutil . Assert ( t , count == 2 , "expected 2 blocks in querier, got %d" , count )
}
2018-05-25 14:19:32 -07:00
2018-12-04 02:30:49 -08:00
// TestInitializeHeadTimestamp ensures that the h.minTime is set properly.
//   - no blocks no WAL: set to the time of the first appended sample
//   - no blocks with WAL: set to the smallest sample from the WAL
//   - with blocks no WAL: set to the last block maxT
//   - with blocks with WAL: same as above
func TestInitializeHeadTimestamp(t *testing.T) {
	t.Run("clean", func(t *testing.T) {
		dir, err := ioutil.TempDir("", "test_head_init")
		testutil.Ok(t, err)
		defer func() {
			testutil.Ok(t, os.RemoveAll(dir))
		}()

		db, err := Open(dir, nil, nil, nil)
		testutil.Ok(t, err)
		defer db.Close()

		// Should be set to init values if no WAL or blocks exist so far.
		testutil.Equals(t, int64(math.MaxInt64), db.head.MinTime())
		testutil.Equals(t, int64(math.MinInt64), db.head.MaxTime())

		// First added sample initializes the writable range.
		app := db.Appender()
		_, err = app.Add(labels.FromStrings("a", "b"), 1000, 1)
		testutil.Ok(t, err)

		testutil.Equals(t, int64(1000), db.head.MinTime())
		testutil.Equals(t, int64(1000), db.head.MaxTime())
	})
	t.Run("wal-only", func(t *testing.T) {
		dir, err := ioutil.TempDir("", "test_head_init")
		testutil.Ok(t, err)
		defer func() {
			testutil.Ok(t, os.RemoveAll(dir))
		}()

		// Write a WAL with two series and samples at t=5000 and t=15000.
		testutil.Ok(t, os.MkdirAll(path.Join(dir, "wal"), 0777))
		w, err := wal.New(nil, nil, path.Join(dir, "wal"))
		testutil.Ok(t, err)

		var enc RecordEncoder
		err = w.Log(
			enc.Series([]RefSeries{
				{Ref: 123, Labels: labels.FromStrings("a", "1")},
				{Ref: 124, Labels: labels.FromStrings("a", "2")},
			}, nil),
			enc.Samples([]RefSample{
				{Ref: 123, T: 5000, V: 1},
				{Ref: 124, T: 15000, V: 1},
			}, nil),
		)
		testutil.Ok(t, err)
		testutil.Ok(t, w.Close())

		db, err := Open(dir, nil, nil, nil)
		testutil.Ok(t, err)
		defer db.Close()

		// Head range must cover the full span of samples found in the WAL.
		testutil.Equals(t, int64(5000), db.head.MinTime())
		testutil.Equals(t, int64(15000), db.head.MaxTime())
	})
	t.Run("existing-block", func(t *testing.T) {
		dir, err := ioutil.TempDir("", "test_head_init")
		testutil.Ok(t, err)
		defer func() {
			testutil.Ok(t, os.RemoveAll(dir))
		}()

		createBlock(t, dir, genSeries(1, 1, 1000, 2000))

		db, err := Open(dir, nil, nil, nil)
		testutil.Ok(t, err)
		defer db.Close()

		// With a block present and no WAL, head starts at the block's maxT.
		testutil.Equals(t, int64(2000), db.head.MinTime())
		testutil.Equals(t, int64(2000), db.head.MaxTime())
	})
	t.Run("existing-block-and-wal", func(t *testing.T) {
		dir, err := ioutil.TempDir("", "test_head_init")
		testutil.Ok(t, err)
		defer func() {
			testutil.Ok(t, os.RemoveAll(dir))
		}()

		createBlock(t, dir, genSeries(1, 1, 1000, 6000))

		testutil.Ok(t, os.MkdirAll(path.Join(dir, "wal"), 0777))
		w, err := wal.New(nil, nil, path.Join(dir, "wal"))
		testutil.Ok(t, err)

		// WAL samples at t=5000 predate the block maxT (6000) and must be
		// dropped; the one at t=15000 extends the head.
		var enc RecordEncoder
		err = w.Log(
			enc.Series([]RefSeries{
				{Ref: 123, Labels: labels.FromStrings("a", "1")},
				{Ref: 124, Labels: labels.FromStrings("a", "2")},
			}, nil),
			enc.Samples([]RefSample{
				{Ref: 123, T: 5000, V: 1},
				{Ref: 124, T: 15000, V: 1},
			}, nil),
		)
		testutil.Ok(t, err)
		testutil.Ok(t, w.Close())

		r := prometheus.NewRegistry()

		db, err := Open(dir, nil, r, nil)
		testutil.Ok(t, err)
		defer db.Close()

		testutil.Equals(t, int64(6000), db.head.MinTime())
		testutil.Equals(t, int64(15000), db.head.MaxTime())
		// Check that old series has been GCed.
		testutil.Equals(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.series))
	})
}
2018-09-27 04:43:22 -07:00
2019-01-18 00:35:16 -08:00
// TestNoEmptyBlocks verifies that compaction never persists empty blocks:
// neither from an empty head, nor from a head/block whose samples were all
// deleted. The subtests share one DB, so the expected values of the
// compactor's "ran" metric are cumulative across subtests — do not reorder.
func TestNoEmptyBlocks(t *testing.T) {
	db, delete := openTestDB(t, &Options{
		BlockRanges: []int64{100},
	})
	defer func() {
		testutil.Ok(t, db.Close())
		delete()
	}()
	db.DisableCompactions()

	// Smallest span that makes the head eligible for compaction (1.5x the
	// block range, minus one to stay just inside).
	rangeToTriggerCompaction := db.opts.BlockRanges[0]/2*3 - 1
	defaultLabel := labels.FromStrings("foo", "bar")
	defaultMatcher := labels.NewMustRegexpMatcher("", ".*")

	t.Run("Test no blocks after compact with empty head.", func(t *testing.T) {
		testutil.Ok(t, db.compact())
		actBlocks, err := blockDirs(db.Dir())
		testutil.Ok(t, err)
		testutil.Equals(t, len(db.Blocks()), len(actBlocks))
		testutil.Equals(t, 0, len(actBlocks))
		testutil.Equals(t, 0, int(prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.ran)), "no compaction should be triggered here")
	})

	t.Run("Test no blocks after deleting all samples from head.", func(t *testing.T) {
		app := db.Appender()
		_, err := app.Add(defaultLabel, 1, 0)
		testutil.Ok(t, err)
		_, err = app.Add(defaultLabel, 2, 0)
		testutil.Ok(t, err)
		_, err = app.Add(defaultLabel, 3+rangeToTriggerCompaction, 0)
		testutil.Ok(t, err)
		testutil.Ok(t, app.Commit())
		testutil.Ok(t, db.Delete(math.MinInt64, math.MaxInt64, defaultMatcher))
		testutil.Ok(t, db.compact())
		testutil.Equals(t, 1, int(prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.ran)), "compaction should have been triggered here")

		actBlocks, err := blockDirs(db.Dir())
		testutil.Ok(t, err)
		testutil.Equals(t, len(db.Blocks()), len(actBlocks))
		testutil.Equals(t, 0, len(actBlocks))

		app = db.Appender()
		_, err = app.Add(defaultLabel, 1, 0)
		testutil.Assert(t, err == ErrOutOfBounds, "the head should be truncated so no samples in the past should be allowed")

		// Adding new blocks.
		currentTime := db.Head().MaxTime()
		_, err = app.Add(defaultLabel, currentTime, 0)
		testutil.Ok(t, err)
		_, err = app.Add(defaultLabel, currentTime+1, 0)
		testutil.Ok(t, err)
		_, err = app.Add(defaultLabel, currentTime+rangeToTriggerCompaction, 0)
		testutil.Ok(t, err)
		testutil.Ok(t, app.Commit())

		testutil.Ok(t, db.compact())
		testutil.Equals(t, 2, int(prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.ran)), "compaction should have been triggered here")
		actBlocks, err = blockDirs(db.Dir())
		testutil.Ok(t, err)
		testutil.Equals(t, len(db.Blocks()), len(actBlocks))
		testutil.Assert(t, len(actBlocks) == 1, "No blocks created when compacting with >0 samples")
	})

	t.Run(`When no new block is created from head, and there are some blocks on disk
	compaction should not run into infinite loop (was seen during development).`, func(t *testing.T) {
		oldBlocks := db.Blocks()
		app := db.Appender()
		currentTime := db.Head().MaxTime()
		_, err := app.Add(defaultLabel, currentTime, 0)
		testutil.Ok(t, err)
		_, err = app.Add(defaultLabel, currentTime+1, 0)
		testutil.Ok(t, err)
		_, err = app.Add(defaultLabel, currentTime+rangeToTriggerCompaction, 0)
		testutil.Ok(t, err)
		testutil.Ok(t, app.Commit())
		testutil.Ok(t, db.head.Delete(math.MinInt64, math.MaxInt64, defaultMatcher))
		testutil.Ok(t, db.compact())
		testutil.Equals(t, 3, int(prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.ran)), "compaction should have been triggered here")
		// The block set must be unchanged: nothing added, nothing removed.
		testutil.Equals(t, oldBlocks, db.Blocks())
	})

	t.Run("Test no blocks remaining after deleting all samples from disk.", func(t *testing.T) {
		currentTime := db.Head().MaxTime()
		blocks := []*BlockMeta{
			{MinTime: currentTime, MaxTime: currentTime + db.opts.BlockRanges[0]},
			{MinTime: currentTime + 100, MaxTime: currentTime + 100 + db.opts.BlockRanges[0]},
		}
		for _, m := range blocks {
			createBlock(t, db.Dir(), genSeries(2, 2, m.MinTime, m.MaxTime))
		}

		oldBlocks := db.Blocks()
		testutil.Ok(t, db.reload())                                      // Reload the db to register the new blocks.
		testutil.Equals(t, len(blocks)+len(oldBlocks), len(db.Blocks())) // Ensure all blocks are registered.
		testutil.Ok(t, db.Delete(math.MinInt64, math.MaxInt64, defaultMatcher))
		testutil.Ok(t, db.compact())
		testutil.Equals(t, 5, int(prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.ran)), "compaction should have been triggered here once for each block that have tombstones")

		actBlocks, err := blockDirs(db.Dir())
		testutil.Ok(t, err)
		testutil.Equals(t, len(db.Blocks()), len(actBlocks))
		testutil.Equals(t, 1, len(actBlocks), "All samples are deleted. Only the most recent block should remain after compaction.")
	})
}
2018-11-07 07:52:41 -08:00
func TestDB_LabelNames ( t * testing . T ) {
tests := [ ] struct {
// Add 'sampleLabels1' -> Test Head -> Compact -> Test Disk ->
// -> Add 'sampleLabels2' -> Test Head+Disk
sampleLabels1 [ ] [ 2 ] string // For checking head and disk separately.
// To test Head+Disk, sampleLabels2 should have
// at least 1 unique label name which is not in sampleLabels1.
sampleLabels2 [ ] [ 2 ] string // // For checking head and disk together.
exp1 [ ] string // after adding sampleLabels1.
exp2 [ ] string // after adding sampleLabels1 and sampleLabels2.
} {
{
sampleLabels1 : [ ] [ 2 ] string {
[ 2 ] string { "name1" , "" } ,
[ 2 ] string { "name3" , "" } ,
[ 2 ] string { "name2" , "" } ,
} ,
sampleLabels2 : [ ] [ 2 ] string {
[ 2 ] string { "name4" , "" } ,
[ 2 ] string { "name1" , "" } ,
} ,
exp1 : [ ] string { "name1" , "name2" , "name3" } ,
exp2 : [ ] string { "name1" , "name2" , "name3" , "name4" } ,
} ,
{
sampleLabels1 : [ ] [ 2 ] string {
[ 2 ] string { "name2" , "" } ,
[ 2 ] string { "name1" , "" } ,
[ 2 ] string { "name2" , "" } ,
} ,
sampleLabels2 : [ ] [ 2 ] string {
[ 2 ] string { "name6" , "" } ,
[ 2 ] string { "name0" , "" } ,
} ,
exp1 : [ ] string { "name1" , "name2" } ,
exp2 : [ ] string { "name0" , "name1" , "name2" , "name6" } ,
} ,
}
blockRange := DefaultOptions . BlockRanges [ 0 ]
// Appends samples into the database.
appendSamples := func ( db * DB , mint , maxt int64 , sampleLabels [ ] [ 2 ] string ) {
t . Helper ( )
app := db . Appender ( )
for i := mint ; i <= maxt ; i ++ {
for _ , tuple := range sampleLabels {
label := labels . FromStrings ( tuple [ 0 ] , tuple [ 1 ] )
_ , err := app . Add ( label , i * blockRange , 0 )
testutil . Ok ( t , err )
}
}
err := app . Commit ( )
testutil . Ok ( t , err )
}
for _ , tst := range tests {
2019-01-30 01:40:12 -08:00
db , delete := openTestDB ( t , nil )
defer func ( ) {
testutil . Ok ( t , db . Close ( ) )
delete ( )
} ( )
2018-11-07 07:52:41 -08:00
appendSamples ( db , 0 , 4 , tst . sampleLabels1 )
// Testing head.
headIndexr , err := db . head . Index ( )
testutil . Ok ( t , err )
labelNames , err := headIndexr . LabelNames ( )
testutil . Ok ( t , err )
testutil . Equals ( t , tst . exp1 , labelNames )
testutil . Ok ( t , headIndexr . Close ( ) )
// Testing disk.
err = db . compact ( )
testutil . Ok ( t , err )
// All blocks have same label names, hence check them individually.
// No need to aggregrate and check.
for _ , b := range db . Blocks ( ) {
blockIndexr , err := b . Index ( )
testutil . Ok ( t , err )
labelNames , err = blockIndexr . LabelNames ( )
testutil . Ok ( t , err )
testutil . Equals ( t , tst . exp1 , labelNames )
testutil . Ok ( t , blockIndexr . Close ( ) )
}
// Addings more samples to head with new label names
2018-11-16 10:02:24 -08:00
// so that we can test (head+disk).LabelNames() (the union).
2018-11-07 07:52:41 -08:00
appendSamples ( db , 5 , 9 , tst . sampleLabels2 )
// Testing DB (union).
2018-11-16 10:02:24 -08:00
q , err := db . Querier ( math . MinInt64 , math . MaxInt64 )
2018-11-07 07:52:41 -08:00
testutil . Ok ( t , err )
2018-11-16 10:02:24 -08:00
labelNames , err = q . LabelNames ( )
testutil . Ok ( t , err )
testutil . Ok ( t , q . Close ( ) )
2018-11-07 07:52:41 -08:00
testutil . Equals ( t , tst . exp2 , labelNames )
}
}
2018-09-27 04:43:22 -07:00
func TestCorrectNumTombstones ( t * testing . T ) {
2019-01-30 01:40:12 -08:00
db , delete := openTestDB ( t , nil )
defer func ( ) {
testutil . Ok ( t , db . Close ( ) )
delete ( )
} ( )
2018-09-27 04:43:22 -07:00
blockRange := DefaultOptions . BlockRanges [ 0 ]
2019-01-18 00:35:16 -08:00
defaultLabel := labels . FromStrings ( "foo" , "bar" )
defaultMatcher := labels . NewEqualMatcher ( defaultLabel [ 0 ] . Name , defaultLabel [ 0 ] . Value )
2018-09-27 04:43:22 -07:00
app := db . Appender ( )
for i := int64 ( 0 ) ; i < 3 ; i ++ {
for j := int64 ( 0 ) ; j < 15 ; j ++ {
2019-01-18 00:35:16 -08:00
_ , err := app . Add ( defaultLabel , i * blockRange + j , 0 )
2018-09-27 04:43:22 -07:00
testutil . Ok ( t , err )
}
}
testutil . Ok ( t , app . Commit ( ) )
2018-09-28 03:26:29 -07:00
err := db . compact ( )
2018-09-27 04:43:22 -07:00
testutil . Ok ( t , err )
testutil . Equals ( t , 1 , len ( db . blocks ) )
2019-01-18 00:35:16 -08:00
testutil . Ok ( t , db . Delete ( 0 , 1 , defaultMatcher ) )
2018-09-27 04:43:22 -07:00
testutil . Equals ( t , uint64 ( 1 ) , db . blocks [ 0 ] . meta . Stats . NumTombstones )
// {0, 1} and {2, 3} are merged to form 1 tombstone.
2019-01-18 00:35:16 -08:00
testutil . Ok ( t , db . Delete ( 2 , 3 , defaultMatcher ) )
2018-09-27 04:43:22 -07:00
testutil . Equals ( t , uint64 ( 1 ) , db . blocks [ 0 ] . meta . Stats . NumTombstones )
2019-01-18 00:35:16 -08:00
testutil . Ok ( t , db . Delete ( 5 , 6 , defaultMatcher ) )
2018-09-27 04:43:22 -07:00
testutil . Equals ( t , uint64 ( 2 ) , db . blocks [ 0 ] . meta . Stats . NumTombstones )
2019-01-18 00:35:16 -08:00
testutil . Ok ( t , db . Delete ( 9 , 11 , defaultMatcher ) )
2018-09-27 04:43:22 -07:00
testutil . Equals ( t , uint64 ( 3 ) , db . blocks [ 0 ] . meta . Stats . NumTombstones )
}
2018-12-04 02:30:49 -08:00
2019-02-14 05:29:41 -08:00
// TestVerticalCompaction verifies behavior with overlapping blocks:
// first that a Querier merges overlapping blocks correctly (vertical
// query merging), then that compaction collapses them into a single
// block whose contents match the same expected merged output. The
// expSeries values show that on timestamp collisions the sample from
// the later (higher-value) block wins.
func TestVerticalCompaction(t *testing.T) {
	cases := []struct {
		blockSeries [][]Series                   // One inner slice of series per (overlapping) block to create.
		expSeries   map[string][]tsdbutil.Sample // Expected merged output, keyed by series label string.
	}{
		// Case 0
		// |--------------|
		//        |----------------|
		{
			blockSeries: [][]Series{
				[]Series{
					newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
						sample{0, 0}, sample{1, 0}, sample{2, 0}, sample{4, 0},
						sample{5, 0}, sample{7, 0}, sample{8, 0}, sample{9, 0},
					}),
				},
				[]Series{
					newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
						sample{3, 99}, sample{5, 99}, sample{6, 99}, sample{7, 99},
						sample{8, 99}, sample{9, 99}, sample{10, 99}, sample{11, 99},
						sample{12, 99}, sample{13, 99}, sample{14, 99},
					}),
				},
			},
			expSeries: map[string][]tsdbutil.Sample{`{a="b"}`: {
				sample{0, 0}, sample{1, 0}, sample{2, 0}, sample{3, 99},
				sample{4, 0}, sample{5, 99}, sample{6, 99}, sample{7, 99},
				sample{8, 99}, sample{9, 99}, sample{10, 99}, sample{11, 99},
				sample{12, 99}, sample{13, 99}, sample{14, 99},
			}},
		},
		// Case 1
		// |-------------------------------|
		//        |----------------|
		{
			blockSeries: [][]Series{
				[]Series{
					newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
						sample{0, 0}, sample{1, 0}, sample{2, 0}, sample{4, 0},
						sample{5, 0}, sample{7, 0}, sample{8, 0}, sample{9, 0},
						sample{11, 0}, sample{13, 0}, sample{17, 0},
					}),
				},
				[]Series{
					newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
						sample{3, 99}, sample{5, 99}, sample{6, 99}, sample{7, 99},
						sample{8, 99}, sample{9, 99}, sample{10, 99},
					}),
				},
			},
			expSeries: map[string][]tsdbutil.Sample{`{a="b"}`: {
				sample{0, 0}, sample{1, 0}, sample{2, 0}, sample{3, 99},
				sample{4, 0}, sample{5, 99}, sample{6, 99}, sample{7, 99},
				sample{8, 99}, sample{9, 99}, sample{10, 99}, sample{11, 0},
				sample{13, 0}, sample{17, 0},
			}},
		},
		// Case 2
		// |-------------------------------|
		//        |------------|
		//                           |--------------------|
		{
			blockSeries: [][]Series{
				[]Series{
					newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
						sample{0, 0}, sample{1, 0}, sample{2, 0}, sample{4, 0},
						sample{5, 0}, sample{7, 0}, sample{8, 0}, sample{9, 0},
						sample{11, 0}, sample{13, 0}, sample{17, 0},
					}),
				},
				[]Series{
					newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
						sample{3, 99}, sample{5, 99}, sample{6, 99}, sample{7, 99},
						sample{8, 99}, sample{9, 99},
					}),
				},
				[]Series{
					newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
						sample{14, 59}, sample{15, 59}, sample{17, 59}, sample{20, 59},
						sample{21, 59}, sample{22, 59},
					}),
				},
			},
			expSeries: map[string][]tsdbutil.Sample{`{a="b"}`: {
				sample{0, 0}, sample{1, 0}, sample{2, 0}, sample{3, 99},
				sample{4, 0}, sample{5, 99}, sample{6, 99}, sample{7, 99},
				sample{8, 99}, sample{9, 99}, sample{11, 0}, sample{13, 0},
				sample{14, 59}, sample{15, 59}, sample{17, 59}, sample{20, 59},
				sample{21, 59}, sample{22, 59},
			}},
		},
		// Case 3
		// |-------------------|
		//                           |--------------------|
		//               |----------------|
		{
			blockSeries: [][]Series{
				[]Series{
					newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
						sample{0, 0}, sample{1, 0}, sample{2, 0}, sample{4, 0},
						sample{5, 0}, sample{8, 0}, sample{9, 0},
					}),
				},
				[]Series{
					newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
						sample{14, 59}, sample{15, 59}, sample{17, 59}, sample{20, 59},
						sample{21, 59}, sample{22, 59},
					}),
				},
				[]Series{
					newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
						sample{5, 99}, sample{6, 99}, sample{7, 99}, sample{8, 99},
						sample{9, 99}, sample{10, 99}, sample{13, 99}, sample{15, 99},
						sample{16, 99}, sample{17, 99},
					}),
				},
			},
			expSeries: map[string][]tsdbutil.Sample{`{a="b"}`: {
				sample{0, 0}, sample{1, 0}, sample{2, 0}, sample{4, 0},
				sample{5, 99}, sample{6, 99}, sample{7, 99}, sample{8, 99},
				sample{9, 99}, sample{10, 99}, sample{13, 99}, sample{14, 59},
				sample{15, 59}, sample{16, 99}, sample{17, 59}, sample{20, 59},
				sample{21, 59}, sample{22, 59},
			}},
		},
		// Case 4
		// |-------------------------------------|
		//            |------------|
		//      |-------------------------|
		{
			blockSeries: [][]Series{
				[]Series{
					newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
						sample{0, 0}, sample{1, 0}, sample{2, 0}, sample{4, 0},
						sample{5, 0}, sample{8, 0}, sample{9, 0}, sample{10, 0},
						sample{13, 0}, sample{15, 0}, sample{16, 0}, sample{17, 0},
						sample{20, 0}, sample{22, 0},
					}),
				},
				[]Series{
					newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
						sample{7, 59}, sample{8, 59}, sample{9, 59}, sample{10, 59},
						sample{11, 59},
					}),
				},
				[]Series{
					newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
						sample{3, 99}, sample{5, 99}, sample{6, 99}, sample{8, 99},
						sample{9, 99}, sample{10, 99}, sample{13, 99}, sample{15, 99},
						sample{16, 99}, sample{17, 99},
					}),
				},
			},
			expSeries: map[string][]tsdbutil.Sample{`{a="b"}`: {
				sample{0, 0}, sample{1, 0}, sample{2, 0}, sample{3, 99},
				sample{4, 0}, sample{5, 99}, sample{6, 99}, sample{7, 59},
				sample{8, 59}, sample{9, 59}, sample{10, 59}, sample{11, 59},
				sample{13, 99}, sample{15, 99}, sample{16, 99}, sample{17, 99},
				sample{20, 0}, sample{22, 0},
			}},
		},
		// Case 5: series are merged properly when there are multiple series.
		// |-------------------------------------|
		//            |------------|
		//      |-------------------------|
		{
			blockSeries: [][]Series{
				[]Series{
					newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
						sample{0, 0}, sample{1, 0}, sample{2, 0}, sample{4, 0},
						sample{5, 0}, sample{8, 0}, sample{9, 0}, sample{10, 0},
						sample{13, 0}, sample{15, 0}, sample{16, 0}, sample{17, 0},
						sample{20, 0}, sample{22, 0},
					}),
					newSeries(map[string]string{"b": "c"}, []tsdbutil.Sample{
						sample{0, 0}, sample{1, 0}, sample{2, 0}, sample{4, 0},
						sample{5, 0}, sample{8, 0}, sample{9, 0}, sample{10, 0},
						sample{13, 0}, sample{15, 0}, sample{16, 0}, sample{17, 0},
						sample{20, 0}, sample{22, 0},
					}),
					newSeries(map[string]string{"c": "d"}, []tsdbutil.Sample{
						sample{0, 0}, sample{1, 0}, sample{2, 0}, sample{4, 0},
						sample{5, 0}, sample{8, 0}, sample{9, 0}, sample{10, 0},
						sample{13, 0}, sample{15, 0}, sample{16, 0}, sample{17, 0},
						sample{20, 0}, sample{22, 0},
					}),
				},
				[]Series{
					newSeries(map[string]string{"__name__": "a"}, []tsdbutil.Sample{
						sample{7, 59}, sample{8, 59}, sample{9, 59}, sample{10, 59},
						sample{11, 59},
					}),
					newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
						sample{7, 59}, sample{8, 59}, sample{9, 59}, sample{10, 59},
						sample{11, 59},
					}),
					newSeries(map[string]string{"aa": "bb"}, []tsdbutil.Sample{
						sample{7, 59}, sample{8, 59}, sample{9, 59}, sample{10, 59},
						sample{11, 59},
					}),
					newSeries(map[string]string{"c": "d"}, []tsdbutil.Sample{
						sample{7, 59}, sample{8, 59}, sample{9, 59}, sample{10, 59},
						sample{11, 59},
					}),
				},
				[]Series{
					newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
						sample{3, 99}, sample{5, 99}, sample{6, 99}, sample{8, 99},
						sample{9, 99}, sample{10, 99}, sample{13, 99}, sample{15, 99},
						sample{16, 99}, sample{17, 99},
					}),
					newSeries(map[string]string{"aa": "bb"}, []tsdbutil.Sample{
						sample{3, 99}, sample{5, 99}, sample{6, 99}, sample{8, 99},
						sample{9, 99}, sample{10, 99}, sample{13, 99}, sample{15, 99},
						sample{16, 99}, sample{17, 99},
					}),
					newSeries(map[string]string{"c": "d"}, []tsdbutil.Sample{
						sample{3, 99}, sample{5, 99}, sample{6, 99}, sample{8, 99},
						sample{9, 99}, sample{10, 99}, sample{13, 99}, sample{15, 99},
						sample{16, 99}, sample{17, 99},
					}),
				},
			},
			expSeries: map[string][]tsdbutil.Sample{
				`{__name__="a"}`: {
					sample{7, 59}, sample{8, 59}, sample{9, 59}, sample{10, 59},
					sample{11, 59},
				},
				`{a="b"}`: {
					sample{0, 0}, sample{1, 0}, sample{2, 0}, sample{3, 99},
					sample{4, 0}, sample{5, 99}, sample{6, 99}, sample{7, 59},
					sample{8, 59}, sample{9, 59}, sample{10, 59}, sample{11, 59},
					sample{13, 99}, sample{15, 99}, sample{16, 99}, sample{17, 99},
					sample{20, 0}, sample{22, 0},
				},
				`{aa="bb"}`: {
					sample{3, 99}, sample{5, 99}, sample{6, 99}, sample{7, 59},
					sample{8, 59}, sample{9, 59}, sample{10, 59}, sample{11, 59},
					sample{13, 99}, sample{15, 99}, sample{16, 99}, sample{17, 99},
				},
				`{b="c"}`: {
					sample{0, 0}, sample{1, 0}, sample{2, 0}, sample{4, 0},
					sample{5, 0}, sample{8, 0}, sample{9, 0}, sample{10, 0},
					sample{13, 0}, sample{15, 0}, sample{16, 0}, sample{17, 0},
					sample{20, 0}, sample{22, 0},
				},
				`{c="d"}`: {
					sample{0, 0}, sample{1, 0}, sample{2, 0}, sample{3, 99},
					sample{4, 0}, sample{5, 99}, sample{6, 99}, sample{7, 59},
					sample{8, 59}, sample{9, 59}, sample{10, 59}, sample{11, 59},
					sample{13, 99}, sample{15, 99}, sample{16, 99}, sample{17, 99},
					sample{20, 0}, sample{22, 0},
				},
			},
		},
	}

	// Matches every series (all series carry some label; the regexp matcher
	// on __name__ with ".*" also matches series without that label here —
	// NOTE(review): relies on query() semantics defined elsewhere in this file).
	defaultMatcher := labels.NewMustRegexpMatcher("__name__", ".*")
	for _, c := range cases {
		if ok := t.Run("", func(t *testing.T) {
			tmpdir, err := ioutil.TempDir("", "data")
			testutil.Ok(t, err)
			defer func() {
				testutil.Ok(t, os.RemoveAll(tmpdir))
			}()

			// Persist each overlapping block on disk before opening the DB.
			for _, series := range c.blockSeries {
				createBlock(t, tmpdir, series)
			}

			// Overlapping blocks are only accepted when explicitly enabled.
			opts := *DefaultOptions
			opts.AllowOverlappingBlocks = true
			db, err := Open(tmpdir, nil, nil, &opts)
			testutil.Ok(t, err)
			defer func() {
				testutil.Ok(t, db.Close())
			}()
			db.DisableCompactions()
			testutil.Assert(t, len(db.blocks) == len(c.blockSeries), "Wrong number of blocks [before compact].")

			// Vertical Query Merging test.
			querier, err := db.Querier(0, 100)
			testutil.Ok(t, err)
			actSeries := query(t, querier, defaultMatcher)
			testutil.Equals(t, c.expSeries, actSeries)

			// Vertical compaction.
			lc := db.compactor.(*LeveledCompactor)
			testutil.Equals(t, 0, int(prom_testutil.ToFloat64(lc.metrics.overlappingBlocks)), "overlapping blocks count should be still 0 here")
			err = db.compact()
			testutil.Ok(t, err)
			testutil.Equals(t, 1, len(db.Blocks()), "Wrong number of blocks [after compact]")

			testutil.Equals(t, 1, int(prom_testutil.ToFloat64(lc.metrics.overlappingBlocks)), "overlapping blocks count mismatch")

			// Query test after merging the overlapping blocks.
			querier, err = db.Querier(0, 100)
			testutil.Ok(t, err)
			actSeries = query(t, querier, defaultMatcher)
			testutil.Equals(t, c.expSeries, actSeries)
		}); !ok {
			// Stop at the first failing case; later cases would add noise.
			return
		}
	}
}
2018-12-04 02:30:49 -08:00
// TestBlockRanges checks the following use cases:
// - No samples can be added with timestamps lower than the last block maxt.
2019-01-28 18:25:12 -08:00
// - The compactor doesn't create overlapping blocks
2018-12-04 02:30:49 -08:00
// even when the last blocks is not within the default boundaries.
2019-01-28 18:25:12 -08:00
// - Lower boundary is based on the smallest sample in the head and
2018-12-04 02:30:49 -08:00
// upper boundary is rounded to the configured block range.
//
// This ensures that a snapshot that includes the head and creates a block with a custom time range
// will not overlap with the first block created by the next compaction.
func TestBlockRanges ( t * testing . T ) {
logger := log . NewLogfmtLogger ( log . NewSyncWriter ( os . Stderr ) )
dir , err := ioutil . TempDir ( "" , "test_storage" )
if err != nil {
t . Fatalf ( "Opening test dir failed: %s" , err )
}
rangeToTriggercompaction := DefaultOptions . BlockRanges [ 0 ] / 2 * 3 + 1
// Test that the compactor doesn't create overlapping blocks
// when a non standard block already exists.
firstBlockMaxT := int64 ( 3 )
2019-01-28 03:24:49 -08:00
createBlock ( t , dir , genSeries ( 1 , 1 , 0 , firstBlockMaxT ) )
2018-12-04 02:30:49 -08:00
db , err := Open ( dir , logger , nil , DefaultOptions )
if err != nil {
t . Fatalf ( "Opening test storage failed: %s" , err )
}
defer func ( ) {
os . RemoveAll ( dir )
} ( )
app := db . Appender ( )
lbl := labels . Labels { { "a" , "b" } }
_ , err = app . Add ( lbl , firstBlockMaxT - 1 , rand . Float64 ( ) )
if err == nil {
t . Fatalf ( "appending a sample with a timestamp covered by a previous block shouldn't be possible" )
}
_ , err = app . Add ( lbl , firstBlockMaxT + 1 , rand . Float64 ( ) )
testutil . Ok ( t , err )
_ , err = app . Add ( lbl , firstBlockMaxT + 2 , rand . Float64 ( ) )
testutil . Ok ( t , err )
secondBlockMaxt := firstBlockMaxT + rangeToTriggercompaction
_ , err = app . Add ( lbl , secondBlockMaxt , rand . Float64 ( ) ) // Add samples to trigger a new compaction
testutil . Ok ( t , err )
testutil . Ok ( t , app . Commit ( ) )
2018-12-12 03:49:03 -08:00
for x := 0 ; x < 100 ; x ++ {
2018-12-04 02:30:49 -08:00
if len ( db . Blocks ( ) ) == 2 {
break
}
time . Sleep ( 100 * time . Millisecond )
}
testutil . Equals ( t , 2 , len ( db . Blocks ( ) ) , "no new block created after the set timeout" )
if db . Blocks ( ) [ 0 ] . Meta ( ) . MaxTime > db . Blocks ( ) [ 1 ] . Meta ( ) . MinTime {
t . Fatalf ( "new block overlaps old:%v,new:%v" , db . Blocks ( ) [ 0 ] . Meta ( ) , db . Blocks ( ) [ 1 ] . Meta ( ) )
}
// Test that wal records are skipped when an existing block covers the same time ranges
// and compaction doesn't create an overlapping block.
db . DisableCompactions ( )
_ , err = app . Add ( lbl , secondBlockMaxt + 1 , rand . Float64 ( ) )
testutil . Ok ( t , err )
_ , err = app . Add ( lbl , secondBlockMaxt + 2 , rand . Float64 ( ) )
testutil . Ok ( t , err )
_ , err = app . Add ( lbl , secondBlockMaxt + 3 , rand . Float64 ( ) )
testutil . Ok ( t , err )
_ , err = app . Add ( lbl , secondBlockMaxt + 4 , rand . Float64 ( ) )
2019-01-02 08:48:42 -08:00
testutil . Ok ( t , err )
2018-12-04 02:30:49 -08:00
testutil . Ok ( t , app . Commit ( ) )
testutil . Ok ( t , db . Close ( ) )
thirdBlockMaxt := secondBlockMaxt + 2
2019-01-28 03:24:49 -08:00
createBlock ( t , dir , genSeries ( 1 , 1 , secondBlockMaxt + 1 , thirdBlockMaxt ) )
2018-12-04 02:30:49 -08:00
db , err = Open ( dir , logger , nil , DefaultOptions )
if err != nil {
t . Fatalf ( "Opening test storage failed: %s" , err )
}
defer db . Close ( )
testutil . Equals ( t , 3 , len ( db . Blocks ( ) ) , "db doesn't include expected number of blocks" )
testutil . Equals ( t , db . Blocks ( ) [ 2 ] . Meta ( ) . MaxTime , thirdBlockMaxt , "unexpected maxt of the last block" )
app = db . Appender ( )
_ , err = app . Add ( lbl , thirdBlockMaxt + rangeToTriggercompaction , rand . Float64 ( ) ) // Trigger a compaction
testutil . Ok ( t , err )
testutil . Ok ( t , app . Commit ( ) )
2018-12-12 03:49:03 -08:00
for x := 0 ; x < 100 ; x ++ {
2018-12-04 02:30:49 -08:00
if len ( db . Blocks ( ) ) == 4 {
break
}
time . Sleep ( 100 * time . Millisecond )
}
testutil . Equals ( t , 4 , len ( db . Blocks ( ) ) , "no new block created after the set timeout" )
if db . Blocks ( ) [ 2 ] . Meta ( ) . MaxTime > db . Blocks ( ) [ 3 ] . Meta ( ) . MinTime {
t . Fatalf ( "new block overlaps old:%v,new:%v" , db . Blocks ( ) [ 2 ] . Meta ( ) , db . Blocks ( ) [ 3 ] . Meta ( ) )
}
}