// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb

import (
	"context"
	"encoding/binary"
	"errors"
	"io/ioutil"
	"math/rand"
	"os"
	"path/filepath"
	"strconv"
	"testing"

	"github.com/go-kit/kit/log"
	"github.com/prometheus/tsdb/chunks"
	"github.com/prometheus/tsdb/labels"
	"github.com/prometheus/tsdb/testutil"
	"github.com/prometheus/tsdb/tsdbutil"
)
// In Prometheus 2.1.0 we had a bug where the meta.json version was falsely bumped
// to 2. We had a migration in place resetting it to 1 but we should move immediately to
// version 3 next time to avoid confusion and issues.
func TestBlockMetaMustNeverBeVersion2(t *testing.T) {
	dir, err := ioutil.TempDir("", "metaversion")
	testutil.Ok(t, err)
	defer func() {
		testutil.Ok(t, os.RemoveAll(dir))
	}()

	_, err = writeMetaFile(log.NewNopLogger(), dir, &BlockMeta{})
	testutil.Ok(t, err)

	meta, _, err := readMetaFile(dir)
	testutil.Ok(t, err)
	testutil.Assert(t, meta.Version != 2, "meta.json version must never be 2")
}

func TestSetCompactionFailed(t *testing.T) {
	tmpdir, err := ioutil.TempDir("", "test")
	testutil.Ok(t, err)
	defer func() {
		testutil.Ok(t, os.RemoveAll(tmpdir))
	}()

	blockDir := createBlock(t, tmpdir, genSeries(1, 1, 0, 1))
	b, err := OpenBlock(nil, blockDir, nil)
	testutil.Ok(t, err)
	testutil.Equals(t, false, b.meta.Compaction.Failed)
	testutil.Ok(t, b.setCompactionFailed())
	testutil.Equals(t, true, b.meta.Compaction.Failed)
	testutil.Ok(t, b.Close())

	b, err = OpenBlock(nil, blockDir, nil)
	testutil.Ok(t, err)
	testutil.Equals(t, true, b.meta.Compaction.Failed)
	testutil.Ok(t, b.Close())
}

func TestCreateBlock(t *testing.T) {
	tmpdir, err := ioutil.TempDir("", "test")
	testutil.Ok(t, err)
	defer func() {
		testutil.Ok(t, os.RemoveAll(tmpdir))
	}()
	b, err := OpenBlock(nil, createBlock(t, tmpdir, genSeries(1, 1, 0, 10)), nil)
	if err == nil {
		testutil.Ok(t, b.Close())
	}
	testutil.Ok(t, err)
}
func TestCorruptedChunk(t *testing.T) {
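	// The cases below corrupt the chunk segment file header. As an informal summary
	// (based only on the offsets and sizes used in this test, not a normative spec):
	// each segment starts with a 4-byte magic number at offset 0, followed by a
	// 1-byte format version at offset 4.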
	for name, test := range map[string]struct {
		corrFunc func(f *os.File) // Func that applies the corruption.
		expErr   error
	}{
		"invalid header size": {
			func(f *os.File) {
				err := f.Truncate(1)
				testutil.Ok(t, err)
			},
			errors.New("invalid chunk header in segment 0: invalid size"),
		},
		"invalid magic number": {
			func(f *os.File) {
				magicChunksOffset := int64(0)
				_, err := f.Seek(magicChunksOffset, 0)
				testutil.Ok(t, err)

				// Set invalid magic number.
				b := make([]byte, chunks.MagicChunksSize)
				binary.BigEndian.PutUint32(b[:chunks.MagicChunksSize], 0x00000000)
				n, err := f.Write(b)
				testutil.Ok(t, err)
				testutil.Equals(t, chunks.MagicChunksSize, n)
			},
			errors.New("invalid magic number 0"),
		},
		"invalid chunk format version": {
			func(f *os.File) {
				chunksFormatVersionOffset := int64(4)
				_, err := f.Seek(chunksFormatVersionOffset, 0)
				testutil.Ok(t, err)

				// Set invalid chunk format version.
				b := make([]byte, chunks.ChunksFormatVersionSize)
				b[0] = 0
				n, err := f.Write(b)
				testutil.Ok(t, err)
				testutil.Equals(t, chunks.ChunksFormatVersionSize, n)
			},
			errors.New("invalid chunk format version 0"),
		},
	} {
		t.Run(name, func(t *testing.T) {
			tmpdir, err := ioutil.TempDir("", "test_open_block_chunk_corrupted")
			testutil.Ok(t, err)
			defer func() {
				testutil.Ok(t, os.RemoveAll(tmpdir))
			}()

			blockDir := createBlock(t, tmpdir, genSeries(1, 1, 0, 1))
			files, err := sequenceFiles(chunkDir(blockDir))
			testutil.Ok(t, err)
			testutil.Assert(t, len(files) > 0, "No chunk created.")

			f, err := os.OpenFile(files[0], os.O_RDWR, 0666)
			testutil.Ok(t, err)

			// Apply corruption function.
			test.corrFunc(f)
			testutil.Ok(t, f.Close())

			_, err = OpenBlock(nil, blockDir, nil)
			testutil.Equals(t, test.expErr.Error(), err.Error())
		})
	}
}
// TestBlockSize ensures that the block size is calculated correctly.
func TestBlockSize(t *testing.T) {
	tmpdir, err := ioutil.TempDir("", "test_blockSize")
	testutil.Ok(t, err)
	defer func() {
		testutil.Ok(t, os.RemoveAll(tmpdir))
	}()

	var (
		blockInit    *Block
		expSizeInit  int64
		blockDirInit string
	)

	// Create a block and compare the reported size vs actual disk size.
	{
		blockDirInit = createBlock(t, tmpdir, genSeries(10, 1, 1, 100))
		blockInit, err = OpenBlock(nil, blockDirInit, nil)
		testutil.Ok(t, err)
		defer func() {
			testutil.Ok(t, blockInit.Close())
		}()
		expSizeInit = blockInit.Size()
		actSizeInit, err := testutil.DirSize(blockInit.Dir())
		testutil.Ok(t, err)
		testutil.Equals(t, expSizeInit, actSizeInit)
	}

	// Delete some series and check the sizes again.
	{
		testutil.Ok(t, blockInit.Delete(1, 10, labels.NewMustRegexpMatcher("", ".*")))
		expAfterDelete := blockInit.Size()
		testutil.Assert(t, expAfterDelete > expSizeInit, "after a delete the block size should be bigger as the tombstone file should grow %v > %v", expAfterDelete, expSizeInit)
		actAfterDelete, err := testutil.DirSize(blockDirInit)
		testutil.Ok(t, err)
		testutil.Equals(t, expAfterDelete, actAfterDelete, "after a delete reported block size doesn't match actual disk size")

		c, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{0}, nil)
		testutil.Ok(t, err)
		blockDirAfterCompact, err := c.Compact(tmpdir, []string{blockInit.Dir()}, nil)
		testutil.Ok(t, err)
		blockAfterCompact, err := OpenBlock(nil, filepath.Join(tmpdir, blockDirAfterCompact.String()), nil)
		testutil.Ok(t, err)
		defer func() {
			testutil.Ok(t, blockAfterCompact.Close())
		}()
		expAfterCompact := blockAfterCompact.Size()
		actAfterCompact, err := testutil.DirSize(blockAfterCompact.Dir())
		testutil.Ok(t, err)
		testutil.Assert(t, actAfterDelete > actAfterCompact, "after a delete and compaction the block size should be smaller %v,%v", actAfterDelete, actAfterCompact)
		testutil.Equals(t, expAfterCompact, actAfterCompact, "after a delete and compaction reported block size doesn't match actual disk size")
	}
}
// createBlock creates a block with the given set of series and returns its dir.
func createBlock(tb testing.TB, dir string, series []Series) string {
	head := createHead(tb, series)
	compactor, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{1000000}, nil)
	testutil.Ok(tb, err)

	testutil.Ok(tb, os.MkdirAll(dir, 0777))

	// Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime).
	// Because of this, block intervals are always +1 compared to the range of samples they include.
	ulid, err := compactor.Write(dir, head, head.MinTime(), head.MaxTime()+1, nil)
	testutil.Ok(tb, err)
	return filepath.Join(dir, ulid.String())
}
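// As an illustration of the half-open convention above (informal, not part of the
// original helpers): createBlock with genSeries(1, 1, 0, 10) stores samples at
// timestamps 0 through 9, so head.MaxTime() is 9 and the written block covers [0, 10).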
// createHead creates a Head and appends the given series to it.
func createHead(tb testing.TB, series []Series) *Head {
	head, err := NewHead(nil, nil, nil, 2*60*60*1000) // 2h chunk range in milliseconds.
	testutil.Ok(tb, err)
	defer head.Close()

	app := head.Appender()
	for _, s := range series {
		ref := uint64(0)
		it := s.Iterator()
		for it.Next() {
			t, v := it.At()
			if ref != 0 {
				err := app.AddFast(ref, t, v)
				if err == nil {
					continue
				}
			}
			ref, err = app.Add(s.Labels(), t, v)
			testutil.Ok(tb, err)
		}
		testutil.Ok(tb, it.Err())
	}
	err = app.Commit()
	testutil.Ok(tb, err)

	return head
}

const (
	defaultLabelName  = "labelName"
	defaultLabelValue = "labelValue"
)
// genSeries generates totalSeries series, each with labelCount labels and one
// sample per timestamp in the half-open range [mint, maxt).
func genSeries(totalSeries, labelCount int, mint, maxt int64) []Series {
	if totalSeries == 0 || labelCount == 0 {
		return nil
	}

	series := make([]Series, totalSeries)

	for i := 0; i < totalSeries; i++ {
		lbls := make(map[string]string, labelCount)
		lbls[defaultLabelName] = strconv.Itoa(i)
		for j := 1; len(lbls) < labelCount; j++ {
			lbls[defaultLabelName+strconv.Itoa(j)] = defaultLabelValue + strconv.Itoa(j)
		}
		samples := make([]tsdbutil.Sample, 0, maxt-mint+1)
		for t := mint; t < maxt; t++ {
			samples = append(samples, sample{t: t, v: rand.Float64()})
		}
		series[i] = newSeries(lbls, samples)
	}

	return series
}
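// genSeriesSketch is a hypothetical usage example (not called by the tests above)
// showing what genSeries returns: here, 3 series with 2 labels each and samples
// covering the half-open range [0, 5).
func genSeriesSketch(tb testing.TB) {
	series := genSeries(3, 2, 0, 5)
	testutil.Equals(tb, 3, len(series))
	for _, s := range series {
		// Each series carries the default label plus generated label/value pairs.
		testutil.Equals(tb, 2, len(s.Labels()))
	}
}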
// populateSeries generates series from the given label sets, with one sample per
// timestamp in the closed range [mint, maxt]. Empty label sets are skipped.
func populateSeries(lbls []map[string]string, mint, maxt int64) []Series {
	if len(lbls) == 0 {
		return nil
	}

	series := make([]Series, 0, len(lbls))
	for _, lbl := range lbls {
		if len(lbl) == 0 {
			continue
		}
		samples := make([]tsdbutil.Sample, 0, maxt-mint+1)
		for t := mint; t <= maxt; t++ {
			samples = append(samples, sample{t: t, v: rand.Float64()})
		}
		series = append(series, newSeries(lbl, samples))
	}

	return series
}
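// populateSeriesSketch is a hypothetical usage example (not called by the tests
// above): the empty label set in the input is skipped, and each remaining series
// gets samples at every timestamp in the closed range [0, 4].
func populateSeriesSketch(tb testing.TB) {
	series := populateSeries([]map[string]string{
		{"a": "1"},
		{}, // Skipped: empty label set.
		{"b": "2"},
	}, 0, 4)
	testutil.Equals(tb, 2, len(series))
}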