// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package tsdb

import (
	"context"
	"fmt"
	"io"
	"math"
	"math/rand"
	"os"
	"path"
	"path/filepath"
	"sort"
	"strconv"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
	prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
	"github.com/stretchr/testify/require"
	"go.uber.org/atomic"
	"golang.org/x/sync/errgroup"

	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/model/exemplar"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
	"github.com/prometheus/prometheus/tsdb/chunks"
	"github.com/prometheus/prometheus/tsdb/index"
	"github.com/prometheus/prometheus/tsdb/record"
	"github.com/prometheus/prometheus/tsdb/tombstones"
	"github.com/prometheus/prometheus/tsdb/tsdbutil"
	"github.com/prometheus/prometheus/tsdb/wal"
)
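
// newTestHead returns a Head with exemplar storage enabled, backed by a WAL in
// a fresh temporary directory. The no-op IterateAllChunks call replays the
// (empty) on-disk chunk state so the chunk disk mapper is usable even in tests
// that never call Init. Callers are responsible for closing the returned Head.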
func newTestHead(t testing.TB, chunkRange int64, compressWAL bool) (*Head, *wal.WAL) {
	dir := t.TempDir()
	wlog, err := wal.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, compressWAL)
	require.NoError(t, err)

	opts := DefaultHeadOptions()
	opts.ChunkRange = chunkRange
	opts.ChunkDirRoot = dir
	opts.EnableExemplarStorage = true
	opts.MaxExemplars.Store(config.DefaultExemplarsConfig.MaxExemplars)

	h, err := NewHead(nil, nil, wlog, opts, nil)
	require.NoError(t, err)

	require.NoError(t, h.chunkDiskMapper.IterateAllChunks(func(_ chunks.HeadSeriesRef, _ chunks.ChunkDiskMapperRef, _, _ int64, _ uint16) error { return nil }))

	return h, wlog
}

func BenchmarkCreateSeries(b *testing.B) {
	series := genSeries(b.N, 10, 0, 0)
	h, _ := newTestHead(b, 10000, false)
	defer func() {
		require.NoError(b, h.Close())
	}()

	b.ReportAllocs()
	b.ResetTimer()

	for _, s := range series {
		h.getOrCreate(s.Labels().Hash(), s.Labels())
	}
}
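
// populateTestWAL encodes the given records into the WAL, dispatching on their
// type: series, samples, tombstones or exemplars. Records of any other type
// are silently skipped.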
func populateTestWAL(t testing.TB, w *wal.WAL, recs []interface{}) {
	var enc record.Encoder
	for _, r := range recs {
		switch v := r.(type) {
		case []record.RefSeries:
			require.NoError(t, w.Log(enc.Series(v, nil)))
		case []record.RefSample:
			require.NoError(t, w.Log(enc.Samples(v, nil)))
		case []tombstones.Stone:
			require.NoError(t, w.Log(enc.Tombstones(v, nil)))
		case []record.RefExemplar:
			require.NoError(t, w.Log(enc.Exemplars(v, nil)))
		}
	}
}
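
// readTestWAL decodes the series, sample and tombstone records found in the
// given WAL directory and returns them as a slice of typed records. Any other
// record type fails the test.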
func readTestWAL(t testing.TB, dir string) (recs []interface{}) {
	sr, err := wal.NewSegmentsReader(dir)
	require.NoError(t, err)
	defer sr.Close()

	var dec record.Decoder
	r := wal.NewReader(sr)

	for r.Next() {
		rec := r.Record()

		switch dec.Type(rec) {
		case record.Series:
			series, err := dec.Series(rec, nil)
			require.NoError(t, err)
			recs = append(recs, series)
		case record.Samples:
			samples, err := dec.Samples(rec, nil)
			require.NoError(t, err)
			recs = append(recs, samples)
		case record.Tombstones:
			tstones, err := dec.Tombstones(rec, nil)
			require.NoError(t, err)
			recs = append(recs, tstones)
		default:
			t.Fatalf("unknown record type")
		}
	}
	require.NoError(t, r.Err())
	return recs
}
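
// BenchmarkLoadWAL prepares a WAL with series, sample and exemplar records
// (plus, for some cases, mmapped chunks) ahead of time and then measures how
// long Head.Init takes to replay it.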
func BenchmarkLoadWAL(b *testing.B) {
	cases := []struct {
		// Total series is (batches*seriesPerBatch).
		batches          int
		seriesPerBatch   int
		samplesPerSeries int
		mmappedChunkT    int64
	}{
		{ // Less series and more samples. 2 hour WAL with 1 second scrape interval.
			batches:          10,
			seriesPerBatch:   100,
			samplesPerSeries: 7200,
		},
		{ // More series and less samples.
			batches:          10,
			seriesPerBatch:   10000,
			samplesPerSeries: 50,
		},
		{ // In between.
			batches:          10,
			seriesPerBatch:   1000,
			samplesPerSeries: 480,
		},
		{ // 2 hour WAL with 15 second scrape interval, and mmapped chunks up to last 100 samples.
			batches:          100,
			seriesPerBatch:   1000,
			samplesPerSeries: 480,
			mmappedChunkT:    3800,
		},
	}

	labelsPerSeries := 5
	// Rough estimates of most common % of samples that have an exemplar for each scrape.
	exemplarsPercentages := []float64{0, 0.5, 1, 5}
	lastExemplarsPerSeries := -1
	for _, c := range cases {
		for _, p := range exemplarsPercentages {
			exemplarsPerSeries := int(math.RoundToEven(float64(c.samplesPerSeries) * p / 100))
			// For tests with low samplesPerSeries we could end up testing with 0 exemplarsPerSeries
			// multiple times without this check.
			if exemplarsPerSeries == lastExemplarsPerSeries {
				continue
			}
			lastExemplarsPerSeries = exemplarsPerSeries
			// fmt.Println("exemplars per series: ", exemplarsPerSeries)
			b.Run(fmt.Sprintf("batches=%d,seriesPerBatch=%d,samplesPerSeries=%d,exemplarsPerSeries=%d,mmappedChunkT=%d", c.batches, c.seriesPerBatch, c.samplesPerSeries, exemplarsPerSeries, c.mmappedChunkT),
				func(b *testing.B) {
					dir := b.TempDir()

					w, err := wal.New(nil, nil, dir, false)
					require.NoError(b, err)

					// Write series.
					refSeries := make([]record.RefSeries, 0, c.seriesPerBatch)
					for k := 0; k < c.batches; k++ {
						refSeries = refSeries[:0]
						for i := k * c.seriesPerBatch; i < (k+1)*c.seriesPerBatch; i++ {
							lbls := make(map[string]string, labelsPerSeries)
							lbls[defaultLabelName] = strconv.Itoa(i)
							for j := 1; len(lbls) < labelsPerSeries; j++ {
								lbls[defaultLabelName+strconv.Itoa(j)] = defaultLabelValue + strconv.Itoa(j)
							}
							refSeries = append(refSeries, record.RefSeries{Ref: chunks.HeadSeriesRef(i) * 101, Labels: labels.FromMap(lbls)})
						}
						populateTestWAL(b, w, []interface{}{refSeries})
					}

					// Write samples.
					refSamples := make([]record.RefSample, 0, c.seriesPerBatch)
					for i := 0; i < c.samplesPerSeries; i++ {
						for j := 0; j < c.batches; j++ {
							refSamples = refSamples[:0]
							for k := j * c.seriesPerBatch; k < (j+1)*c.seriesPerBatch; k++ {
								refSamples = append(refSamples, record.RefSample{
									Ref: chunks.HeadSeriesRef(k) * 101,
									T:   int64(i) * 10,
									V:   float64(i) * 100,
								})
							}
							populateTestWAL(b, w, []interface{}{refSamples})
						}
					}

					// Write mmapped chunks.
					if c.mmappedChunkT != 0 {
						chunkDiskMapper, err := chunks.NewChunkDiskMapper(nil, mmappedChunksDir(dir), chunkenc.NewPool(), chunks.DefaultWriteBufferSize, chunks.DefaultWriteQueueSize)
						require.NoError(b, err)
						for k := 0; k < c.batches*c.seriesPerBatch; k++ {
							// Create one mmapped chunk per series, with one sample at the given time.
							s := newMemSeries(labels.Labels{}, chunks.HeadSeriesRef(k)*101, c.mmappedChunkT, nil, defaultIsolationDisabled)
							s.append(c.mmappedChunkT, 42, 0, chunkDiskMapper)
							s.mmapCurrentHeadChunk(chunkDiskMapper)
						}
						require.NoError(b, chunkDiskMapper.Close())
					}

					// Write exemplars.
					refExemplars := make([]record.RefExemplar, 0, c.seriesPerBatch)
					for i := 0; i < exemplarsPerSeries; i++ {
						for j := 0; j < c.batches; j++ {
							refExemplars = refExemplars[:0]
							for k := j * c.seriesPerBatch; k < (j+1)*c.seriesPerBatch; k++ {
								refExemplars = append(refExemplars, record.RefExemplar{
									Ref:    chunks.HeadSeriesRef(k) * 101,
									T:      int64(i) * 10,
									V:      float64(i) * 100,
									Labels: labels.FromStrings("traceID", fmt.Sprintf("trace-%d", i)),
								})
							}
							populateTestWAL(b, w, []interface{}{refExemplars})
						}
					}

					b.ResetTimer()

					// Load the WAL.
					for i := 0; i < b.N; i++ {
						opts := DefaultHeadOptions()
						opts.ChunkRange = 1000
						opts.ChunkDirRoot = w.Dir()
						h, err := NewHead(nil, nil, w, opts, nil)
						require.NoError(b, err)
						h.Init(0)
					}
					b.StopTimer()
					w.Close()
				})
		}
	}
}

// TestHead_HighConcurrencyReadAndWrite generates 1000 series with a step of 15s and fills a whole block with samples.
// At a 15s step and roughly 120 samples per chunk, each chunk covers 30m, so one 2h block holds 4 chunks per series,
// i.e. 4000 chunks in total. While appending the samples to the head, the test concurrently queries them from
// multiple goroutines and verifies that the returned results are correct.
func TestHead_HighConcurrencyReadAndWrite(t *testing.T) {
	head, _ := newTestHead(t, DefaultBlockDuration, false)
	defer func() {
		require.NoError(t, head.Close())
	}()

	seriesCnt := 1000
	readConcurrency := 2
	writeConcurrency := 10
	startTs := uint64(DefaultBlockDuration) // Start at the second block relative to the unix epoch.
	qryRange := uint64(5 * time.Minute.Milliseconds())
	step := uint64(15 * time.Second / time.Millisecond)
	endTs := startTs + uint64(DefaultBlockDuration)

	labelSets := make([]labels.Labels, seriesCnt)
	for i := 0; i < seriesCnt; i++ {
		labelSets[i] = labels.FromStrings("seriesId", strconv.Itoa(i))
	}

	head.Init(0)

	g, ctx := errgroup.WithContext(context.Background())
	whileNotCanceled := func(f func() (bool, error)) error {
		for ctx.Err() == nil {
			cont, err := f()
			if err != nil {
				return err
			}
			if !cont {
				return nil
			}
		}
		return nil
	}

	// Create one channel for each write worker, the channels will be used by the coordinator
	// goroutine to coordinate which timestamps each write worker has to write.
	writerTsCh := make([]chan uint64, writeConcurrency)
	for writerTsChIdx := range writerTsCh {
		writerTsCh[writerTsChIdx] = make(chan uint64)
	}

	// workerReadyWg is used to synchronize the start of the test,
	// we only start the test once all workers signal that they're ready.
	var workerReadyWg sync.WaitGroup
	workerReadyWg.Add(writeConcurrency + readConcurrency)

	// Start the write workers.
	for wid := 0; wid < writeConcurrency; wid++ {
		// Create copy of workerID to be used by worker routine.
		workerID := wid

		g.Go(func() error {
			// The label sets which this worker will write.
			workerLabelSets := labelSets[(seriesCnt/writeConcurrency)*workerID : (seriesCnt/writeConcurrency)*(workerID+1)]

			// Signal that this worker is ready.
			workerReadyWg.Done()

			return whileNotCanceled(func() (bool, error) {
				ts, ok := <-writerTsCh[workerID]
				if !ok {
					return false, nil
				}

				app := head.Appender(ctx)
				for i := 0; i < len(workerLabelSets); i++ {
					// We also use the timestamp as the sample value.
					_, err := app.Append(0, workerLabelSets[i], int64(ts), float64(ts))
					if err != nil {
						return false, fmt.Errorf("error when appending to head: %w", err)
					}
				}

				return true, app.Commit()
			})
		})
	}

	// queryHead is a helper to query the head for a given time range and labelset.
	queryHead := func(mint, maxt uint64, label labels.Label) (map[string][]tsdbutil.Sample, error) {
		q, err := NewBlockQuerier(head, int64(mint), int64(maxt))
		if err != nil {
			return nil, err
		}
		return query(t, q, labels.MustNewMatcher(labels.MatchEqual, label.Name, label.Value)), nil
	}

	// readerTsCh will be used by the coordinator goroutine to coordinate which timestamps the reader should read.
	readerTsCh := make(chan uint64)

	// Start the read workers.
	for wid := 0; wid < readConcurrency; wid++ {
		// Create copy of threadID to be used by worker routine.
		workerID := wid

		g.Go(func() error {
			querySeriesRef := (seriesCnt / readConcurrency) * workerID

			// Signal that this worker is ready.
			workerReadyWg.Done()

			return whileNotCanceled(func() (bool, error) {
				ts, ok := <-readerTsCh
				if !ok {
					return false, nil
				}

				querySeriesRef = (querySeriesRef + 1) % seriesCnt
				lbls := labelSets[querySeriesRef]
				samples, err := queryHead(ts-qryRange, ts, lbls[0])
				if err != nil {
					return false, err
				}

				if len(samples) != 1 {
					return false, fmt.Errorf("expected 1 series, got %d", len(samples))
				}

				series := lbls.String()
				expectSampleCnt := qryRange/step + 1
				if expectSampleCnt != uint64(len(samples[series])) {
					return false, fmt.Errorf("expected %d samples, got %d", expectSampleCnt, len(samples[series]))
				}

				for sampleIdx, sample := range samples[series] {
					expectedValue := ts - qryRange + (uint64(sampleIdx) * step)
					if sample.T() != int64(expectedValue) {
						return false, fmt.Errorf("expected sample %d to have ts %d, got %d", sampleIdx, expectedValue, sample.T())
					}
					if sample.V() != float64(expectedValue) {
						return false, fmt.Errorf("expected sample %d to have value %d, got %f", sampleIdx, expectedValue, sample.V())
					}
				}

				return true, nil
			})
		})
	}

	// Start the coordinator goroutine.
	g.Go(func() error {
		currTs := startTs

		defer func() {
			// End of the test, close all channels to stop the workers.
			for _, ch := range writerTsCh {
				close(ch)
			}
			close(readerTsCh)
		}()

		// Wait until all workers are ready to start the test.
		workerReadyWg.Wait()
		return whileNotCanceled(func() (bool, error) {
			// Send the current timestamp to each of the writers.
			for _, ch := range writerTsCh {
				select {
				case ch <- currTs:
				case <-ctx.Done():
					return false, nil
				}
			}

			// Once data for at least <qryRange> has been ingested, send the current timestamp to the readers.
			if currTs > startTs+qryRange {
				select {
				case readerTsCh <- currTs - step:
				case <-ctx.Done():
					return false, nil
				}
			}

			currTs += step
			if currTs > endTs {
				return false, nil
			}

			return true, nil
		})
	})

	require.NoError(t, g.Wait())
}
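
// TestHead_ReadWAL replays a hand-crafted WAL and verifies that series,
// samples, tombstones and exemplars are restored correctly, including the
// duplicate-series-record case where two refs point at the same labels.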
func TestHead_ReadWAL(t *testing.T) {
	for _, compress := range []bool{false, true} {
		t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) {
			entries := []interface{}{
				[]record.RefSeries{
					{Ref: 10, Labels: labels.FromStrings("a", "1")},
					{Ref: 11, Labels: labels.FromStrings("a", "2")},
					{Ref: 100, Labels: labels.FromStrings("a", "3")},
				},
				[]record.RefSample{
					{Ref: 0, T: 99, V: 1},
					{Ref: 10, T: 100, V: 2},
					{Ref: 100, T: 100, V: 3},
				},
				[]record.RefSeries{
					{Ref: 50, Labels: labels.FromStrings("a", "4")},
					// This series has two refs pointing to it.
					{Ref: 101, Labels: labels.FromStrings("a", "3")},
				},
				[]record.RefSample{
					{Ref: 10, T: 101, V: 5},
					{Ref: 50, T: 101, V: 6},
					{Ref: 101, T: 101, V: 7},
				},
				[]tombstones.Stone{
					{Ref: 0, Intervals: []tombstones.Interval{{Mint: 99, Maxt: 101}}},
				},
				[]record.RefExemplar{
					{Ref: 10, T: 100, V: 1, Labels: labels.FromStrings("traceID", "asdf")},
				},
			}

			head, w := newTestHead(t, 1000, compress)
			defer func() {
				require.NoError(t, head.Close())
			}()

			populateTestWAL(t, w, entries)

			require.NoError(t, head.Init(math.MinInt64))
			require.Equal(t, uint64(101), head.lastSeriesID.Load())

			s10 := head.series.getByID(10)
			s11 := head.series.getByID(11)
			s50 := head.series.getByID(50)
			s100 := head.series.getByID(100)

			require.Equal(t, labels.FromStrings("a", "1"), s10.lset)
			require.Equal(t, (*memSeries)(nil), s11) // Series without samples should be garbage collected at head.Init().
			require.Equal(t, labels.FromStrings("a", "4"), s50.lset)
			require.Equal(t, labels.FromStrings("a", "3"), s100.lset)

			expandChunk := func(c chunkenc.Iterator) (x []sample) {
				for c.Next() {
					t, v := c.At()
					x = append(x, sample{t: t, v: v})
				}
				require.NoError(t, c.Err())
				return x
			}
			require.Equal(t, []sample{{100, 2}, {101, 5}}, expandChunk(s10.iterator(0, nil, head.chunkDiskMapper, nil)))
			require.Equal(t, []sample{{101, 6}}, expandChunk(s50.iterator(0, nil, head.chunkDiskMapper, nil)))
			// The samples before the new series record should be discarded since a duplicate record
			// is only possible when old samples were compacted.
			require.Equal(t, []sample{{101, 7}}, expandChunk(s100.iterator(0, nil, head.chunkDiskMapper, nil)))

			q, err := head.ExemplarQuerier(context.Background())
			require.NoError(t, err)
			e, err := q.Select(0, 1000, []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "1")})
			require.NoError(t, err)
			require.Equal(t, e[0].Exemplars[0], exemplar.Exemplar{Ts: 100, Value: 1, Labels: labels.FromStrings("traceID", "asdf")})
		})
	}
}
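
// TestHead_WALMultiRef creates two series records with the same labels but
// different refs (by appending, truncating the Head, and appending again) and
// checks that a restarted Head keeps only the samples of the newer ref.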
func TestHead_WALMultiRef(t *testing.T) {
	head, w := newTestHead(t, 1000, false)

	require.NoError(t, head.Init(0))

	app := head.Appender(context.Background())
	ref1, err := app.Append(0, labels.FromStrings("foo", "bar"), 100, 1)
	require.NoError(t, err)
	require.NoError(t, app.Commit())
	require.Equal(t, 1.0, prom_testutil.ToFloat64(head.metrics.chunksCreated))

	// Add another sample outside chunk range to mmap a chunk.
	app = head.Appender(context.Background())
	_, err = app.Append(0, labels.FromStrings("foo", "bar"), 1500, 2)
	require.NoError(t, err)
	require.NoError(t, app.Commit())
	require.Equal(t, 2.0, prom_testutil.ToFloat64(head.metrics.chunksCreated))

	require.NoError(t, head.Truncate(1600))

	app = head.Appender(context.Background())
	ref2, err := app.Append(0, labels.FromStrings("foo", "bar"), 1700, 3)
	require.NoError(t, err)
	require.NoError(t, app.Commit())
	require.Equal(t, 3.0, prom_testutil.ToFloat64(head.metrics.chunksCreated))

	// Add another sample outside chunk range to mmap a chunk.
	app = head.Appender(context.Background())
	_, err = app.Append(0, labels.FromStrings("foo", "bar"), 2000, 4)
	require.NoError(t, err)
	require.NoError(t, app.Commit())
	require.Equal(t, 4.0, prom_testutil.ToFloat64(head.metrics.chunksCreated))

	require.NotEqual(t, ref1, ref2, "Refs are the same")
	require.NoError(t, head.Close())

	w, err = wal.New(nil, nil, w.Dir(), false)
	require.NoError(t, err)

	opts := DefaultHeadOptions()
	opts.ChunkRange = 1000
	opts.ChunkDirRoot = w.Dir()
	head, err = NewHead(nil, nil, w, opts, nil)
	require.NoError(t, err)
	require.NoError(t, head.Init(0))
	defer func() {
		require.NoError(t, head.Close())
	}()

	q, err := NewBlockQuerier(head, 0, 2100)
	require.NoError(t, err)
	series := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
	// The samples before the new ref should be discarded since Head truncation
	// happens only after compacting the Head.
	require.Equal(t, map[string][]tsdbutil.Sample{`{foo="bar"}`: {
		sample{1700, 3},
		sample{2000, 4},
	}}, series)
}
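
// TestHead_ActiveAppenders checks that the active-appenders metric is
// incremented while an appender is open and drops back to zero after both
// Commit and Rollback, with and without pending samples.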
func TestHead_ActiveAppenders(t *testing.T) {
	head, _ := newTestHead(t, 1000, false)
	defer head.Close()

	require.NoError(t, head.Init(0))

	// First rollback with no samples.
	app := head.Appender(context.Background())
	require.Equal(t, 1.0, prom_testutil.ToFloat64(head.metrics.activeAppenders))
	require.NoError(t, app.Rollback())
	require.Equal(t, 0.0, prom_testutil.ToFloat64(head.metrics.activeAppenders))

	// Then commit with no samples.
	app = head.Appender(context.Background())
	require.NoError(t, app.Commit())
	require.Equal(t, 0.0, prom_testutil.ToFloat64(head.metrics.activeAppenders))

	// Now rollback with one sample.
	app = head.Appender(context.Background())
	_, err := app.Append(0, labels.FromStrings("foo", "bar"), 100, 1)
	require.NoError(t, err)
	require.Equal(t, 1.0, prom_testutil.ToFloat64(head.metrics.activeAppenders))
	require.NoError(t, app.Rollback())
	require.Equal(t, 0.0, prom_testutil.ToFloat64(head.metrics.activeAppenders))

	// Now commit with one sample.
	app = head.Appender(context.Background())
	_, err = app.Append(0, labels.FromStrings("foo", "bar"), 100, 1)
	require.NoError(t, err)
	require.NoError(t, app.Commit())
	require.Equal(t, 0.0, prom_testutil.ToFloat64(head.metrics.activeAppenders))
}

func TestHead_UnknownWALRecord(t *testing.T) {
	head, w := newTestHead(t, 1000, false)
	w.Log([]byte{255, 42})
	require.NoError(t, head.Init(0))
	require.NoError(t, head.Close())
}

func TestHead_Truncate(t *testing.T) {
	h, _ := newTestHead(t, 1000, false)
	defer func() {
		require.NoError(t, h.Close())
	}()

	h.initTime(0)

	s1, _, _ := h.getOrCreate(1, labels.FromStrings("a", "1", "b", "1"))
	s2, _, _ := h.getOrCreate(2, labels.FromStrings("a", "2", "b", "1"))
	s3, _, _ := h.getOrCreate(3, labels.FromStrings("a", "1", "b", "2"))
	s4, _, _ := h.getOrCreate(4, labels.FromStrings("a", "2", "b", "2", "c", "1"))

	s1.mmappedChunks = []*mmappedChunk{
		{minTime: 0, maxTime: 999},
		{minTime: 1000, maxTime: 1999},
		{minTime: 2000, maxTime: 2999},
	}
	s2.mmappedChunks = []*mmappedChunk{
		{minTime: 1000, maxTime: 1999},
		{minTime: 2000, maxTime: 2999},
		{minTime: 3000, maxTime: 3999},
	}
	s3.mmappedChunks = []*mmappedChunk{
		{minTime: 0, maxTime: 999},
		{minTime: 1000, maxTime: 1999},
	}
	s4.mmappedChunks = []*mmappedChunk{}

	// Truncation need not be aligned.
	require.NoError(t, h.Truncate(1))

	require.NoError(t, h.Truncate(2000))

	require.Equal(t, []*mmappedChunk{
		{minTime: 2000, maxTime: 2999},
	}, h.series.getByID(s1.ref).mmappedChunks)

	require.Equal(t, []*mmappedChunk{
		{minTime: 2000, maxTime: 2999},
		{minTime: 3000, maxTime: 3999},
	}, h.series.getByID(s2.ref).mmappedChunks)

	require.Nil(t, h.series.getByID(s3.ref))
	require.Nil(t, h.series.getByID(s4.ref))

	postingsA1, _ := index.ExpandPostings(h.postings.Get("a", "1"))
	postingsA2, _ := index.ExpandPostings(h.postings.Get("a", "2"))
	postingsB1, _ := index.ExpandPostings(h.postings.Get("b", "1"))
	postingsB2, _ := index.ExpandPostings(h.postings.Get("b", "2"))
	postingsC1, _ := index.ExpandPostings(h.postings.Get("c", "1"))
	postingsAll, _ := index.ExpandPostings(h.postings.Get("", ""))

	require.Equal(t, []storage.SeriesRef{storage.SeriesRef(s1.ref)}, postingsA1)
	require.Equal(t, []storage.SeriesRef{storage.SeriesRef(s2.ref)}, postingsA2)
	require.Equal(t, []storage.SeriesRef{storage.SeriesRef(s1.ref), storage.SeriesRef(s2.ref)}, postingsB1)
	require.Equal(t, []storage.SeriesRef{storage.SeriesRef(s1.ref), storage.SeriesRef(s2.ref)}, postingsAll)
	require.Nil(t, postingsB2)
	require.Nil(t, postingsC1)

	iter := h.postings.Symbols()
	symbols := []string{}
	for iter.Next() {
		symbols = append(symbols, iter.At())
	}
	require.Equal(t,
		[]string{"" /* from 'all' postings list */, "1", "2", "a", "b"},
		symbols)

	values := map[string]map[string]struct{}{}
	for _, name := range h.postings.LabelNames() {
		ss, ok := values[name]
		if !ok {
			ss = map[string]struct{}{}
			values[name] = ss
		}
		for _, value := range h.postings.LabelValues(name) {
			ss[value] = struct{}{}
		}
	}
	require.Equal(t, map[string]map[string]struct{}{
		"a": {"1": struct{}{}, "2": struct{}{}},
		"b": {"1": struct{}{}},
	}, values)
}

// Validate various behaviors brought on by firstChunkID accounting for
// garbage collected chunks.
func TestMemSeries_truncateChunks(t *testing.T) {
	dir := t.TempDir()
	// This is usually taken from the Head, but passing manually here.
	chunkDiskMapper, err := chunks.NewChunkDiskMapper(nil, dir, chunkenc.NewPool(), chunks.DefaultWriteBufferSize, chunks.DefaultWriteQueueSize)
	require.NoError(t, err)
	defer func() {
		require.NoError(t, chunkDiskMapper.Close())
	}()

	memChunkPool := sync.Pool{
		New: func() interface{} {
			return &memChunk{}
		},
	}

	s := newMemSeries(labels.FromStrings("a", "b"), 1, 2000, &memChunkPool, defaultIsolationDisabled)

	for i := 0; i < 4000; i += 5 {
		ok, _ := s.append(int64(i), float64(i), 0, chunkDiskMapper)
		require.True(t, ok, "sample append failed")
	}

	// Check that truncate removes half of the chunks, and that the ID of the
	// last chunk still gives us the same chunk afterwards.
	countBefore := len(s.mmappedChunks) + 1 // +1 for the head chunk.
	lastID := s.headChunkID(countBefore - 1)
	lastChunk, _, err := s.chunk(lastID, chunkDiskMapper)
	require.NoError(t, err)
	require.NotNil(t, lastChunk)

	chk, _, err := s.chunk(0, chunkDiskMapper)
	require.NotNil(t, chk)
	require.NoError(t, err)

	s.truncateChunksBefore(2000)

	require.Equal(t, int64(2000), s.mmappedChunks[0].minTime)
	_, _, err = s.chunk(0, chunkDiskMapper)
	require.Equal(t, storage.ErrNotFound, err, "first chunks not gone")
	require.Equal(t, countBefore/2, len(s.mmappedChunks)+1) // +1 for the head chunk.
	chk, _, err = s.chunk(lastID, chunkDiskMapper)
	require.NoError(t, err)
	require.Equal(t, lastChunk, chk)

	// Validate that the series' sample buffer is applied correctly to the last chunk
	// after truncation.
	it1 := s.iterator(s.headChunkID(len(s.mmappedChunks)), nil, chunkDiskMapper, nil)
	_, ok := it1.(*memSafeIterator)
	require.True(t, ok)

	it2 := s.iterator(s.headChunkID(len(s.mmappedChunks)-1), nil, chunkDiskMapper, nil)
	_, ok = it2.(*memSafeIterator)
	require.False(t, ok, "non-last chunk incorrectly wrapped with sample buffer")
}
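
// TestHeadDeleteSeriesWithoutSamples ensures that deleting a series that was
// written to the WAL without any samples does not error.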
func TestHeadDeleteSeriesWithoutSamples(t *testing.T) {
	for _, compress := range []bool{false, true} {
		t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) {
			entries := []interface{}{
				[]record.RefSeries{
					{Ref: 10, Labels: labels.FromStrings("a", "1")},
				},
				[]record.RefSample{},
				[]record.RefSeries{
					{Ref: 50, Labels: labels.FromStrings("a", "2")},
				},
				[]record.RefSample{
					{Ref: 50, T: 80, V: 1},
					{Ref: 50, T: 90, V: 1},
				},
			}

			head, w := newTestHead(t, 1000, compress)
			defer func() {
				require.NoError(t, head.Close())
			}()

			populateTestWAL(t, w, entries)

			require.NoError(t, head.Init(math.MinInt64))

			require.NoError(t, head.Delete(0, 100, labels.MustNewMatcher(labels.MatchEqual, "a", "1")))
		})
	}
}

func TestHeadDeleteSimple(t *testing.T) {
	buildSmpls := func(s []int64) []sample {
		ss := make([]sample, 0, len(s))
		for _, t := range s {
			ss = append(ss, sample{t: t, v: float64(t)})
		}
		return ss
	}
	smplsAll := buildSmpls([]int64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
	lblDefault := labels.Label{Name: "a", Value: "b"}

	cases := []struct {
		dranges    tombstones.Intervals
		addSamples []sample // Samples to add after delete.
		smplsExp   []sample
	}{
		{
			dranges:  tombstones.Intervals{{Mint: 0, Maxt: 3}},
			smplsExp: buildSmpls([]int64{4, 5, 6, 7, 8, 9}),
		},
		{
			dranges:  tombstones.Intervals{{Mint: 1, Maxt: 3}},
			smplsExp: buildSmpls([]int64{0, 4, 5, 6, 7, 8, 9}),
		},
		{
			dranges:  tombstones.Intervals{{Mint: 1, Maxt: 3}, {Mint: 4, Maxt: 7}},
			smplsExp: buildSmpls([]int64{0, 8, 9}),
		},
		{
			dranges:  tombstones.Intervals{{Mint: 1, Maxt: 3}, {Mint: 4, Maxt: 700}},
			smplsExp: buildSmpls([]int64{0}),
		},
		{ // This case is to ensure that labels and symbols are deleted.
			dranges:  tombstones.Intervals{{Mint: 0, Maxt: 9}},
			smplsExp: buildSmpls([]int64{}),
		},
		{
			dranges:    tombstones.Intervals{{Mint: 1, Maxt: 3}},
			addSamples: buildSmpls([]int64{11, 13, 15}),
			smplsExp:   buildSmpls([]int64{0, 4, 5, 6, 7, 8, 9, 11, 13, 15}),
		},
		{
			// After delete, the appended samples in the deleted range should be visible
			// as the tombstones are clamped to head min/max time.
			dranges:    tombstones.Intervals{{Mint: 7, Maxt: 20}},
			addSamples: buildSmpls([]int64{11, 13, 15}),
			smplsExp:   buildSmpls([]int64{0, 1, 2, 3, 4, 5, 6, 11, 13, 15}),
		},
	}

	for _, compress := range []bool{false, true} {
		t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) {
			for _, c := range cases {
				head, w := newTestHead(t, 1000, compress)

				app := head.Appender(context.Background())
				for _, smpl := range smplsAll {
					_, err := app.Append(0, labels.Labels{lblDefault}, smpl.t, smpl.v)
					require.NoError(t, err)
				}
				require.NoError(t, app.Commit())

				// Delete the ranges.
				for _, r := range c.dranges {
					require.NoError(t, head.Delete(r.Mint, r.Maxt, labels.MustNewMatcher(labels.MatchEqual, lblDefault.Name, lblDefault.Value)))
				}

				// Add more samples.
				app = head.Appender(context.Background())
				for _, smpl := range c.addSamples {
					_, err := app.Append(0, labels.Labels{lblDefault}, smpl.t, smpl.v)
					require.NoError(t, err)
				}
				require.NoError(t, app.Commit())

				// Compare the samples for both heads - before and after the reloadBlocks.
				reloadedW, err := wal.New(nil, nil, w.Dir(), compress) // Use a new wal to ensure deleted samples are gone even after a reloadBlocks.
				require.NoError(t, err)
				opts := DefaultHeadOptions()
				opts.ChunkRange = 1000
				opts.ChunkDirRoot = reloadedW.Dir()
				reloadedHead, err := NewHead(nil, nil, reloadedW, opts, nil)
				require.NoError(t, err)
				require.NoError(t, reloadedHead.Init(0))

				// Compare the query results for both heads - before and after the reloadBlocks.
			Outer:
				for _, h := range []*Head{head, reloadedHead} {
					q, err := NewBlockQuerier(h, h.MinTime(), h.MaxTime())
					require.NoError(t, err)
					actSeriesSet := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, lblDefault.Name, lblDefault.Value))
					require.NoError(t, q.Close())
					expSeriesSet := newMockSeriesSet([]storage.Series{
						storage.NewListSeries(labels.Labels{lblDefault}, func() []tsdbutil.Sample {
							ss := make([]tsdbutil.Sample, 0, len(c.smplsExp))
							for _, s := range c.smplsExp {
								ss = append(ss, s)
							}
							return ss
						}(),
						),
					})

					for {
						eok, rok := expSeriesSet.Next(), actSeriesSet.Next()
						require.Equal(t, eok, rok)

						if !eok {
							require.NoError(t, h.Close())
							require.NoError(t, actSeriesSet.Err())
							require.Equal(t, 0, len(actSeriesSet.Warnings()))
							continue Outer
						}
						expSeries := expSeriesSet.At()
						actSeries := actSeriesSet.At()

						require.Equal(t, expSeries.Labels(), actSeries.Labels())

						smplExp, errExp := storage.ExpandSamples(expSeries.Iterator(), nil)
						smplRes, errRes := storage.ExpandSamples(actSeries.Iterator(), nil)

						require.Equal(t, errExp, errRes)
						require.Equal(t, smplExp, smplRes)
					}
				}
			}
		})
	}
}
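
// TestDeleteUntilCurMax deletes every sample up to the Head's current max time
// and verifies that the series sticks around, empty, until a new sample makes
// it queryable again.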
func TestDeleteUntilCurMax(t *testing.T) {
	hb, _ := newTestHead(t, 1000000, false)
	defer func() {
		require.NoError(t, hb.Close())
	}()

	numSamples := int64(10)
	app := hb.Appender(context.Background())
	smpls := make([]float64, numSamples)
	for i := int64(0); i < numSamples; i++ {
		smpls[i] = rand.Float64()
		_, err := app.Append(0, labels.Labels{{Name: "a", Value: "b"}}, i, smpls[i])
		require.NoError(t, err)
	}
	require.NoError(t, app.Commit())
	require.NoError(t, hb.Delete(0, 10000, labels.MustNewMatcher(labels.MatchEqual, "a", "b")))

	// Check that the series returns no samples. The series is cleared only after compaction.
	q, err := NewBlockQuerier(hb, 0, 100000)
	require.NoError(t, err)
	res := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
	require.True(t, res.Next(), "series is not present")
	s := res.At()
	it := s.Iterator()
	require.False(t, it.Next(), "expected no samples")
	for res.Next() {
	}
	require.NoError(t, res.Err())
	require.Equal(t, 0, len(res.Warnings()))

	// Add again and test for presence.
	app = hb.Appender(context.Background())
	_, err = app.Append(0, labels.Labels{{Name: "a", Value: "b"}}, 11, 1)
	require.NoError(t, err)
	require.NoError(t, app.Commit())
	q, err = NewBlockQuerier(hb, 0, 100000)
	require.NoError(t, err)
	res = q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
	require.True(t, res.Next(), "series does not exist")
	exps := res.At()
	it = exps.Iterator()
	resSamples, err := storage.ExpandSamples(it, newSample)
	require.NoError(t, err)
	require.Equal(t, []tsdbutil.Sample{sample{11, 1}}, resSamples)
	for res.Next() {
	}
	require.NoError(t, res.Err())
	require.Equal(t, 0, len(res.Warnings()))
}
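
// TestDeletedSamplesAndSeriesStillInWALAfterCheckpoint writes enough samples
// to trigger a checkpoint on truncation, then confirms that the series record,
// the tombstone, and 9999 of the 10000 sample records survive in the
// checkpoint plus the remaining WAL segments.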
func TestDeletedSamplesAndSeriesStillInWALAfterCheckpoint(t *testing.T) {
	numSamples := 10000

	// Enough samples to cause a checkpoint.
	hb, w := newTestHead(t, int64(numSamples)*10, false)

	for i := 0; i < numSamples; i++ {
		app := hb.Appender(context.Background())
		_, err := app.Append(0, labels.Labels{{Name: "a", Value: "b"}}, int64(i), 0)
		require.NoError(t, err)
		require.NoError(t, app.Commit())
	}
	require.NoError(t, hb.Delete(0, int64(numSamples), labels.MustNewMatcher(labels.MatchEqual, "a", "b")))
	require.NoError(t, hb.Truncate(1))
	require.NoError(t, hb.Close())

	// Confirm there's been a checkpoint.
	cdir, _, err := wal.LastCheckpoint(w.Dir())
	require.NoError(t, err)
	// Read in checkpoint and WAL.
	recs := readTestWAL(t, cdir)
	recs = append(recs, readTestWAL(t, w.Dir())...)

	var series, samples, stones int
	for _, rec := range recs {
		switch rec.(type) {
		case []record.RefSeries:
			series++
		case []record.RefSample:
			samples++
		case []tombstones.Stone:
			stones++
		default:
			t.Fatalf("unknown record type")
		}
	}
	require.Equal(t, 1, series)
	require.Equal(t, 9999, samples)
	require.Equal(t, 1, stones)
}

func TestDelete_e2e(t *testing.T) {
	numDatapoints := 1000
	numRanges := 1000
	timeInterval := int64(2)
	// Create 8 series with 1000 data-points of different ranges, delete and run queries.
	lbls := [][]labels.Label{
		{
			{Name: "a", Value: "b"},
			{Name: "instance", Value: "localhost:9090"},
			{Name: "job", Value: "prometheus"},
		},
		{
			{Name: "a", Value: "b"},
			{Name: "instance", Value: "127.0.0.1:9090"},
			{Name: "job", Value: "prometheus"},
		},
		{
			{Name: "a", Value: "b"},
			{Name: "instance", Value: "127.0.0.1:9090"},
			{Name: "job", Value: "prom-k8s"},
		},
		{
			{Name: "a", Value: "b"},
			{Name: "instance", Value: "localhost:9090"},
			{Name: "job", Value: "prom-k8s"},
		},
		{
			{Name: "a", Value: "c"},
			{Name: "instance", Value: "localhost:9090"},
			{Name: "job", Value: "prometheus"},
		},
		{
			{Name: "a", Value: "c"},
			{Name: "instance", Value: "127.0.0.1:9090"},
			{Name: "job", Value: "prometheus"},
		},
		{
			{Name: "a", Value: "c"},
			{Name: "instance", Value: "127.0.0.1:9090"},
			{Name: "job", Value: "prom-k8s"},
		},
		{
			{Name: "a", Value: "c"},
			{Name: "instance", Value: "localhost:9090"},
			{Name: "job", Value: "prom-k8s"},
		},
	}
	seriesMap := map[string][]tsdbutil.Sample{}
	for _, l := range lbls {
		seriesMap[labels.New(l...).String()] = []tsdbutil.Sample{}
	}

	hb, _ := newTestHead(t, 100000, false)
	defer func() {
		require.NoError(t, hb.Close())
	}()

	app := hb.Appender(context.Background())
	for _, l := range lbls {
		ls := labels.New(l...)
		series := []tsdbutil.Sample{}
		ts := rand.Int63n(300)
		for i := 0; i < numDatapoints; i++ {
			v := rand.Float64()
			_, err := app.Append(0, ls, ts, v)
			require.NoError(t, err)
			series = append(series, sample{ts, v})
			ts += rand.Int63n(timeInterval) + 1
		}
		seriesMap[labels.New(l...).String()] = series
	}
	require.NoError(t, app.Commit())

	// Delete a time-range from each selector.
	dels := []struct {
		ms     []*labels.Matcher
		drange tombstones.Intervals
	}{
		{
			ms:     []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "b")},
			drange: tombstones.Intervals{{Mint: 300, Maxt: 500}, {Mint: 600, Maxt: 670}},
		},
		{
			ms: []*labels.Matcher{
				labels.MustNewMatcher(labels.MatchEqual, "a", "b"),
				labels.MustNewMatcher(labels.MatchEqual, "job", "prom-k8s"),
			},
			drange: tombstones.Intervals{{Mint: 300, Maxt: 500}, {Mint: 100, Maxt: 670}},
		},
		{
			ms: []*labels.Matcher{
				labels.MustNewMatcher(labels.MatchEqual, "a", "c"),
				labels.MustNewMatcher(labels.MatchEqual, "instance", "localhost:9090"),
				labels.MustNewMatcher(labels.MatchEqual, "job", "prometheus"),
			},
			drange: tombstones.Intervals{{Mint: 300, Maxt: 400}, {Mint: 100, Maxt: 6700}},
		},
		// TODO: Add Regexp Matchers.
	}
	for _, del := range dels {
		for _, r := range del.drange {
			require.NoError(t, hb.Delete(r.Mint, r.Maxt, del.ms...))
		}
		matched := labels.Slice{}
		for _, ls := range lbls {
			s := labels.Selector(del.ms)
			if s.Matches(ls) {
				matched = append(matched, ls)
			}
		}
		sort.Sort(matched)
		for i := 0; i < numRanges; i++ {
			q, err := NewBlockQuerier(hb, 0, 100000)
			require.NoError(t, err)
			defer q.Close()

			ss := q.Select(true, nil, del.ms...)
			// Build the mockSeriesSet.
			matchedSeries := make([]storage.Series, 0, len(matched))
			for _, m := range matched {
				smpls := seriesMap[m.String()]
				smpls = deletedSamples(smpls, del.drange)
				// Only append those series for which samples exist as mockSeriesSet
				// doesn't skip series with no samples.
				// TODO: But sometimes SeriesSet returns an empty chunkenc.Iterator
				if len(smpls) > 0 {
					matchedSeries = append(matchedSeries, storage.NewListSeries(m, smpls))
				}
			}
			expSs := newMockSeriesSet(matchedSeries)
			// Compare both SeriesSets.
			for {
				eok, rok := expSs.Next(), ss.Next()
				// Skip a series if iterator is empty.
				if rok {
					for !ss.At().Iterator().Next() {
						rok = ss.Next()
						if !rok {
							break
						}
					}
				}
				require.Equal(t, eok, rok)
				if !eok {
					break
				}
				sexp := expSs.At()
				sres := ss.At()
				require.Equal(t, sexp.Labels(), sres.Labels())
				smplExp, errExp := storage.ExpandSamples(sexp.Iterator(), nil)
				smplRes, errRes := storage.ExpandSamples(sres.Iterator(), nil)
				require.Equal(t, errExp, errRes)
				require.Equal(t, smplExp, smplRes)
			}
			require.NoError(t, ss.Err())
			require.Equal(t, 0, len(ss.Warnings()))
		}
	}
}

func boundedSamples(full []tsdbutil.Sample, mint, maxt int64) []tsdbutil.Sample {
	for len(full) > 0 {
		if full[0].T() >= mint {
			break
		}
		full = full[1:]
	}
	for i, s := range full {
		// Terminate on the first sample larger than maxt.
		if s.T() > maxt {
			return full[:i]
		}
	}
	// maxt is after highest sample.
	return full
}

func deletedSamples(full []tsdbutil.Sample, dranges tombstones.Intervals) []tsdbutil.Sample {
	ds := make([]tsdbutil.Sample, 0, len(full))
Outer:
	for _, s := range full {
		for _, r := range dranges {
			if r.InBounds(s.T()) {
				continue Outer
			}
		}
		ds = append(ds, s)
	}

	return ds
}
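
// The expected values below follow from computeChunkEndTime's estimate, which
// extrapolates from the samples seen so far so that roughly four chunks fit
// per chunk range. For instance, with start=100, cur=200 and max=1000,
// (1000-100)/((200-100+1)*4) = 2 whole chunks fit, so the estimated end time
// is 100 + 900/2 = 550; when 0 chunks fit, the estimate falls back to max
// (see the division-by-zero cases below).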
func TestComputeChunkEndTime(t *testing.T) {
	cases := []struct {
		start, cur, max int64
		res             int64
	}{
		{
			start: 0,
			cur:   250,
			max:   1000,
			res:   1000,
		},
		{
			start: 100,
			cur:   200,
			max:   1000,
			res:   550,
		},
		// Case where we fit floored 0 chunks. Must catch division by 0
		// and default to maximum time.
		{
			start: 0,
			cur:   500,
			max:   1000,
			res:   1000,
		},
		// Catch division by zero for cur == start. Strictly not a possible case.
		{
			start: 100,
			cur:   100,
			max:   1000,
			res:   104,
		},
	}

	for _, c := range cases {
		got := computeChunkEndTime(c.start, c.cur, c.max)
		if got != c.res {
			t.Errorf("expected %d for (start: %d, cur: %d, max: %d), got %d", c.res, c.start, c.cur, c.max, got)
		}
	}
}

func TestMemSeries_append(t *testing.T) {
	dir := t.TempDir()
	// This is usually taken from the Head, but passing manually here.
	chunkDiskMapper, err := chunks.NewChunkDiskMapper(nil, dir, chunkenc.NewPool(), chunks.DefaultWriteBufferSize, chunks.DefaultWriteQueueSize)
	require.NoError(t, err)
	defer func() {
		require.NoError(t, chunkDiskMapper.Close())
	}()

	s := newMemSeries(labels.Labels{}, 1, 500, nil, defaultIsolationDisabled)

	// Add first two samples at the very end of a chunk range and the next two
	// on and after it.
	// New chunk must correctly be cut at 1000.
	ok, chunkCreated := s.append(998, 1, 0, chunkDiskMapper)
	require.True(t, ok, "append failed")
	require.True(t, chunkCreated, "first sample created chunk")

	ok, chunkCreated = s.append(999, 2, 0, chunkDiskMapper)
	require.True(t, ok, "append failed")
	require.False(t, chunkCreated, "second sample should use same chunk")

	ok, chunkCreated = s.append(1000, 3, 0, chunkDiskMapper)
	require.True(t, ok, "append failed")
	require.True(t, chunkCreated, "expected new chunk on boundary")

	ok, chunkCreated = s.append(1001, 4, 0, chunkDiskMapper)
	require.True(t, ok, "append failed")
	require.False(t, chunkCreated, "second sample should use same chunk")

	require.Equal(t, 1, len(s.mmappedChunks), "there should be only 1 mmapped chunk")
	require.Equal(t, int64(998), s.mmappedChunks[0].minTime, "wrong chunk range")
	require.Equal(t, int64(999), s.mmappedChunks[0].maxTime, "wrong chunk range")
	require.Equal(t, int64(1000), s.headChunk.minTime, "wrong chunk range")
	require.Equal(t, int64(1001), s.headChunk.maxTime, "wrong chunk range")

	// Fill the range [1000,2000) with many samples. Intermediate chunks should be cut
	// at approximately 120 samples per chunk.
	for i := 1; i < 1000; i++ {
		ok, _ := s.append(1001+int64(i), float64(i), 0, chunkDiskMapper)
		require.True(t, ok, "append failed")
	}

	require.Greater(t, len(s.mmappedChunks)+1, 7, "expected intermediate chunks")

	// All chunks but the first and last should now be moderately full.
	for i, c := range s.mmappedChunks[1:] {
		chk, err := chunkDiskMapper.Chunk(c.ref)
		require.NoError(t, err)
		require.Greater(t, chk.NumSamples(), 100, "unexpected small chunk %d of length %d", i, chk.NumSamples())
	}
}

func TestMemSeries_append_atVariableRate(t *testing.T) {
	const samplesPerChunk = 120
	dir := t.TempDir()
	// This is usually taken from the Head, but passed manually here.
	chunkDiskMapper, err := chunks.NewChunkDiskMapper(nil, dir, chunkenc.NewPool(), chunks.DefaultWriteBufferSize, chunks.DefaultWriteQueueSize)
	require.NoError(t, err)
	t.Cleanup(func() {
		require.NoError(t, chunkDiskMapper.Close())
	})
	s := newMemSeries(labels.Labels{}, 1, DefaultBlockDuration, nil, defaultIsolationDisabled)

	// At this slow rate, we will fill the chunk in two block durations.
	slowRate := (DefaultBlockDuration * 2) / samplesPerChunk
	var nextTs int64
	var totalAppendedSamples int
	for i := 0; i < samplesPerChunk/4; i++ {
		ok, _ := s.append(nextTs, float64(i), 0, chunkDiskMapper)
		require.Truef(t, ok, "slow sample %d was not appended", i)
		nextTs += slowRate
		totalAppendedSamples++
	}
	require.Equal(t, DefaultBlockDuration, s.nextAt, "after appending samplesPerChunk/4 samples at a slow rate, we should aim to cut a new block at the default block duration %d, but it's set to %d", DefaultBlockDuration, s.nextAt)

	// Suddenly, the rate increases and we receive a sample every millisecond.
	for i := 0; i < math.MaxUint16; i++ {
		ok, _ := s.append(nextTs, float64(i), 0, chunkDiskMapper)
		require.Truef(t, ok, "quick sample %d was not appended", i)
		nextTs++
		totalAppendedSamples++
	}
	ok, chunkCreated := s.append(DefaultBlockDuration, float64(0), 0, chunkDiskMapper)
	require.True(t, ok, "new chunk sample was not appended")
	require.True(t, chunkCreated, "sample at block duration timestamp should create a new chunk")

	var totalSamplesInChunks int
	for i, c := range s.mmappedChunks {
		totalSamplesInChunks += int(c.numSamples)
		require.LessOrEqualf(t, c.numSamples, uint16(2*samplesPerChunk), "mmapped chunk %d has more than %d samples", i, 2*samplesPerChunk)
	}
	require.Equal(t, totalAppendedSamples, totalSamplesInChunks, "wrong number of samples in %d mmapped chunks", len(s.mmappedChunks))
}
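
// Note on the two append tests above: the head targets roughly samplesPerChunk
// (120) samples per chunk by tracking a per-series cut deadline (s.nextAt).
// That deadline appears to be re-estimated from the observed sampling rate
// while a chunk fills (an inference from the assertions above, not a spec),
// which is why a sudden rate increase still caps every mmapped chunk at
// 2*samplesPerChunk samples instead of letting the stale slow-rate deadline apply.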

func TestGCChunkAccess(t *testing.T) {
	// Put a chunk, select it. GC it and then access it.
	h, _ := newTestHead(t, 1000, false)
	defer func() {
		require.NoError(t, h.Close())
	}()

	h.initTime(0)

	s, _, _ := h.getOrCreate(1, labels.FromStrings("a", "1"))

	// Appending 2 samples for the first chunk.
	ok, chunkCreated := s.append(0, 0, 0, h.chunkDiskMapper)
	require.True(t, ok, "series append failed")
	require.True(t, chunkCreated, "chunk was not created")
	ok, chunkCreated = s.append(999, 999, 0, h.chunkDiskMapper)
	require.True(t, ok, "series append failed")
	require.False(t, chunkCreated, "chunk was created")

	// A new chunk should be created here as it's beyond the chunk range.
	ok, chunkCreated = s.append(1000, 1000, 0, h.chunkDiskMapper)
	require.True(t, ok, "series append failed")
	require.True(t, chunkCreated, "chunk was not created")
	ok, chunkCreated = s.append(1999, 1999, 0, h.chunkDiskMapper)
	require.True(t, ok, "series append failed")
	require.False(t, chunkCreated, "chunk was created")

	idx := h.indexRange(0, 1500)
	var (
		lset   labels.Labels
		chunks []chunks.Meta
	)
	require.NoError(t, idx.Series(1, &lset, &chunks))

	require.Equal(t, labels.Labels{{
		Name: "a", Value: "1",
	}}, lset)
	require.Equal(t, 2, len(chunks))

	cr, err := h.chunksRange(0, 1500, nil)
	require.NoError(t, err)
	_, err = cr.Chunk(chunks[0].Ref)
	require.NoError(t, err)
	_, err = cr.Chunk(chunks[1].Ref)
	require.NoError(t, err)

	require.NoError(t, h.Truncate(1500)) // Remove a chunk.

	_, err = cr.Chunk(chunks[0].Ref)
	require.Equal(t, storage.ErrNotFound, err)
	_, err = cr.Chunk(chunks[1].Ref)
	require.NoError(t, err)
}

func TestGCSeriesAccess(t *testing.T) {
	// Put a series, select it. GC it and then access it.
	h, _ := newTestHead(t, 1000, false)
	defer func() {
		require.NoError(t, h.Close())
	}()

	h.initTime(0)

	s, _, _ := h.getOrCreate(1, labels.FromStrings("a", "1"))

	// Appending 2 samples for the first chunk.
	ok, chunkCreated := s.append(0, 0, 0, h.chunkDiskMapper)
	require.True(t, ok, "series append failed")
	require.True(t, chunkCreated, "chunk was not created")
	ok, chunkCreated = s.append(999, 999, 0, h.chunkDiskMapper)
	require.True(t, ok, "series append failed")
	require.False(t, chunkCreated, "chunk was created")

	// A new chunk should be created here as it's beyond the chunk range.
	ok, chunkCreated = s.append(1000, 1000, 0, h.chunkDiskMapper)
	require.True(t, ok, "series append failed")
	require.True(t, chunkCreated, "chunk was not created")
	ok, chunkCreated = s.append(1999, 1999, 0, h.chunkDiskMapper)
	require.True(t, ok, "series append failed")
	require.False(t, chunkCreated, "chunk was created")

	idx := h.indexRange(0, 2000)
	var (
		lset   labels.Labels
		chunks []chunks.Meta
	)
	require.NoError(t, idx.Series(1, &lset, &chunks))

	require.Equal(t, labels.Labels{{
		Name: "a", Value: "1",
	}}, lset)
	require.Equal(t, 2, len(chunks))

	cr, err := h.chunksRange(0, 2000, nil)
	require.NoError(t, err)
	_, err = cr.Chunk(chunks[0].Ref)
	require.NoError(t, err)
	_, err = cr.Chunk(chunks[1].Ref)
	require.NoError(t, err)

	require.NoError(t, h.Truncate(2000)) // Remove the series.

	require.Equal(t, (*memSeries)(nil), h.series.getByID(1))

	_, err = cr.Chunk(chunks[0].Ref)
	require.Equal(t, storage.ErrNotFound, err)
	_, err = cr.Chunk(chunks[1].Ref)
	require.Equal(t, storage.ErrNotFound, err)
}

func TestUncommittedSamplesNotLostOnTruncate(t *testing.T) {
	h, _ := newTestHead(t, 1000, false)
	defer func() {
		require.NoError(t, h.Close())
	}()

	h.initTime(0)

	app := h.appender()
	lset := labels.FromStrings("a", "1")
	_, err := app.Append(0, lset, 2100, 1)
	require.NoError(t, err)

	require.NoError(t, h.Truncate(2000))
	require.NotNil(t, h.series.getByHash(lset.Hash(), lset), "series should not have been garbage collected")

	require.NoError(t, app.Commit())

	q, err := NewBlockQuerier(h, 1500, 2500)
	require.NoError(t, err)
	defer q.Close()

	ss := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "1"))
	require.Equal(t, true, ss.Next())
	for ss.Next() {
	}
	require.NoError(t, ss.Err())
	require.Equal(t, 0, len(ss.Warnings()))
}

func TestRemoveSeriesAfterRollbackAndTruncate(t *testing.T) {
	h, _ := newTestHead(t, 1000, false)
	defer func() {
		require.NoError(t, h.Close())
	}()

	h.initTime(0)

	app := h.appender()
	lset := labels.FromStrings("a", "1")
	_, err := app.Append(0, lset, 2100, 1)
	require.NoError(t, err)

	require.NoError(t, h.Truncate(2000))
	require.NotNil(t, h.series.getByHash(lset.Hash(), lset), "series should not have been garbage collected")

	require.NoError(t, app.Rollback())

	q, err := NewBlockQuerier(h, 1500, 2500)
	require.NoError(t, err)

	ss := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "1"))
	require.Equal(t, false, ss.Next())
	require.Equal(t, 0, len(ss.Warnings()))
	require.NoError(t, q.Close())

	// Truncate again, this time the series should be deleted.
	require.NoError(t, h.Truncate(2050))
	require.Equal(t, (*memSeries)(nil), h.series.getByHash(lset.Hash(), lset))
}

func TestHead_LogRollback(t *testing.T) {
	for _, compress := range []bool{false, true} {
		t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) {
			h, w := newTestHead(t, 1000, compress)
			defer func() {
				require.NoError(t, h.Close())
			}()

			app := h.Appender(context.Background())
			_, err := app.Append(0, labels.FromStrings("a", "b"), 1, 2)
			require.NoError(t, err)

			require.NoError(t, app.Rollback())
			recs := readTestWAL(t, w.Dir())

			require.Equal(t, 1, len(recs))

			series, ok := recs[0].([]record.RefSeries)
			require.True(t, ok, "expected series record but got %+v", recs[0])
			require.Equal(t, []record.RefSeries{{Ref: 1, Labels: labels.FromStrings("a", "b")}}, series)
		})
	}
}

// TestWalRepair_DecodingError ensures that a repair is run for an error
// when decoding a record.
func TestWalRepair_DecodingError(t *testing.T) {
	var enc record.Encoder
	for name, test := range map[string]struct {
		corrFunc  func(rec []byte) []byte // Func that applies the corruption to a record.
		rec       []byte
		totalRecs int
		expRecs   int
	}{
		"decode_series": {
			func(rec []byte) []byte {
				return rec[:3]
			},
			enc.Series([]record.RefSeries{{Ref: 1, Labels: labels.FromStrings("a", "b")}}, []byte{}),
			9,
			5,
		},
		"decode_samples": {
			func(rec []byte) []byte {
				return rec[:3]
			},
			enc.Samples([]record.RefSample{{Ref: 0, T: 99, V: 1}}, []byte{}),
			9,
			5,
		},
		"decode_tombstone": {
			func(rec []byte) []byte {
				return rec[:3]
			},
			enc.Tombstones([]tombstones.Stone{{Ref: 1, Intervals: tombstones.Intervals{}}}, []byte{}),
			9,
			5,
		},
	} {
		for _, compress := range []bool{false, true} {
			t.Run(fmt.Sprintf("%s,compress=%t", name, compress), func(t *testing.T) {
				dir := t.TempDir()

				// Fill the wal and corrupt it.
				{
					w, err := wal.New(nil, nil, filepath.Join(dir, "wal"), compress)
					require.NoError(t, err)

					for i := 1; i <= test.totalRecs; i++ {
						// At this point insert a corrupted record.
						if i-1 == test.expRecs {
							require.NoError(t, w.Log(test.corrFunc(test.rec)))
							continue
						}
						require.NoError(t, w.Log(test.rec))
					}

					opts := DefaultHeadOptions()
					opts.ChunkRange = 1
					opts.ChunkDirRoot = w.Dir()
					h, err := NewHead(nil, nil, w, opts, nil)
					require.NoError(t, err)
					require.Equal(t, 0.0, prom_testutil.ToFloat64(h.metrics.walCorruptionsTotal))
					initErr := h.Init(math.MinInt64)

					err = errors.Cause(initErr) // So that we can pick up errors even if wrapped.
					_, corrErr := err.(*wal.CorruptionErr)
					require.True(t, corrErr, "reading the wal didn't return corruption error")
					require.NoError(t, h.Close()) // Head will close the wal as well.
				}

				// Open the db to trigger a repair.
				{
					db, err := Open(dir, nil, nil, DefaultOptions(), nil)
					require.NoError(t, err)
					defer func() {
						require.NoError(t, db.Close())
					}()
					require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.walCorruptionsTotal))
				}

				// Read the wal content after the repair.
				{
					sr, err := wal.NewSegmentsReader(filepath.Join(dir, "wal"))
					require.NoError(t, err)
					defer sr.Close()
					r := wal.NewReader(sr)

					var actRec int
					for r.Next() {
						actRec++
					}
					require.NoError(t, r.Err())
					require.Equal(t, test.expRecs, actRec, "Wrong number of intact records")
				}
			})
		}
	}
}
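
// The repair validated above is assumed to truncate the WAL at the first
// record that fails to decode (an inference from the assertions, not a spec
// of the repair implementation): test.totalRecs records are written, the
// corruption is placed after test.expRecs good records, and exactly
// test.expRecs records survive the repair.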

func TestHeadReadWriterRepair(t *testing.T) {
	dir := t.TempDir()

	const chunkRange = 1000

	walDir := filepath.Join(dir, "wal")
	// Fill the chunk segments and corrupt them.
	{
		w, err := wal.New(nil, nil, walDir, false)
		require.NoError(t, err)

		opts := DefaultHeadOptions()
		opts.ChunkRange = chunkRange
		opts.ChunkDirRoot = dir
		h, err := NewHead(nil, nil, w, opts, nil)
		require.NoError(t, err)
		require.Equal(t, 0.0, prom_testutil.ToFloat64(h.metrics.mmapChunkCorruptionTotal))
		require.NoError(t, h.Init(math.MinInt64))

		s, created, _ := h.getOrCreate(1, labels.FromStrings("a", "1"))
		require.True(t, created, "series was not created")

		for i := 0; i < 7; i++ {
			ok, chunkCreated := s.append(int64(i*chunkRange), float64(i*chunkRange), 0, h.chunkDiskMapper)
			require.True(t, ok, "series append failed")
			require.True(t, chunkCreated, "chunk was not created")
			ok, chunkCreated = s.append(int64(i*chunkRange)+chunkRange-1, float64(i*chunkRange), 0, h.chunkDiskMapper)
			require.True(t, ok, "series append failed")
			require.False(t, chunkCreated, "chunk was created")
			h.chunkDiskMapper.CutNewFile()
		}
		require.NoError(t, h.Close())

		// Verify that there are 6 segment files.
		// It should only be 6 because the last call to .CutNewFile() won't
		// take effect without another chunk being written.
		files, err := os.ReadDir(mmappedChunksDir(dir))
		require.NoError(t, err)
		require.Equal(t, 6, len(files))

		// Corrupt the 4th file by writing a random byte to series ref.
		f, err := os.OpenFile(filepath.Join(mmappedChunksDir(dir), files[3].Name()), os.O_WRONLY, 0o666)
		require.NoError(t, err)
		n, err := f.WriteAt([]byte{67, 88}, chunks.HeadChunkFileHeaderSize+2)
		require.NoError(t, err)
		require.Equal(t, 2, n)
		require.NoError(t, f.Close())
	}

	// Open the db to trigger a repair.
	{
		db, err := Open(dir, nil, nil, DefaultOptions(), nil)
		require.NoError(t, err)
		defer func() {
			require.NoError(t, db.Close())
		}()
		require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.mmapChunkCorruptionTotal))
	}

	// Verify that there are 3 segment files after the repair.
	// The corrupt segment and all segments after it should be removed.
	{
		files, err := os.ReadDir(mmappedChunksDir(dir))
		require.NoError(t, err)
		require.Equal(t, 3, len(files))
	}
}

func TestNewWalSegmentOnTruncate(t *testing.T) {
	h, wlog := newTestHead(t, 1000, false)
	defer func() {
		require.NoError(t, h.Close())
	}()
	add := func(ts int64) {
		app := h.Appender(context.Background())
		_, err := app.Append(0, labels.Labels{{Name: "a", Value: "b"}}, ts, 0)
		require.NoError(t, err)
		require.NoError(t, app.Commit())
	}

	add(0)
	_, last, err := wal.Segments(wlog.Dir())
	require.NoError(t, err)
	require.Equal(t, 0, last)

	add(1)
	require.NoError(t, h.Truncate(1))
	_, last, err = wal.Segments(wlog.Dir())
	require.NoError(t, err)
	require.Equal(t, 1, last)

	add(2)
	require.NoError(t, h.Truncate(2))
	_, last, err = wal.Segments(wlog.Dir())
	require.NoError(t, err)
	require.Equal(t, 2, last)
}

func TestAddDuplicateLabelName(t *testing.T) {
	h, _ := newTestHead(t, 1000, false)
	defer func() {
		require.NoError(t, h.Close())
	}()

	add := func(labels labels.Labels, labelName string) {
		app := h.Appender(context.Background())
		_, err := app.Append(0, labels, 0, 0)
		require.Error(t, err)
		require.Equal(t, fmt.Sprintf(`label name "%s" is not unique: invalid sample`, labelName), err.Error())
	}

	add(labels.Labels{{Name: "a", Value: "c"}, {Name: "a", Value: "b"}}, "a")
	add(labels.Labels{{Name: "a", Value: "c"}, {Name: "a", Value: "c"}}, "a")
	add(labels.Labels{{Name: "__name__", Value: "up"}, {Name: "job", Value: "prometheus"}, {Name: "le", Value: "500"}, {Name: "le", Value: "400"}, {Name: "unit", Value: "s"}}, "le")
}

func TestMemSeriesIsolation(t *testing.T) {
	if defaultIsolationDisabled {
		t.Skip("skipping test since tsdb isolation is disabled")
	}

	// Put a series, select it. GC it and then access it.
	lastValue := func(h *Head, maxAppendID uint64) int {
		idx, err := h.Index()
		require.NoError(t, err)

		iso := h.iso.State(math.MinInt64, math.MaxInt64)
		iso.maxAppendID = maxAppendID

		chunks, err := h.chunksRange(math.MinInt64, math.MaxInt64, iso)
		require.NoError(t, err)
		// Hm.. here direct block chunk querier might be required?
		querier := blockQuerier{
			blockBaseQuerier: &blockBaseQuerier{
				index:      idx,
				chunks:     chunks,
				tombstones: tombstones.NewMemTombstones(),
				mint:       0,
				maxt:       10000,
			},
		}

		require.NoError(t, err)
		defer querier.Close()

		ss := querier.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
		_, seriesSet, ws, err := expandSeriesSet(ss)
		require.NoError(t, err)
		require.Equal(t, 0, len(ws))

		for _, series := range seriesSet {
			return int(series[len(series)-1].v)
		}
		return -1
	}

	addSamples := func(h *Head) int {
		i := 1
		for ; i <= 1000; i++ {
			var app storage.Appender
			// To initialize bounds.
			if h.MinTime() == math.MaxInt64 {
				app = &initAppender{head: h}
			} else {
				a := h.appender()
				a.cleanupAppendIDsBelow = 0
				app = a
			}

			_, err := app.Append(0, labels.FromStrings("foo", "bar"), int64(i), float64(i))
			require.NoError(t, err)
			require.NoError(t, app.Commit())
		}
		return i
	}

	testIsolation := func(h *Head, i int) {
	}

	// Test isolation without restart of Head.
	hb, _ := newTestHead(t, 1000, false)
	i := addSamples(hb)
	testIsolation(hb, i)

	// Test simple cases in different chunks when no appendID cleanup has been performed.
	require.Equal(t, 10, lastValue(hb, 10))
	require.Equal(t, 130, lastValue(hb, 130))
	require.Equal(t, 160, lastValue(hb, 160))
	require.Equal(t, 240, lastValue(hb, 240))
	require.Equal(t, 500, lastValue(hb, 500))
	require.Equal(t, 750, lastValue(hb, 750))
	require.Equal(t, 995, lastValue(hb, 995))
	require.Equal(t, 999, lastValue(hb, 999))

	// Cleanup appendIDs below 500.
	app := hb.appender()
	app.cleanupAppendIDsBelow = 500
	_, err := app.Append(0, labels.FromStrings("foo", "bar"), int64(i), float64(i))
	require.NoError(t, err)
	require.NoError(t, app.Commit())
	i++

	// We should not get queries with a maxAppendID below 500 after the cleanup;
	// if we do, they only take the remaining appendIDs into account.
	require.Equal(t, 499, lastValue(hb, 10))
	require.Equal(t, 499, lastValue(hb, 130))
	require.Equal(t, 499, lastValue(hb, 160))
	require.Equal(t, 499, lastValue(hb, 240))
	require.Equal(t, 500, lastValue(hb, 500))
	require.Equal(t, 995, lastValue(hb, 995))
	require.Equal(t, 999, lastValue(hb, 999))

	// Cleanup appendIDs below 1000, which means the sample buffer is
	// the only thing with appendIDs.
	app = hb.appender()
	app.cleanupAppendIDsBelow = 1000
	_, err = app.Append(0, labels.FromStrings("foo", "bar"), int64(i), float64(i))
	require.NoError(t, err)
	require.NoError(t, app.Commit())
	require.Equal(t, 999, lastValue(hb, 998))
	require.Equal(t, 999, lastValue(hb, 999))
	require.Equal(t, 1000, lastValue(hb, 1000))
	require.Equal(t, 1001, lastValue(hb, 1001))
	require.Equal(t, 1002, lastValue(hb, 1002))
	require.Equal(t, 1002, lastValue(hb, 1003))

	i++
	// Cleanup appendIDs below 1001, but with a rollback.
	app = hb.appender()
	app.cleanupAppendIDsBelow = 1001
	_, err = app.Append(0, labels.FromStrings("foo", "bar"), int64(i), float64(i))
	require.NoError(t, err)
	require.NoError(t, app.Rollback())
	require.Equal(t, 1000, lastValue(hb, 999))
	require.Equal(t, 1000, lastValue(hb, 1000))
	require.Equal(t, 1001, lastValue(hb, 1001))
	require.Equal(t, 1002, lastValue(hb, 1002))
	require.Equal(t, 1002, lastValue(hb, 1003))

	require.NoError(t, hb.Close())

	// Test isolation with restart of Head. This is to verify the num samples of chunks after m-map chunk replay.
	hb, w := newTestHead(t, 1000, false)
	i = addSamples(hb)
	require.NoError(t, hb.Close())

	wlog, err := wal.NewSize(nil, nil, w.Dir(), 32768, false)
	require.NoError(t, err)
	opts := DefaultHeadOptions()
	opts.ChunkRange = 1000
	opts.ChunkDirRoot = wlog.Dir()
	hb, err = NewHead(nil, nil, wlog, opts, nil)
	defer func() { require.NoError(t, hb.Close()) }()
	require.NoError(t, err)
	require.NoError(t, hb.Init(0))

	// No appends after restarting. Hence all should return the last value.
	require.Equal(t, 1000, lastValue(hb, 10))
	require.Equal(t, 1000, lastValue(hb, 130))
	require.Equal(t, 1000, lastValue(hb, 160))
	require.Equal(t, 1000, lastValue(hb, 240))
	require.Equal(t, 1000, lastValue(hb, 500))

	// Cleanup appendIDs below 1000, which means the sample buffer is
	// the only thing with appendIDs.
	app = hb.appender()
	_, err = app.Append(0, labels.FromStrings("foo", "bar"), int64(i), float64(i))
	i++
	require.NoError(t, err)
	require.NoError(t, app.Commit())
	require.Equal(t, 1001, lastValue(hb, 998))
	require.Equal(t, 1001, lastValue(hb, 999))
	require.Equal(t, 1001, lastValue(hb, 1000))
	require.Equal(t, 1001, lastValue(hb, 1001))
	require.Equal(t, 1001, lastValue(hb, 1002))
	require.Equal(t, 1001, lastValue(hb, 1003))

	// Cleanup appendIDs below 1002, but with a rollback.
	app = hb.appender()
	_, err = app.Append(0, labels.FromStrings("foo", "bar"), int64(i), float64(i))
	require.NoError(t, err)
	require.NoError(t, app.Rollback())
	require.Equal(t, 1001, lastValue(hb, 999))
	require.Equal(t, 1001, lastValue(hb, 1000))
	require.Equal(t, 1001, lastValue(hb, 1001))
	require.Equal(t, 1001, lastValue(hb, 1002))
	require.Equal(t, 1001, lastValue(hb, 1003))
}

func TestIsolationRollback(t *testing.T) {
	if defaultIsolationDisabled {
		t.Skip("skipping test since tsdb isolation is disabled")
	}

	// Rollback after a failed append and test if the low watermark has progressed anyway.
	hb, _ := newTestHead(t, 1000, false)
	defer func() {
		require.NoError(t, hb.Close())
	}()

	app := hb.Appender(context.Background())
	_, err := app.Append(0, labels.FromStrings("foo", "bar"), 0, 0)
	require.NoError(t, err)
	require.NoError(t, app.Commit())
	require.Equal(t, uint64(1), hb.iso.lowWatermark())

	app = hb.Appender(context.Background())
	_, err = app.Append(0, labels.FromStrings("foo", "bar"), 1, 1)
	require.NoError(t, err)
	_, err = app.Append(0, labels.FromStrings("foo", "bar", "foo", "baz"), 2, 2)
	require.Error(t, err)
	require.NoError(t, app.Rollback())
	require.Equal(t, uint64(2), hb.iso.lowWatermark())

	app = hb.Appender(context.Background())
	_, err = app.Append(0, labels.FromStrings("foo", "bar"), 3, 3)
	require.NoError(t, err)
	require.NoError(t, app.Commit())
	require.Equal(t, uint64(3), hb.iso.lowWatermark(), "Low watermark should proceed to 3 even if append #2 was rolled back.")
}

func TestIsolationLowWatermarkMonotonous(t *testing.T) {
	if defaultIsolationDisabled {
		t.Skip("skipping test since tsdb isolation is disabled")
	}

	hb, _ := newTestHead(t, 1000, false)
	defer func() {
		require.NoError(t, hb.Close())
	}()

	app1 := hb.Appender(context.Background())
	_, err := app1.Append(0, labels.FromStrings("foo", "bar"), 0, 0)
	require.NoError(t, err)
	require.NoError(t, app1.Commit())
	require.Equal(t, uint64(1), hb.iso.lowWatermark(), "Low watermark should be 1 after 1st append.")

	app1 = hb.Appender(context.Background())
	_, err = app1.Append(0, labels.FromStrings("foo", "bar"), 1, 1)
	require.NoError(t, err)
	require.Equal(t, uint64(2), hb.iso.lowWatermark(), "Low watermark should be two, even if append is not committed yet.")

	app2 := hb.Appender(context.Background())
	_, err = app2.Append(0, labels.FromStrings("foo", "baz"), 1, 1)
	require.NoError(t, err)
	require.NoError(t, app2.Commit())
	require.Equal(t, uint64(2), hb.iso.lowWatermark(), "Low watermark should stay two because app1 is not committed yet.")

	is := hb.iso.State(math.MinInt64, math.MaxInt64)
	require.Equal(t, uint64(2), hb.iso.lowWatermark(), "After simulated read (iso state retrieved), low watermark should stay at 2.")

	require.NoError(t, app1.Commit())
	require.Equal(t, uint64(2), hb.iso.lowWatermark(), "Even after app1 is committed, low watermark should stay at 2 because read is still ongoing.")

	is.Close()
	require.Equal(t, uint64(3), hb.iso.lowWatermark(), "After read has finished (iso state closed), low watermark should jump to three.")
}
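
// A sketch of the bookkeeping exercised above (inferred from the assertions,
// not authoritative): every appender takes a monotonically increasing
// appendID, and the low watermark is the smallest appendID whose writes may
// still be invisible to readers. Open read states (hb.iso.State) pin the
// watermark, so it advances only once both the pending appender and the
// reader are done.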

func TestIsolationAppendIDZeroIsNoop(t *testing.T) {
	if defaultIsolationDisabled {
		t.Skip("skipping test since tsdb isolation is disabled")
	}

	h, _ := newTestHead(t, 1000, false)
	defer func() {
		require.NoError(t, h.Close())
	}()

	h.initTime(0)

	s, _, _ := h.getOrCreate(1, labels.FromStrings("a", "1"))

	ok, _ := s.append(0, 0, 0, h.chunkDiskMapper)
	require.True(t, ok, "Series append failed.")
	require.Equal(t, 0, s.txs.txIDCount, "Series should not have an appendID after append with appendID=0.")
}

func TestHeadSeriesChunkRace(t *testing.T) {
	for i := 0; i < 1000; i++ {
		testHeadSeriesChunkRace(t)
	}
}

func TestIsolationWithoutAdd(t *testing.T) {
	if defaultIsolationDisabled {
		t.Skip("skipping test since tsdb isolation is disabled")
	}

	hb, _ := newTestHead(t, 1000, false)
	defer func() {
		require.NoError(t, hb.Close())
	}()

	app := hb.Appender(context.Background())
	require.NoError(t, app.Commit())

	app = hb.Appender(context.Background())
	_, err := app.Append(0, labels.FromStrings("foo", "baz"), 1, 1)
	require.NoError(t, err)
	require.NoError(t, app.Commit())

	require.Equal(t, hb.iso.lastAppendID(), hb.iso.lowWatermark(), "High watermark should be equal to the low watermark")
}

func TestOutOfOrderSamplesMetric(t *testing.T) {
	dir := t.TempDir()

	db, err := Open(dir, nil, nil, DefaultOptions(), nil)
	require.NoError(t, err)
	defer func() {
		require.NoError(t, db.Close())
	}()
	db.DisableCompactions()

	ctx := context.Background()
	app := db.Appender(ctx)
	for i := 1; i <= 5; i++ {
		_, err = app.Append(0, labels.FromStrings("a", "b"), int64(i), 99)
		require.NoError(t, err)
	}
	require.NoError(t, app.Commit())

	// Test out of order metric.
	require.Equal(t, 0.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples))
	app = db.Appender(ctx)
	_, err = app.Append(0, labels.FromStrings("a", "b"), 2, 99)
	require.Equal(t, storage.ErrOutOfOrderSample, err)
	require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples))

	_, err = app.Append(0, labels.FromStrings("a", "b"), 3, 99)
	require.Equal(t, storage.ErrOutOfOrderSample, err)
	require.Equal(t, 2.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples))

	_, err = app.Append(0, labels.FromStrings("a", "b"), 4, 99)
	require.Equal(t, storage.ErrOutOfOrderSample, err)
	require.Equal(t, 3.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples))
	require.NoError(t, app.Commit())

	// Compact Head to test out of bound metric.
	app = db.Appender(ctx)
	_, err = app.Append(0, labels.FromStrings("a", "b"), DefaultBlockDuration*2, 99)
	require.NoError(t, err)
	require.NoError(t, app.Commit())

	require.Equal(t, int64(math.MinInt64), db.head.minValidTime.Load())
	require.NoError(t, db.Compact())
	require.Greater(t, db.head.minValidTime.Load(), int64(0))

	app = db.Appender(ctx)
	_, err = app.Append(0, labels.FromStrings("a", "b"), db.head.minValidTime.Load()-2, 99)
	require.Equal(t, storage.ErrOutOfBounds, err)
	require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.outOfBoundSamples))

	_, err = app.Append(0, labels.FromStrings("a", "b"), db.head.minValidTime.Load()-1, 99)
	require.Equal(t, storage.ErrOutOfBounds, err)
	require.Equal(t, 2.0, prom_testutil.ToFloat64(db.head.metrics.outOfBoundSamples))
	require.NoError(t, app.Commit())

	// Some more valid samples for out of order.
	app = db.Appender(ctx)
	for i := 1; i <= 5; i++ {
		_, err = app.Append(0, labels.FromStrings("a", "b"), db.head.minValidTime.Load()+DefaultBlockDuration+int64(i), 99)
		require.NoError(t, err)
	}
	require.NoError(t, app.Commit())

	// Test out of order metric.
	app = db.Appender(ctx)
	_, err = app.Append(0, labels.FromStrings("a", "b"), db.head.minValidTime.Load()+DefaultBlockDuration+2, 99)
	require.Equal(t, storage.ErrOutOfOrderSample, err)
	require.Equal(t, 4.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples))

	_, err = app.Append(0, labels.FromStrings("a", "b"), db.head.minValidTime.Load()+DefaultBlockDuration+3, 99)
	require.Equal(t, storage.ErrOutOfOrderSample, err)
	require.Equal(t, 5.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples))

	_, err = app.Append(0, labels.FromStrings("a", "b"), db.head.minValidTime.Load()+DefaultBlockDuration+4, 99)
	require.Equal(t, storage.ErrOutOfOrderSample, err)
	require.Equal(t, 6.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples))
	require.NoError(t, app.Commit())
}
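
// The two error paths exercised above are distinct: ErrOutOfOrderSample is
// returned when a sample is older than the newest sample already appended to
// that series, while ErrOutOfBounds is returned when a timestamp falls before
// the head's minValidTime, which compaction advances. Each failure increments
// its own metric, regardless of whether the appender is later committed.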

func testHeadSeriesChunkRace(t *testing.T) {
	h, _ := newTestHead(t, 1000, false)
	defer func() {
		require.NoError(t, h.Close())
	}()
	require.NoError(t, h.Init(0))
	app := h.Appender(context.Background())

	s2, err := app.Append(0, labels.FromStrings("foo2", "bar"), 5, 0)
	require.NoError(t, err)
	for ts := int64(6); ts < 11; ts++ {
		_, err = app.Append(s2, nil, ts, 0)
		require.NoError(t, err)
	}
	require.NoError(t, app.Commit())

	var wg sync.WaitGroup
	matcher := labels.MustNewMatcher(labels.MatchEqual, "", "")
	q, err := NewBlockQuerier(h, 18, 22)
	require.NoError(t, err)
	defer q.Close()

	wg.Add(1)
	go func() {
		h.updateMinMaxTime(20, 25)
		h.gc()
		wg.Done()
	}()
	ss := q.Select(false, nil, matcher)
	for ss.Next() {
	}
	require.NoError(t, ss.Err())
	wg.Wait()
}

func TestHeadLabelNamesValuesWithMinMaxRange(t *testing.T) {
	head, _ := newTestHead(t, 1000, false)
	defer func() {
		require.NoError(t, head.Close())
	}()

	const (
		firstSeriesTimestamp  int64 = 100
		secondSeriesTimestamp int64 = 200
		lastSeriesTimestamp   int64 = 300
	)
	var (
		seriesTimestamps = []int64{
			firstSeriesTimestamp,
			secondSeriesTimestamp,
			lastSeriesTimestamp,
		}
		expectedLabelNames  = []string{"a", "b", "c"}
		expectedLabelValues = []string{"d", "e", "f"}
	)

	app := head.Appender(context.Background())
	for i, name := range expectedLabelNames {
		_, err := app.Append(0, labels.Labels{{Name: name, Value: expectedLabelValues[i]}}, seriesTimestamps[i], 0)
		require.NoError(t, err)
	}
	require.NoError(t, app.Commit())
	require.Equal(t, head.MinTime(), firstSeriesTimestamp)
	require.Equal(t, head.MaxTime(), lastSeriesTimestamp)

	testCases := []struct {
		name           string
		mint           int64
		maxt           int64
		expectedNames  []string
		expectedValues []string
	}{
		{"maxt less than head min", head.MaxTime() - 10, head.MinTime() - 10, []string{}, []string{}},
		{"mint less than head max", head.MaxTime() + 10, head.MinTime() + 10, []string{}, []string{}},
		{"mint and maxt outside head", head.MaxTime() + 10, head.MinTime() - 10, []string{}, []string{}},
		{"mint and maxt within head", head.MaxTime() - 10, head.MinTime() + 10, expectedLabelNames, expectedLabelValues},
	}

	for _, tt := range testCases {
		t.Run(tt.name, func(t *testing.T) {
			headIdxReader := head.indexRange(tt.mint, tt.maxt)
			actualLabelNames, err := headIdxReader.LabelNames()
			require.NoError(t, err)
			require.Equal(t, tt.expectedNames, actualLabelNames)
			if len(tt.expectedValues) > 0 {
				for i, name := range expectedLabelNames {
					actualLabelValue, err := headIdxReader.SortedLabelValues(name)
					require.NoError(t, err)
					require.Equal(t, []string{tt.expectedValues[i]}, actualLabelValue)
				}
			}
		})
	}
}

func TestHeadLabelValuesWithMatchers(t *testing.T) {
	head, _ := newTestHead(t, 1000, false)
	t.Cleanup(func() { require.NoError(t, head.Close()) })

	app := head.Appender(context.Background())
	for i := 0; i < 100; i++ {
		_, err := app.Append(0, labels.Labels{
			{Name: "tens", Value: fmt.Sprintf("value%d", i/10)},
			{Name: "unique", Value: fmt.Sprintf("value%d", i)},
		}, 100, 0)
		require.NoError(t, err)
	}
	require.NoError(t, app.Commit())

	testCases := []struct {
		name           string
		labelName      string
		matchers       []*labels.Matcher
		expectedValues []string
	}{
		{
			name:           "get tens based on unique id",
			labelName:      "tens",
			matchers:       []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "unique", "value35")},
			expectedValues: []string{"value3"},
		}, {
			name:           "get unique ids based on a ten",
			labelName:      "unique",
			matchers:       []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "tens", "value1")},
			expectedValues: []string{"value10", "value11", "value12", "value13", "value14", "value15", "value16", "value17", "value18", "value19"},
		}, {
			name:           "get tens by pattern matching on unique id",
			labelName:      "tens",
			matchers:       []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "unique", "value[5-7]5")},
			expectedValues: []string{"value5", "value6", "value7"},
		}, {
			name:           "get tens by matching for absence of unique label",
			labelName:      "tens",
			matchers:       []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotEqual, "unique", "")},
			expectedValues: []string{"value0", "value1", "value2", "value3", "value4", "value5", "value6", "value7", "value8", "value9"},
		},
	}

	for _, tt := range testCases {
		t.Run(tt.name, func(t *testing.T) {
			headIdxReader := head.indexRange(0, 200)

			actualValues, err := headIdxReader.SortedLabelValues(tt.labelName, tt.matchers...)
			require.NoError(t, err)
			require.Equal(t, tt.expectedValues, actualValues)

			actualValues, err = headIdxReader.LabelValues(tt.labelName, tt.matchers...)
			sort.Strings(actualValues)
			require.NoError(t, err)
			require.Equal(t, tt.expectedValues, actualValues)
		})
	}
}

func TestHeadLabelNamesWithMatchers(t *testing.T) {
	head, _ := newTestHead(t, 1000, false)
	defer func() {
		require.NoError(t, head.Close())
	}()

	app := head.Appender(context.Background())
	for i := 0; i < 100; i++ {
		_, err := app.Append(0, labels.Labels{
			{Name: "unique", Value: fmt.Sprintf("value%d", i)},
		}, 100, 0)
		require.NoError(t, err)

		if i%10 == 0 {
			_, err := app.Append(0, labels.Labels{
				{Name: "tens", Value: fmt.Sprintf("value%d", i/10)},
				{Name: "unique", Value: fmt.Sprintf("value%d", i)},
			}, 100, 0)
			require.NoError(t, err)
		}

		if i%20 == 0 {
			_, err := app.Append(0, labels.Labels{
				{Name: "tens", Value: fmt.Sprintf("value%d", i/10)},
				{Name: "twenties", Value: fmt.Sprintf("value%d", i/20)},
				{Name: "unique", Value: fmt.Sprintf("value%d", i)},
			}, 100, 0)
			require.NoError(t, err)
		}
	}
	require.NoError(t, app.Commit())

	testCases := []struct {
		name          string
		labelName     string
		matchers      []*labels.Matcher
		expectedNames []string
	}{
		{
			name:          "get with non-empty unique: all",
			matchers:      []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotEqual, "unique", "")},
			expectedNames: []string{"tens", "twenties", "unique"},
		}, {
			name:          "get with unique ending in 1: only unique",
			matchers:      []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "unique", "value.*1")},
			expectedNames: []string{"unique"},
		}, {
			name:          "get with unique = value20: all",
			matchers:      []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "unique", "value20")},
			expectedNames: []string{"tens", "twenties", "unique"},
		}, {
			name:          "get tens = 1: unique & tens",
			matchers:      []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "tens", "value1")},
			expectedNames: []string{"tens", "unique"},
		},
	}

	for _, tt := range testCases {
		t.Run(tt.name, func(t *testing.T) {
			headIdxReader := head.indexRange(0, 200)

			actualNames, err := headIdxReader.LabelNames(tt.matchers...)
			require.NoError(t, err)
			require.Equal(t, tt.expectedNames, actualNames)
		})
	}
}
2020-07-22 02:57:38 -07:00
func TestErrReuseAppender ( t * testing . T ) {
head , _ := newTestHead ( t , 1000 , false )
defer func ( ) {
2020-10-29 02:43:23 -07:00
require . NoError ( t , head . Close ( ) )
2020-07-22 02:57:38 -07:00
} ( )
2020-07-30 04:11:13 -07:00
app := head . Appender ( context . Background ( ) )
2021-02-18 04:07:00 -08:00
_ , err := app . Append ( 0 , labels . Labels { { Name : "test" , Value : "test" } } , 0 , 0 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
require . Error ( t , app . Commit ( ) )
require . Error ( t , app . Rollback ( ) )
2020-07-22 02:57:38 -07:00
2020-07-30 04:11:13 -07:00
app = head . Appender ( context . Background ( ) )
2021-02-18 04:07:00 -08:00
_ , err = app . Append ( 0 , labels . Labels { { Name : "test" , Value : "test" } } , 1 , 0 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . NoError ( t , app . Rollback ( ) )
require . Error ( t , app . Rollback ( ) )
require . Error ( t , app . Commit ( ) )
2020-07-22 02:57:38 -07:00
2020-07-30 04:11:13 -07:00
app = head . Appender ( context . Background ( ) )
2021-02-18 04:07:00 -08:00
_ , err = app . Append ( 0 , labels . Labels { { Name : "test" , Value : "test" } } , 2 , 0 )
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
require . Error ( t , app . Rollback ( ) )
require . Error ( t , app . Commit ( ) )
app = head . Appender ( context . Background ( ) )
_ , err = app . Append ( 0 , labels . Labels { { Name : "test" , Value : "test" } } , 3 , 0 )
require . NoError ( t , err )
require . NoError ( t , app . Rollback ( ) )
require . Error ( t , app . Commit ( ) )
require . Error ( t , app . Rollback ( ) )
}
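
// TestHeadMintAfterTruncation verifies how Head.Truncate adjusts the head's
// min time depending on where the truncation time falls relative to the
// appendable window.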
func TestHeadMintAfterTruncation ( t * testing . T ) {
chunkRange := int64 ( 2000 )
head , _ := newTestHead ( t , chunkRange , false )
app := head . Appender ( context . Background ( ) )
_ , err := app . Append ( 0 , labels . Labels { { Name : "a" , Value : "b" } } , 100 , 100 )
require . NoError ( t , err )
_ , err = app . Append ( 0 , labels . Labels { { Name : "a" , Value : "b" } } , 4000 , 200 )
require . NoError ( t , err )
_ , err = app . Append ( 0 , labels . Labels { { Name : "a" , Value : "b" } } , 8000 , 300 )
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
// Truncating outside the appendable window, with the actual mint also outside
// the appendable window, should leave mint at the actual mint.
require . NoError ( t , head . Truncate ( 3500 ) )
require . Equal ( t , int64 ( 4000 ) , head . MinTime ( ) )
require . Equal ( t , int64 ( 4000 ) , head . minValidTime . Load ( ) )
// After truncating outside the appendable window, if the actual min time is
// inside the appendable window, then we should leave mint at the start of the
// appendable window.
require . NoError ( t , head . Truncate ( 5000 ) )
require . Equal ( t , head . appendableMinValidTime ( ) , head . MinTime ( ) )
require . Equal ( t , head . appendableMinValidTime ( ) , head . minValidTime . Load ( ) )
// If the truncation time is inside the appendable window, then the min time
// should be the truncation time.
require . NoError ( t , head . Truncate ( 7500 ) )
require . Equal ( t , int64 ( 7500 ) , head . MinTime ( ) )
require . Equal ( t , int64 ( 7500 ) , head . minValidTime . Load ( ) )
require . NoError ( t , head . Close ( ) )
}
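
// TestHeadExemplars verifies that exemplars with timestamps before the head's
// min time are still accepted.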
func TestHeadExemplars ( t * testing . T ) {
chunkRange := int64 ( 2000 )
head , _ := newTestHead ( t , chunkRange , false )
app := head . Appender ( context . Background ( ) )
l := labels . FromStrings ( "traceId" , "123" )
// It is perfectly valid to add exemplars from before the current start time;
// histogram buckets that haven't been updated in a while could still be
// exporting exemplars from an hour ago.
ref , err := app . Append ( 0 , labels . Labels { { Name : "a" , Value : "b" } } , 100 , 100 )
require . NoError ( t , err )
_ , err = app . AppendExemplar ( ref , l , exemplar . Exemplar {
Labels : l ,
HasTs : true ,
Ts : - 1000 ,
Value : 1 ,
} )
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
require . NoError ( t , head . Close ( ) )
}
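
// BenchmarkHeadLabelValuesWithMatchers measures LabelValues on a head with one
// million series, using a matcher that selects the first 90% of them.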
func BenchmarkHeadLabelValuesWithMatchers ( b * testing . B ) {
chunkRange := int64 ( 2000 )
head , _ := newTestHead ( b , chunkRange , false )
b . Cleanup ( func ( ) { require . NoError ( b , head . Close ( ) ) } )
app := head . Appender ( context . Background ( ) )
metricCount := 1000000
for i := 0 ; i < metricCount ; i ++ {
// Note these series are not created in sort order: 'value2' sorts after 'value10'.
// This makes a big difference to the benchmark timing.
_ , err := app . Append ( 0 , labels . Labels {
{ Name : "a_unique" , Value : fmt . Sprintf ( "value%d" , i ) } ,
{ Name : "b_tens" , Value : fmt . Sprintf ( "value%d" , i / ( metricCount / 10 ) ) } ,
{ Name : "c_ninety" , Value : fmt . Sprintf ( "value%d" , i / ( metricCount / 10 ) / 9 ) } , // "0" for the first 90%, then "1"
} , 100 , 0 )
require . NoError ( b , err )
}
require . NoError ( b , app . Commit ( ) )
headIdxReader := head . indexRange ( 0 , 200 )
matchers := [ ] * labels . Matcher { labels . MustNewMatcher ( labels . MatchEqual , "c_ninety" , "value0" ) }
b . ResetTimer ( )
b . ReportAllocs ( )
for benchIdx := 0 ; benchIdx < b . N ; benchIdx ++ {
actualValues , err := headIdxReader . LabelValues ( "b_tens" , matchers ... )
require . NoError ( b , err )
require . Equal ( b , 9 , len ( actualValues ) )
}
}
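
// TestMemSafeIteratorSeekIntoBuffer checks memSafeIterator.Seek: seeking
// backwards must not move the iterator, and seeking forwards must land on the
// right sample inside the in-memory buffer.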
func TestMemSafeIteratorSeekIntoBuffer ( t * testing . T ) {
dir := t . TempDir ( )
// This is usually taken from the Head, but we pass it in manually here.
chunkDiskMapper , err := chunks . NewChunkDiskMapper ( nil , dir , chunkenc . NewPool ( ) , chunks . DefaultWriteBufferSize , chunks . DefaultWriteQueueSize )
require . NoError ( t , err )
defer func ( ) {
require . NoError ( t , chunkDiskMapper . Close ( ) )
} ( )
s := newMemSeries ( labels . Labels { } , 1 , 500 , nil , defaultIsolationDisabled )
for i := 0 ; i < 7 ; i ++ {
ok , _ := s . append ( int64 ( i ) , float64 ( i ) , 0 , chunkDiskMapper )
require . True ( t , ok , "sample append failed" )
}
it := s . iterator ( s . headChunkID ( len ( s . mmappedChunks ) ) , nil , chunkDiskMapper , nil )
_ , ok := it . ( * memSafeIterator )
require . True ( t , ok )
// First point.
ok = it . Seek ( 0 )
require . True ( t , ok )
ts , val := it . At ( )
require . Equal ( t , int64 ( 0 ) , ts )
require . Equal ( t , float64 ( 0 ) , val )
// Advance one point.
ok = it . Next ( )
require . True ( t , ok )
ts , val = it . At ( )
require . Equal ( t , int64 ( 1 ) , ts )
require . Equal ( t , float64 ( 1 ) , val )
// Seeking an older timestamp shouldn't cause the iterator to go backwards.
ok = it . Seek ( 0 )
require . True ( t , ok )
ts , val = it . At ( )
require . Equal ( t , int64 ( 1 ) , ts )
require . Equal ( t , float64 ( 1 ) , val )
// Seek into the buffer.
ok = it . Seek ( 3 )
require . True ( t , ok )
ts , val = it . At ( )
require . Equal ( t , int64 ( 3 ) , ts )
require . Equal ( t , float64 ( 3 ) , val )
// Iterate through the rest of the buffer.
for i := 4 ; i < 7 ; i ++ {
ok = it . Next ( )
require . True ( t , ok )
ts , val = it . At ( )
require . Equal ( t , int64 ( i ) , ts )
require . Equal ( t , float64 ( i ) , val )
}
// Run out of elements in the iterator.
ok = it . Next ( )
require . False ( t , ok )
ok = it . Seek ( 7 )
require . False ( t , ok )
}
// Tests https://github.com/prometheus/prometheus/issues/8221.
func TestChunkNotFoundHeadGCRace ( t * testing . T ) {
db := newTestDB ( t )
db . DisableCompactions ( )
var (
app = db . Appender ( context . Background ( ) )
ref = storage . SeriesRef ( 0 )
mint , maxt = int64 ( 0 ) , int64 ( 0 )
err error
)
// Append samples spanning 1.5 block ranges:
// 7 chunks with a 15s scrape interval.
for i := int64 ( 0 ) ; i <= 120 * 7 ; i ++ {
ts := i * DefaultBlockDuration / ( 4 * 120 )
ref , err = app . Append ( ref , labels . FromStrings ( "a" , "b" ) , ts , float64 ( i ) )
require . NoError ( t , err )
maxt = ts
}
require . NoError ( t , app . Commit ( ) )
// Get a querier before compaction (or when compaction is about to begin).
q , err := db . Querier ( context . Background ( ) , mint , maxt )
require . NoError ( t , err )
// Query the compacted range and get the first series before compaction.
ss := q . Select ( true , nil , labels . MustNewMatcher ( labels . MatchEqual , "a" , "b" ) )
require . True ( t , ss . Next ( ) )
s := ss . At ( )
var wg sync . WaitGroup
wg . Add ( 1 )
go func ( ) {
defer wg . Done ( )
// Compacting head while the querier spans the compaction time.
require . NoError ( t , db . Compact ( ) )
require . Greater ( t , len ( db . Blocks ( ) ) , 0 )
} ( )
// Give enough time for compaction to finish.
// We expect it to be blocked until the querier is closed.
<- time . After ( 3 * time . Second )
// Now consume the series data after compaction, when the head data is gone.
it := s . Iterator ( )
for it . Next ( ) {
_ , _ = it . At ( )
}
// Without a fix for the issue referenced above, this iteration would error.
require . NoError ( t , it . Err ( ) )
for ss . Next ( ) {
s = ss . At ( )
it := s . Iterator ( )
for it . Next ( ) {
_ , _ = it . At ( )
}
require . NoError ( t , it . Err ( ) )
}
require . NoError ( t , ss . Err ( ) )
require . NoError ( t , q . Close ( ) )
wg . Wait ( )
}
// Tests https://github.com/prometheus/prometheus/issues/9079.
func TestDataMissingOnQueryDuringCompaction ( t * testing . T ) {
db := newTestDB ( t )
db . DisableCompactions ( )
var (
app = db . Appender ( context . Background ( ) )
ref = storage . SeriesRef ( 0 )
mint , maxt = int64 ( 0 ) , int64 ( 0 )
err error
)
// Append samples spanning 1.5 block ranges.
expSamples := make ( [ ] tsdbutil . Sample , 0 )
// 7 chunks with 15s scrape interval.
for i := int64 ( 0 ) ; i <= 120 * 7 ; i ++ {
ts := i * DefaultBlockDuration / ( 4 * 120 )
ref , err = app . Append ( ref , labels . FromStrings ( "a" , "b" ) , ts , float64 ( i ) )
require . NoError ( t , err )
maxt = ts
expSamples = append ( expSamples , sample { ts , float64 ( i ) } )
}
require . NoError ( t , app . Commit ( ) )
// Get a querier before compaction (or when compaction is about to begin).
q , err := db . Querier ( context . Background ( ) , mint , maxt )
require . NoError ( t , err )
var wg sync . WaitGroup
wg . Add ( 1 )
go func ( ) {
defer wg . Done ( )
// Compacting head while the querier spans the compaction time.
require . NoError ( t , db . Compact ( ) )
require . Greater ( t , len ( db . Blocks ( ) ) , 0 )
} ( )
// Give enough time for compaction to finish.
// We expect it to be blocked until the querier is closed.
<- time . After ( 3 * time . Second )
// Query using the querier that was obtained before compaction.
series := query ( t , q , labels . MustNewMatcher ( labels . MatchEqual , "a" , "b" ) )
require . Equal ( t , map [ string ] [ ] tsdbutil . Sample { `{a="b"}` : expSamples } , series )
wg . Wait ( )
}
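
// TestIsQuerierCollidingWithTruncation checks the head's advice for open
// queriers while a memory truncation is in progress: queriers ending before
// the truncation time should simply be closed, and queriers spanning it should
// be recreated with their mint moved up to the truncation time.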
func TestIsQuerierCollidingWithTruncation ( t * testing . T ) {
db := newTestDB ( t )
db . DisableCompactions ( )
var (
app = db . Appender ( context . Background ( ) )
ref = storage . SeriesRef ( 0 )
err error
)
for i := int64 ( 0 ) ; i <= 3000 ; i ++ {
ref , err = app . Append ( ref , labels . FromStrings ( "a" , "b" ) , i , float64 ( i ) )
require . NoError ( t , err )
}
require . NoError ( t , app . Commit ( ) )
// This mocks truncation.
db . head . memTruncationInProcess . Store ( true )
db . head . lastMemoryTruncationTime . Store ( 2000 )
// Test that IsQuerierCollidingWithTruncation suggests correct querier ranges.
cases := [ ] struct {
mint , maxt int64 // For the querier.
expShouldClose , expGetNew bool
expNewMint int64
} {
{ - 200 , - 100 , true , false , 0 } ,
{ - 200 , 300 , true , false , 0 } ,
{ 100 , 1900 , true , false , 0 } ,
{ 1900 , 2200 , true , true , 2000 } ,
{ 2000 , 2500 , false , false , 0 } ,
}
for _ , c := range cases {
t . Run ( fmt . Sprintf ( "mint=%d,maxt=%d" , c . mint , c . maxt ) , func ( t * testing . T ) {
shouldClose , getNew , newMint := db . head . IsQuerierCollidingWithTruncation ( c . mint , c . maxt )
require . Equal ( t , c . expShouldClose , shouldClose )
require . Equal ( t , c . expGetNew , getNew )
if getNew {
require . Equal ( t , c . expNewMint , newMint )
}
} )
}
}
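
// TestWaitForPendingReadersInTimeRange verifies that the head waits for all
// queriers overlapping the truncation range to be closed before proceeding.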
func TestWaitForPendingReadersInTimeRange ( t * testing . T ) {
db := newTestDB ( t )
db . DisableCompactions ( )
sampleTs := func ( i int64 ) int64 { return i * DefaultBlockDuration / ( 4 * 120 ) }
var (
app = db . Appender ( context . Background ( ) )
ref = storage . SeriesRef ( 0 )
err error
)
for i := int64 ( 0 ) ; i <= 3000 ; i ++ {
ts := sampleTs ( i )
ref , err = app . Append ( ref , labels . FromStrings ( "a" , "b" ) , ts , float64 ( i ) )
require . NoError ( t , err )
}
require . NoError ( t , app . Commit ( ) )
truncMint , truncMaxt := int64 ( 1000 ) , int64 ( 2000 )
cases := [ ] struct {
mint , maxt int64
shouldWait bool
} {
{ 0 , 500 , false } , // Before truncation range.
{ 500 , 1500 , true } , // Overlaps with truncation at the start.
{ 1200 , 1700 , true } , // Within truncation range.
{ 1800 , 2500 , true } , // Overlaps with truncation at the end.
{ 2000 , 2500 , false } , // After truncation range.
{ 2100 , 2500 , false } , // After truncation range.
}
for _ , c := range cases {
t . Run ( fmt . Sprintf ( "mint=%d,maxt=%d,shouldWait=%t" , c . mint , c . maxt , c . shouldWait ) , func ( t * testing . T ) {
checkWaiting := func ( cl io . Closer ) {
var waitOver atomic . Bool
go func ( ) {
db . head . WaitForPendingReadersInTimeRange ( truncMint , truncMaxt )
waitOver . Store ( true )
} ( )
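// The 550ms sleeps are heuristic: long enough for the goroutine above to
// block on pending readers if it is going to, while keeping the test fast.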
<- time . After ( 550 * time . Millisecond )
require . Equal ( t , ! c . shouldWait , waitOver . Load ( ) )
require . NoError ( t , cl . Close ( ) )
<- time . After ( 550 * time . Millisecond )
require . True ( t , waitOver . Load ( ) )
}
q , err := db . Querier ( context . Background ( ) , c . mint , c . maxt )
require . NoError ( t , err )
checkWaiting ( q )
cq , err := db . ChunkQuerier ( context . Background ( ) , c . mint , c . maxt )
require . NoError ( t , err )
checkWaiting ( cq )
} )
}
}
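
// TestChunkSnapshot exercises the chunk snapshot lifecycle: take a snapshot on
// shutdown, replay it on startup, and verify that samples, tombstones, and
// exemplars survive, both with and without extra WAL data on top of the snapshot.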
func TestChunkSnapshot ( t * testing . T ) {
head , _ := newTestHead ( t , 120 * 4 , false )
defer func ( ) {
head . opts . EnableMemorySnapshotOnShutdown = false
require . NoError ( t , head . Close ( ) )
} ( )
type ex struct {
seriesLabels labels . Labels
e exemplar . Exemplar
}
numSeries := 10
expSeries := make ( map [ string ] [ ] tsdbutil . Sample )
expTombstones := make ( map [ storage . SeriesRef ] tombstones . Intervals )
expExemplars := make ( [ ] ex , 0 )
addExemplar := func ( app storage . Appender , ref storage . SeriesRef , lbls labels . Labels , ts int64 ) {
e := ex {
seriesLabels : lbls ,
e : exemplar . Exemplar {
Labels : labels . Labels { { Name : "traceID" , Value : fmt . Sprintf ( "%d" , rand . Int ( ) ) } } ,
Value : rand . Float64 ( ) ,
Ts : ts ,
} ,
}
expExemplars = append ( expExemplars , e )
_ , err := app . AppendExemplar ( ref , e . seriesLabels , e . e )
require . NoError ( t , err )
}
checkSamples := func ( ) {
q , err := NewBlockQuerier ( head , math . MinInt64 , math . MaxInt64 )
require . NoError ( t , err )
series := query ( t , q , labels . MustNewMatcher ( labels . MatchRegexp , "foo" , ".*" ) )
require . Equal ( t , expSeries , series )
}
checkTombstones := func ( ) {
tr , err := head . Tombstones ( )
require . NoError ( t , err )
actTombstones := make ( map [ storage . SeriesRef ] tombstones . Intervals )
require . NoError ( t , tr . Iter ( func ( ref storage . SeriesRef , itvs tombstones . Intervals ) error {
for _ , itv := range itvs {
actTombstones [ ref ] . Add ( itv )
}
return nil
} ) )
require . Equal ( t , expTombstones , actTombstones )
}
checkExemplars := func ( ) {
actExemplars := make ( [ ] ex , 0 , len ( expExemplars ) )
err := head . exemplars . IterateExemplars ( func ( seriesLabels labels . Labels , e exemplar . Exemplar ) error {
actExemplars = append ( actExemplars , ex {
seriesLabels : seriesLabels ,
e : e ,
} )
return nil
} )
require . NoError ( t , err )
// Verify both that the right exemplars exist and that they appear in the right order in the buffer.
require . Equal ( t , expExemplars , actExemplars )
}
var (
wlast , woffset int
err error
)
closeHeadAndCheckSnapshot := func ( ) {
require . NoError ( t , head . Close ( ) )
_ , sidx , soffset , err := LastChunkSnapshot ( head . opts . ChunkDirRoot )
require . NoError ( t , err )
require . Equal ( t , wlast , sidx )
require . Equal ( t , woffset , soffset )
}
openHeadAndCheckReplay := func ( ) {
w , err := wal . NewSize ( nil , nil , head . wal . Dir ( ) , 32768 , false )
require . NoError ( t , err )
head , err = NewHead ( nil , nil , w , head . opts , nil )
require . NoError ( t , err )
require . NoError ( t , head . Init ( math . MinInt64 ) )
checkSamples ( )
checkTombstones ( )
checkExemplars ( )
}
{ // Initial data that goes into snapshot.
// Add some initial samples with >=1 m-map chunk.
app := head . Appender ( context . Background ( ) )
for i := 1 ; i <= numSeries ; i ++ {
lbls := labels . Labels { labels . Label { Name : "foo" , Value : fmt . Sprintf ( "bar%d" , i ) } }
lblStr := lbls . String ( )
// Should m-map at least 1 chunk.
for ts := int64 ( 1 ) ; ts <= 200 ; ts ++ {
val := rand . Float64 ( )
expSeries [ lblStr ] = append ( expSeries [ lblStr ] , sample { ts , val } )
ref , err := app . Append ( 0 , lbls , ts , val )
require . NoError ( t , err )
// Add an exemplar and commit every 10th sample so that multiple WAL records are created.
if ts % 10 == 0 {
addExemplar ( app , ref , lbls , ts )
require . NoError ( t , app . Commit ( ) )
app = head . Appender ( context . Background ( ) )
}
}
}
require . NoError ( t , app . Commit ( ) )
// Add some tombstones.
var enc record . Encoder
for i := 1 ; i <= numSeries ; i ++ {
ref := storage . SeriesRef ( i )
itvs := tombstones . Intervals {
{ Mint : 1234 , Maxt : 2345 } ,
{ Mint : 3456 , Maxt : 4567 } ,
}
for _ , itv := range itvs {
expTombstones [ ref ] . Add ( itv )
}
head . tombstones . AddInterval ( ref , itvs ... )
err := head . wal . Log ( enc . Tombstones ( [ ] tombstones . Stone {
{ Ref : ref , Intervals : itvs } ,
} , nil ) )
require . NoError ( t , err )
}
}
// These references should be the ones used for the snapshot.
wlast , woffset , err = head . wal . LastSegmentAndOffset ( )
require . NoError ( t , err )
if woffset != 0 && woffset < 32 * 1024 {
// The page is always filled before taking the snapshot.
woffset = 32 * 1024
}
{
// Creating snapshot and verifying it.
head . opts . EnableMemorySnapshotOnShutdown = true
closeHeadAndCheckSnapshot ( ) // This will create a snapshot.
// Test the replay of snapshot.
openHeadAndCheckReplay ( )
}
{ // Additional data to include only in the WAL and m-mapped chunks, not in the snapshot. This mimics having an old snapshot on disk.
// Add more samples.
app := head . Appender ( context . Background ( ) )
for i := 1 ; i <= numSeries ; i ++ {
lbls := labels . Labels { labels . Label { Name : "foo" , Value : fmt . Sprintf ( "bar%d" , i ) } }
lblStr := lbls . String ( )
// Should m-map at least 1 chunk.
for ts := int64 ( 201 ) ; ts <= 400 ; ts ++ {
val := rand . Float64 ( )
expSeries [ lblStr ] = append ( expSeries [ lblStr ] , sample { ts , val } )
ref , err := app . Append ( 0 , lbls , ts , val )
require . NoError ( t , err )
// Add an exemplar and commit every 10th sample so that multiple WAL records are created.
if ts % 10 == 0 {
2021-08-30 07:04:38 -07:00
addExemplar ( app , ref , lbls , ts )
2021-08-17 10:08:16 -07:00
require . NoError ( t , app . Commit ( ) )
app = head . Appender ( context . Background ( ) )
}
}
}
require . NoError ( t , app . Commit ( ) )
// Add more tombstones.
var enc record . Encoder
for i := 1 ; i <= numSeries ; i ++ {
ref := storage . SeriesRef ( i )
itvs := tombstones . Intervals {
{ Mint : 12345 , Maxt : 23456 } ,
{ Mint : 34567 , Maxt : 45678 } ,
}
for _ , itv := range itvs {
expTombstones [ ref ] . Add ( itv )
}
head . tombstones . AddInterval ( ref , itvs ... )
err := head . wal . Log ( enc . Tombstones ( [ ] tombstones . Stone {
{ Ref : ref , Intervals : itvs } ,
} , nil ) )
require . NoError ( t , err )
}
}
{
// Close Head and verify that new snapshot was not created.
head . opts . EnableMemorySnapshotOnShutdown = false
closeHeadAndCheckSnapshot ( ) // This should not create a snapshot.
// Test the replay of snapshot, m-map chunks, and WAL.
head . opts . EnableMemorySnapshotOnShutdown = true // Enabled to read from snapshot.
openHeadAndCheckReplay ( )
}
// Creating another snapshot should delete the older snapshot, and replay should still work fine.
wlast , woffset , err = head . wal . LastSegmentAndOffset ( )
require . NoError ( t , err )
if woffset != 0 && woffset < 32 * 1024 {
// The page is always filled before taking the snapshot.
woffset = 32 * 1024
}
{
// Close Head and verify that new snapshot was created.
closeHeadAndCheckSnapshot ( )
// Verify that there is only 1 snapshot.
files , err := os . ReadDir ( head . opts . ChunkDirRoot )
require . NoError ( t , err )
snapshots := 0
for i := len ( files ) - 1 ; i >= 0 ; i -- {
fi := files [ i ]
if strings . HasPrefix ( fi . Name ( ) , chunkSnapshotPrefix ) {
snapshots ++
require . Equal ( t , chunkSnapshotDir ( wlast , woffset ) , fi . Name ( ) )
}
}
require . Equal ( t , 1 , snapshots )
// Test the replay of snapshot.
head . opts . EnableMemorySnapshotOnShutdown = true // Enabled to read from snapshot.
// Disabling exemplars to check that replay does not hard fail:
// https://github.com/prometheus/prometheus/issues/9437#issuecomment-933285870.
head . opts . EnableExemplarStorage = false
head . opts . MaxExemplars . Store ( 0 )
expExemplars = expExemplars [ : 0 ]
openHeadAndCheckReplay ( )
require . Equal ( t , 0.0 , prom_testutil . ToFloat64 ( head . metrics . snapshotReplayErrorTotal ) )
}
}
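
// TestSnapshotError ensures that a corrupted chunk snapshot does not crash
// startup: replay fails cleanly, snapshotReplayErrorTotal is incremented, and
// the head comes up empty.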
func TestSnapshotError ( t * testing . T ) {
head , _ := newTestHead ( t , 120 * 4 , false )
defer func ( ) {
head . opts . EnableMemorySnapshotOnShutdown = false
require . NoError ( t , head . Close ( ) )
} ( )
// Add a sample.
app := head . Appender ( context . Background ( ) )
lbls := labels . Labels { labels . Label { Name : "foo" , Value : "bar" } }
_ , err := app . Append ( 0 , lbls , 99 , 99 )
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
// Add some tombstones.
itvs := tombstones . Intervals {
{ Mint : 1234 , Maxt : 2345 } ,
{ Mint : 3456 , Maxt : 4567 } ,
}
head . tombstones . AddInterval ( 1 , itvs ... )
// Check existence of data.
2021-08-17 10:08:16 -07:00
require . NotNil ( t , head . series . getByHash ( lbls . Hash ( ) , lbls ) )
tm , err := head . tombstones . Get ( 1 )
require . NoError ( t , err )
require . NotEqual ( t , 0 , len ( tm ) )
head . opts . EnableMemorySnapshotOnShutdown = true
require . NoError ( t , head . Close ( ) ) // This will create a snapshot.
// Remove the WAL so that we don't load from it.
require . NoError ( t , os . RemoveAll ( head . wal . Dir ( ) ) )
// Corrupt the snapshot.
snapDir , _ , _ , err := LastChunkSnapshot ( head . opts . ChunkDirRoot )
require . NoError ( t , err )
files , err := os . ReadDir ( snapDir )
require . NoError ( t , err )
f , err := os . OpenFile ( path . Join ( snapDir , files [ 0 ] . Name ( ) ) , os . O_RDWR , 0 )
require . NoError ( t , err )
_ , err = f . WriteAt ( [ ] byte { 0b11111111 } , 18 )
require . NoError ( t , err )
require . NoError ( t , f . Close ( ) )
// Create new Head which should replay this snapshot.
w , err := wal . NewSize ( nil , nil , head . wal . Dir ( ) , 32768 , false )
require . NoError ( t , err )
// Testing https://github.com/prometheus/prometheus/issues/9437 with the registry.
head , err = NewHead ( prometheus . NewRegistry ( ) , nil , w , head . opts , nil )
require . NoError ( t , err )
require . NoError ( t , head . Init ( math . MinInt64 ) )
// There should be no series in memory after the snapshot error, since the WAL was removed.
require . Equal ( t , 1.0 , prom_testutil . ToFloat64 ( head . metrics . snapshotReplayErrorTotal ) )
require . Nil ( t , head . series . getByHash ( lbls . Hash ( ) , lbls ) )
tm , err = head . tombstones . Get ( 1 )
require . NoError ( t , err )
require . Equal ( t , 0 , len ( tm ) )
}
// Tests https://github.com/prometheus/prometheus/issues/9725.
func TestChunkSnapshotReplayBug ( t * testing . T ) {
dir := t . TempDir ( )
wlog , err := wal . NewSize ( nil , nil , filepath . Join ( dir , "wal" ) , 32768 , true )
require . NoError ( t , err )
// Write a few series records and samples such that the series references are
// not in order in the WAL, for status_code="200".
var buf [ ] byte
for i := 1 ; i <= 1000 ; i ++ {
var ref chunks . HeadSeriesRef
if i <= 500 {
ref = chunks . HeadSeriesRef ( i * 100 )
} else {
ref = chunks . HeadSeriesRef ( ( i - 500 ) * 50 )
}
seriesRec := record . RefSeries {
Ref : ref ,
Labels : labels . Labels {
{ Name : "__name__" , Value : "request_duration" } ,
{ Name : "status_code" , Value : "200" } ,
{ Name : "foo" , Value : fmt . Sprintf ( "baz%d" , rand . Int ( ) ) } ,
} ,
}
// Add a sample so that the series is not garbage collected.
samplesRec := record . RefSample { Ref : ref , T : 1000 , V : 1000 }
var enc record . Encoder
rec := enc . Series ( [ ] record . RefSeries { seriesRec } , buf )
buf = rec [ : 0 ]
require . NoError ( t , wlog . Log ( rec ) )
rec = enc . Samples ( [ ] record . RefSample { samplesRec } , buf )
buf = rec [ : 0 ]
require . NoError ( t , wlog . Log ( rec ) )
}
// Write a corrupt snapshot to fail the replay on startup.
snapshotName := chunkSnapshotDir ( 0 , 100 )
cpdir := filepath . Join ( dir , snapshotName )
require . NoError ( t , os . MkdirAll ( cpdir , 0o777 ) )
err = os . WriteFile ( filepath . Join ( cpdir , "00000000" ) , [ ] byte { 1 , 5 , 3 , 5 , 6 , 7 , 4 , 2 , 2 } , 0o777 )
require . NoError ( t , err )
opts := DefaultHeadOptions ( )
opts . ChunkDirRoot = dir
opts . EnableMemorySnapshotOnShutdown = true
head , err := NewHead ( nil , nil , wlog , opts , nil )
require . NoError ( t , err )
require . NoError ( t , head . Init ( math . MinInt64 ) )
defer func ( ) {
require . NoError ( t , head . Close ( ) )
} ( )
// Snapshot replay should error out.
require . Equal ( t , 1.0 , prom_testutil . ToFloat64 ( head . metrics . snapshotReplayErrorTotal ) )
// Querying `request_duration{status_code!="200"}` should return no series since all of
// them have status_code="200".
q , err := NewBlockQuerier ( head , math . MinInt64 , math . MaxInt64 )
require . NoError ( t , err )
series := query ( t , q ,
labels . MustNewMatcher ( labels . MatchEqual , "__name__" , "request_duration" ) ,
labels . MustNewMatcher ( labels . MatchNotEqual , "status_code" , "200" ) ,
)
require . Len ( t , series , 0 , "there should be no series found" )
}
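
// TestChunkSnapshotTakenAfterIncompleteSnapshot verifies that a leftover
// snapshot directory with a .tmp suffix neither prevents new snapshots from
// being taken nor breaks replay.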
func TestChunkSnapshotTakenAfterIncompleteSnapshot ( t * testing . T ) {
dir := t . TempDir ( )
wlog , err := wal . NewSize ( nil , nil , filepath . Join ( dir , "wal" ) , 32768 , true )
require . NoError ( t , err )
// Write a snapshot with a .tmp suffix. This used to prevent taking any further snapshots or replaying snapshots.
snapshotName := chunkSnapshotDir ( 0 , 100 ) + ".tmp"
cpdir := filepath . Join ( dir , snapshotName )
require . NoError ( t , os . MkdirAll ( cpdir , 0o777 ) )
opts := DefaultHeadOptions ( )
opts . ChunkDirRoot = dir
opts . EnableMemorySnapshotOnShutdown = true
head , err := NewHead ( nil , nil , wlog , opts , nil )
require . NoError ( t , err )
require . NoError ( t , head . Init ( math . MinInt64 ) )
require . Equal ( t , 0.0 , prom_testutil . ToFloat64 ( head . metrics . snapshotReplayErrorTotal ) )
// Add some samples for the snapshot.
app := head . Appender ( context . Background ( ) )
_ , err = app . Append ( 0 , labels . Labels { { Name : "foo" , Value : "bar" } } , 10 , 10 )
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
// Should not return any error for a successful snapshot.
require . NoError ( t , head . Close ( ) )
// Verify the snapshot.
name , idx , offset , err := LastChunkSnapshot ( dir )
require . NoError ( t , err )
require . True ( t , name != "" )
require . Equal ( t , 0 , idx )
require . Greater ( t , offset , 0 )
}
// Tests https://github.com/prometheus/prometheus/issues/10277.
func TestMmapPanicAfterMmapReplayCorruption ( t * testing . T ) {
dir := t . TempDir ( )
wlog , err := wal . NewSize ( nil , nil , filepath . Join ( dir , "wal" ) , 32768 , false )
require . NoError ( t , err )
opts := DefaultHeadOptions ( )
opts . ChunkRange = DefaultBlockDuration
opts . ChunkDirRoot = dir
opts . EnableExemplarStorage = true
opts . MaxExemplars . Store ( config . DefaultExemplarsConfig . MaxExemplars )
h , err := NewHead ( nil , nil , wlog , opts , nil )
require . NoError ( t , err )
require . NoError ( t , h . Init ( 0 ) )
lastTs := int64 ( 0 )
var ref storage . SeriesRef
lbls := labels . FromStrings ( "__name__" , "testing" , "foo" , "bar" )
addChunks := func ( ) {
interval := DefaultBlockDuration / ( 4 * 120 )
app := h . Appender ( context . Background ( ) )
for i := 0 ; i < 250 ; i ++ {
ref , err = app . Append ( ref , lbls , lastTs , float64 ( lastTs ) )
require . NoError ( t , err )
lastTs += interval
if i % 10 == 0 {
require . NoError ( t , app . Commit ( ) )
app = h . Appender ( context . Background ( ) )
}
}
require . NoError ( t , app . Commit ( ) )
}
addChunks ( )
require . NoError ( t , h . Close ( ) )
wlog , err = wal . NewSize ( nil , nil , filepath . Join ( dir , "wal" ) , 32768 , false )
require . NoError ( t , err )
mmapFilePath := filepath . Join ( dir , "chunks_head" , "000001" )
f , err := os . OpenFile ( mmapFilePath , os . O_WRONLY , 0o666 )
require . NoError ( t , err )
_ , err = f . WriteAt ( [ ] byte { 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 } , 17 )
require . NoError ( t , err )
require . NoError ( t , f . Close ( ) )
h , err = NewHead ( nil , nil , wlog , opts , nil )
require . NoError ( t , err )
require . NoError ( t , h . Init ( 0 ) )
addChunks ( )
require . NoError ( t , h . Close ( ) )
}
// Tests https://github.com/prometheus/prometheus/issues/10277.
func TestReplayAfterMmapReplayError ( t * testing . T ) {
dir := t . TempDir ( )
var h * Head
var err error
openHead := func ( ) {
wlog , err := wal . NewSize ( nil , nil , filepath . Join ( dir , "wal" ) , 32768 , false )
require . NoError ( t , err )
opts := DefaultHeadOptions ( )
opts . ChunkRange = DefaultBlockDuration
opts . ChunkDirRoot = dir
opts . EnableMemorySnapshotOnShutdown = true
opts . MaxExemplars . Store ( config . DefaultExemplarsConfig . MaxExemplars )
h , err = NewHead ( nil , nil , wlog , opts , nil )
require . NoError ( t , err )
require . NoError ( t , h . Init ( 0 ) )
}
openHead ( )
itvl := int64 ( 15 * time . Second / time . Millisecond )
lastTs := int64 ( 0 )
lbls := labels . FromStrings ( "__name__" , "testing" , "foo" , "bar" )
var expSamples [ ] tsdbutil . Sample
addSamples := func ( numSamples int ) {
app := h . Appender ( context . Background ( ) )
var ref storage . SeriesRef
for i := 0 ; i < numSamples ; i ++ {
ref , err = app . Append ( ref , lbls , lastTs , float64 ( lastTs ) )
expSamples = append ( expSamples , sample { t : lastTs , v : float64 ( lastTs ) } )
require . NoError ( t , err )
lastTs += itvl
if i % 10 == 0 {
require . NoError ( t , app . Commit ( ) )
app = h . Appender ( context . Background ( ) )
}
}
require . NoError ( t , app . Commit ( ) )
}
// Creating multiple m-map files.
for i := 0 ; i < 5 ; i ++ {
addSamples ( 250 )
require . NoError ( t , h . Close ( ) )
if i != 4 {
// Don't open head for the last iteration.
openHead ( )
}
}
files , err := os . ReadDir ( filepath . Join ( dir , "chunks_head" ) )
require . NoError ( t , err )
require . Equal ( t , 5 , len ( files ) )
// Corrupt a m-map file.
mmapFilePath := filepath . Join ( dir , "chunks_head" , "000002" )
f , err := os . OpenFile ( mmapFilePath , os . O_WRONLY , 0o666 )
require . NoError ( t , err )
_ , err = f . WriteAt ( [ ] byte { 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 } , 17 )
require . NoError ( t , err )
require . NoError ( t , f . Close ( ) )
openHead ( )
// There should be fewer m-map files due to the corruption.
files , err = os . ReadDir ( filepath . Join ( dir , "chunks_head" ) )
require . NoError ( t , err )
require . Equal ( t , 2 , len ( files ) )
// Querying should not panic.
q , err := NewBlockQuerier ( h , 0 , lastTs )
require . NoError ( t , err )
res := query ( t , q , labels . MustNewMatcher ( labels . MatchEqual , "__name__" , "testing" ) )
require . Equal ( t , map [ string ] [ ] tsdbutil . Sample { lbls . String ( ) : expSamples } , res )
require . NoError ( t , h . Close ( ) )
}