// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package tsdb

import (
	"context"
	"fmt"
	"io"
	"math"
	"math/rand"
	"os"
	"path"
	"path/filepath"
	"reflect"
	"sort"
	"strconv"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	"github.com/prometheus/client_golang/prometheus"
	prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
	"github.com/stretchr/testify/require"
	"go.uber.org/atomic"
	"golang.org/x/sync/errgroup"

	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
2021-11-08 06:23:17 -08:00
"github.com/prometheus/prometheus/model/labels"
2021-11-17 10:57:31 -08:00
"github.com/prometheus/prometheus/model/value"
2020-02-06 07:58:38 -08:00
"github.com/prometheus/prometheus/storage"
2019-08-13 01:34:14 -07:00
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/index"
2019-09-19 02:15:41 -07:00
"github.com/prometheus/prometheus/tsdb/record"
"github.com/prometheus/prometheus/tsdb/tombstones"
2019-08-13 01:34:14 -07:00
"github.com/prometheus/prometheus/tsdb/tsdbutil"
2022-10-10 08:08:46 -07:00
"github.com/prometheus/prometheus/tsdb/wlog"
2023-04-16 05:13:31 -07:00
"github.com/prometheus/prometheus/util/testutil"
2017-01-05 06:13:01 -08:00
)

// newTestHeadDefaultOptions returns the HeadOptions that should be used by default in unit tests.
func newTestHeadDefaultOptions(chunkRange int64, oooEnabled bool) *HeadOptions {
	opts := DefaultHeadOptions()
	opts.ChunkRange = chunkRange
	opts.EnableExemplarStorage = true
	opts.MaxExemplars.Store(config.DefaultExemplarsConfig.MaxExemplars)
	opts.EnableNativeHistograms.Store(true)
	if oooEnabled {
		opts.OutOfOrderTimeWindow.Store(10 * time.Minute.Milliseconds())
	}
	return opts
}
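
// newTestHead returns a Head for testing, backed by a temporary directory, together with the WAL it writes to.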
func newTestHead(t testing.TB, chunkRange int64, compressWAL wlog.CompressionType, oooEnabled bool) (*Head, *wlog.WL) {
	return newTestHeadWithOptions(t, compressWAL, newTestHeadDefaultOptions(chunkRange, oooEnabled))
}
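
// newTestHeadWithOptions is like newTestHead but uses the given HeadOptions, overriding only the chunks directory.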
func newTestHeadWithOptions(t testing.TB, compressWAL wlog.CompressionType, opts *HeadOptions) (*Head, *wlog.WL) {
	dir := t.TempDir()
	wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, compressWAL)
	require.NoError(t, err)

	// Override the chunks dir with the testing one.
	opts.ChunkDirRoot = dir

	h, err := NewHead(nil, nil, wal, nil, opts, nil)
	require.NoError(t, err)

	require.NoError(t, h.chunkDiskMapper.IterateAllChunks(func(_ chunks.HeadSeriesRef, _ chunks.ChunkDiskMapperRef, _, _ int64, _ uint16, _ chunkenc.Encoding, _ bool) error {
		return nil
	}))

	return h, wal
}
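
// BenchmarkCreateSeries measures the cost of creating b.N fresh series in the Head.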
func BenchmarkCreateSeries(b *testing.B) {
	series := genSeries(b.N, 10, 0, 0)
	h, _ := newTestHead(b, 10000, wlog.CompressionNone, false)
	b.Cleanup(func() {
		require.NoError(b, h.Close())
	})

	b.ReportAllocs()
	b.ResetTimer()

	for _, s := range series {
		h.getOrCreate(s.Labels().Hash(), s.Labels())
	}
}
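
// BenchmarkHeadAppender_Append_Commit_ExistingSeries measures Append plus Commit throughput against
// series that already exist in the Head, for various series counts and samples per append.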
func BenchmarkHeadAppender_Append_Commit_ExistingSeries(b *testing.B) {
	seriesCounts := []int{100, 1000, 10000}
	series := genSeries(10000, 10, 0, 0)

	for _, seriesCount := range seriesCounts {
		b.Run(fmt.Sprintf("%d series", seriesCount), func(b *testing.B) {
			for _, samplesPerAppend := range []int64{1, 2, 5, 100} {
				b.Run(fmt.Sprintf("%d samples per append", samplesPerAppend), func(b *testing.B) {
					h, _ := newTestHead(b, 10000, wlog.CompressionNone, false)
					b.Cleanup(func() { require.NoError(b, h.Close()) })

					ts := int64(1000)
					appendSamples := func() error {
						var err error
						app := h.Appender(context.Background())
						for _, s := range series[:seriesCount] {
							var ref storage.SeriesRef
							for sampleIndex := int64(0); sampleIndex < samplesPerAppend; sampleIndex++ {
								ref, err = app.Append(ref, s.Labels(), ts+sampleIndex, float64(ts+sampleIndex))
								if err != nil {
									return err
								}
							}
						}
						ts += 1000 // should increment more than highest samplesPerAppend
						return app.Commit()
					}

					// Init series, that's not what we're benchmarking here.
					require.NoError(b, appendSamples())

					b.ReportAllocs()
					b.ResetTimer()

					for i := 0; i < b.N; i++ {
						require.NoError(b, appendSamples())
					}
				})
			}
		})
	}
}
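
// populateTestWL encodes the given records and logs them to the WAL, one entry per record slice.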
func populateTestWL(t testing.TB, w *wlog.WL, recs []interface{}) {
	var enc record.Encoder
	for _, r := range recs {
		switch v := r.(type) {
		case []record.RefSeries:
			require.NoError(t, w.Log(enc.Series(v, nil)))
		case []record.RefSample:
			require.NoError(t, w.Log(enc.Samples(v, nil)))
		case []tombstones.Stone:
			require.NoError(t, w.Log(enc.Tombstones(v, nil)))
		case []record.RefExemplar:
			require.NoError(t, w.Log(enc.Exemplars(v, nil)))
		case []record.RefMmapMarker:
			require.NoError(t, w.Log(enc.MmapMarkers(v, nil)))
		}
	}
}
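
// readTestWAL decodes all records found in the WAL segments under dir and returns them in read order.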
func readTestWAL(t testing.TB, dir string) (recs []interface{}) {
	sr, err := wlog.NewSegmentsReader(dir)
	require.NoError(t, err)
	defer func() {
		require.NoError(t, sr.Close())
	}()

	dec := record.NewDecoder(labels.NewSymbolTable())
	r := wlog.NewReader(sr)
	for r.Next() {
		rec := r.Record()

		switch dec.Type(rec) {
		case record.Series:
			series, err := dec.Series(rec, nil)
			require.NoError(t, err)
			recs = append(recs, series)
		case record.Samples:
			samples, err := dec.Samples(rec, nil)
			require.NoError(t, err)
			recs = append(recs, samples)
		case record.HistogramSamples:
			samples, err := dec.HistogramSamples(rec, nil)
			require.NoError(t, err)
			recs = append(recs, samples)
		case record.FloatHistogramSamples:
			samples, err := dec.FloatHistogramSamples(rec, nil)
			require.NoError(t, err)
			recs = append(recs, samples)
		case record.Tombstones:
			tstones, err := dec.Tombstones(rec, nil)
			require.NoError(t, err)
			recs = append(recs, tstones)
		case record.Metadata:
			meta, err := dec.Metadata(rec, nil)
			require.NoError(t, err)
			recs = append(recs, meta)
		case record.Exemplars:
			exemplars, err := dec.Exemplars(rec, nil)
			require.NoError(t, err)
			recs = append(recs, exemplars)
		default:
			require.Fail(t, "unknown record type")
		}
	}
	require.NoError(t, r.Err())
	return recs
}
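
// BenchmarkLoadWLs measures Head start-up by replaying WAL (and, where out-of-order data is configured, WBL)
// contents with varying numbers of series, samples, exemplars, mmapped chunks, and out-of-order samples.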
func BenchmarkLoadWLs(b *testing.B) {
	cases := []struct {
		// Total series is (batches*seriesPerBatch).
		batches          int
		seriesPerBatch   int
		samplesPerSeries int
		mmappedChunkT    int64
		// The first oooSeriesPct*seriesPerBatch series in a batch are selected as "OOO" series.
		oooSeriesPct float64
		// The first oooSamplesPct*samplesPerSeries samples in an OOO series are written as OOO samples.
		oooSamplesPct float64
		oooCapMax     int64
	}{
		{ // Less series and more samples. 2 hour WAL with 1 second scrape interval.
			batches:          10,
			seriesPerBatch:   100,
			samplesPerSeries: 7200,
		},
		{ // More series and less samples.
			batches:          10,
			seriesPerBatch:   10000,
			samplesPerSeries: 50,
		},
		{ // In between.
			batches:          10,
			seriesPerBatch:   1000,
			samplesPerSeries: 480,
		},
		{ // 2 hour WAL with 15 second scrape interval, and mmapped chunks up to last 100 samples.
			batches:          100,
			seriesPerBatch:   1000,
			samplesPerSeries: 480,
			mmappedChunkT:    3800,
		},
		{ // A lot of OOO samples (50% of series with 50% of samples being OOO).
			batches:          10,
			seriesPerBatch:   1000,
			samplesPerSeries: 480,
			oooSeriesPct:     0.5,
			oooSamplesPct:    0.5,
			oooCapMax:        DefaultOutOfOrderCapMax,
		},
		{ // Fewer OOO samples (10% of series with 10% of samples being OOO).
			batches:          10,
			seriesPerBatch:   1000,
			samplesPerSeries: 480,
			oooSeriesPct:     0.1,
			oooSamplesPct:    0.1,
		},
		{ // 2 hour WAL with 15 second scrape interval, and mmapped chunks up to last 100 samples.
			// Four mmap markers per OOO series: 480 * 0.3 = 144, 144 / 32 (DefaultOutOfOrderCapMax) = 4.
			batches:          100,
			seriesPerBatch:   1000,
			samplesPerSeries: 480,
			mmappedChunkT:    3800,
			oooSeriesPct:     0.2,
			oooSamplesPct:    0.3,
			oooCapMax:        DefaultOutOfOrderCapMax,
		},
	}

	labelsPerSeries := 5
	// Rough estimates of most common % of samples that have an exemplar for each scrape.
	exemplarsPercentages := []float64{0, 0.5, 1, 5}
	lastExemplarsPerSeries := -1

	for _, c := range cases {
		for _, p := range exemplarsPercentages {
			exemplarsPerSeries := int(math.RoundToEven(float64(c.samplesPerSeries) * p / 100))
			// For tests with low samplesPerSeries we could end up testing with 0 exemplarsPerSeries
			// multiple times without this check.
			if exemplarsPerSeries == lastExemplarsPerSeries {
				continue
			}
			lastExemplarsPerSeries = exemplarsPerSeries
			b.Run(fmt.Sprintf("batches=%d,seriesPerBatch=%d,samplesPerSeries=%d,exemplarsPerSeries=%d,mmappedChunkT=%d,oooSeriesPct=%.3f,oooSamplesPct=%.3f,oooCapMax=%d", c.batches, c.seriesPerBatch, c.samplesPerSeries, exemplarsPerSeries, c.mmappedChunkT, c.oooSeriesPct, c.oooSamplesPct, c.oooCapMax),
				func(b *testing.B) {
					dir := b.TempDir()

					wal, err := wlog.New(nil, nil, dir, wlog.CompressionNone)
					require.NoError(b, err)
					var wbl *wlog.WL
					if c.oooSeriesPct != 0 {
						wbl, err = wlog.New(nil, nil, dir, wlog.CompressionNone)
						require.NoError(b, err)
					}

					// Write series.
					refSeries := make([]record.RefSeries, 0, c.seriesPerBatch)
					for k := 0; k < c.batches; k++ {
						refSeries = refSeries[:0]
						for i := k * c.seriesPerBatch; i < (k+1)*c.seriesPerBatch; i++ {
							lbls := make(map[string]string, labelsPerSeries)
							lbls[defaultLabelName] = strconv.Itoa(i)
							for j := 1; len(lbls) < labelsPerSeries; j++ {
								lbls[defaultLabelName+strconv.Itoa(j)] = defaultLabelValue + strconv.Itoa(j)
							}
							refSeries = append(refSeries, record.RefSeries{Ref: chunks.HeadSeriesRef(i) * 101, Labels: labels.FromMap(lbls)})
						}
						populateTestWL(b, wal, []interface{}{refSeries})
					}

					// Write samples.
					refSamples := make([]record.RefSample, 0, c.seriesPerBatch)

					oooSeriesPerBatch := int(float64(c.seriesPerBatch) * c.oooSeriesPct)
					oooSamplesPerSeries := int(float64(c.samplesPerSeries) * c.oooSamplesPct)

					for i := 0; i < c.samplesPerSeries; i++ {
						for j := 0; j < c.batches; j++ {
							refSamples = refSamples[:0]

							k := j * c.seriesPerBatch
							// Skip appending the first oooSamplesPerSeries samples for the series in the batch that
							// should have OOO samples. OOO samples are appended after all the in-order samples.
							if i < oooSamplesPerSeries {
								k += oooSeriesPerBatch
							}
							for ; k < (j+1)*c.seriesPerBatch; k++ {
								refSamples = append(refSamples, record.RefSample{
									Ref: chunks.HeadSeriesRef(k) * 101,
									T:   int64(i) * 10,
									V:   float64(i) * 100,
								})
							}
							populateTestWL(b, wal, []interface{}{refSamples})
						}
					}

					// Write mmapped chunks.
					if c.mmappedChunkT != 0 {
						chunkDiskMapper, err := chunks.NewChunkDiskMapper(nil, mmappedChunksDir(dir), chunkenc.NewPool(), chunks.DefaultWriteBufferSize, chunks.DefaultWriteQueueSize)
						require.NoError(b, err)
						cOpts := chunkOpts{
							chunkDiskMapper: chunkDiskMapper,
							chunkRange:      c.mmappedChunkT,
							samplesPerChunk: DefaultSamplesPerChunk,
						}
						for k := 0; k < c.batches*c.seriesPerBatch; k++ {
							// Create one mmapped chunk per series, with one sample at the given time.
							s := newMemSeries(labels.Labels{}, chunks.HeadSeriesRef(k)*101, 0, defaultIsolationDisabled)
							s.append(c.mmappedChunkT, 42, 0, cOpts)
							// There's only one head chunk because only a single sample is appended. mmapChunks()
							// ignores the latest chunk, so we need to cut a new head chunk to guarantee the chunk with
							// the sample at c.mmappedChunkT is mmapped.
							s.cutNewHeadChunk(c.mmappedChunkT, chunkenc.EncXOR, c.mmappedChunkT)
							s.mmapChunks(chunkDiskMapper)
						}
						require.NoError(b, chunkDiskMapper.Close())
					}

					// Write exemplars.
					refExemplars := make([]record.RefExemplar, 0, c.seriesPerBatch)
					for i := 0; i < exemplarsPerSeries; i++ {
						for j := 0; j < c.batches; j++ {
							refExemplars = refExemplars[:0]
							for k := j * c.seriesPerBatch; k < (j+1)*c.seriesPerBatch; k++ {
								refExemplars = append(refExemplars, record.RefExemplar{
									Ref:    chunks.HeadSeriesRef(k) * 101,
									T:      int64(i) * 10,
									V:      float64(i) * 100,
									Labels: labels.FromStrings("trace_id", fmt.Sprintf("trace-%d", i)),
								})
							}
							populateTestWL(b, wal, []interface{}{refExemplars})
						}
					}

					// Write OOO samples and mmap markers.
					refMarkers := make([]record.RefMmapMarker, 0, oooSeriesPerBatch)
					refSamples = make([]record.RefSample, 0, oooSeriesPerBatch)
					for i := 0; i < oooSamplesPerSeries; i++ {
						shouldAddMarkers := c.oooCapMax != 0 && i != 0 && int64(i)%c.oooCapMax == 0
						for j := 0; j < c.batches; j++ {
							refSamples = refSamples[:0]
							if shouldAddMarkers {
								refMarkers = refMarkers[:0]
							}
							for k := j * c.seriesPerBatch; k < (j*c.seriesPerBatch)+oooSeriesPerBatch; k++ {
								ref := chunks.HeadSeriesRef(k) * 101
								if shouldAddMarkers {
									// loadWBL() checks that the marker's MmapRef is less than or equal to the ref
									// for the last mmap chunk. Setting MmapRef to 0 to always pass that check.
									refMarkers = append(refMarkers, record.RefMmapMarker{Ref: ref, MmapRef: 0})
								}
								refSamples = append(refSamples, record.RefSample{
									Ref: ref,
									T:   int64(i) * 10,
									V:   float64(i) * 100,
								})
							}
							if shouldAddMarkers {
								populateTestWL(b, wbl, []interface{}{refMarkers})
							}
							populateTestWL(b, wal, []interface{}{refSamples})
							populateTestWL(b, wbl, []interface{}{refSamples})
						}
					}

					b.ResetTimer()

					// Load the WAL.
					for i := 0; i < b.N; i++ {
						opts := DefaultHeadOptions()
						opts.ChunkRange = 1000
						opts.ChunkDirRoot = dir
						if c.oooCapMax > 0 {
							opts.OutOfOrderCapMax.Store(c.oooCapMax)
						}
						h, err := NewHead(nil, nil, wal, wbl, opts, nil)
						require.NoError(b, err)
						h.Init(0)
					}
					b.StopTimer()
					wal.Close()
					if wbl != nil {
						wbl.Close()
					}
				})
		}
	}
}

// BenchmarkLoadRealWLs will be skipped unless the BENCHMARK_LOAD_REAL_WLS_DIR environment variable is set.
// BENCHMARK_LOAD_REAL_WLS_DIR should be the folder where `wal` and `chunks_head` are located.
func BenchmarkLoadRealWLs(b *testing.B) {
	dir := os.Getenv("BENCHMARK_LOAD_REAL_WLS_DIR")
	if dir == "" {
		b.SkipNow()
	}

	wal, err := wlog.New(nil, nil, filepath.Join(dir, "wal"), wlog.CompressionNone)
	require.NoError(b, err)
	b.Cleanup(func() { wal.Close() })

	wbl, err := wlog.New(nil, nil, filepath.Join(dir, "wbl"), wlog.CompressionNone)
	require.NoError(b, err)
	b.Cleanup(func() { wbl.Close() })

	// Load the WAL.
	for i := 0; i < b.N; i++ {
		opts := DefaultHeadOptions()
		opts.ChunkDirRoot = dir
		h, err := NewHead(nil, nil, wal, wbl, opts, nil)
		require.NoError(b, err)
		require.NoError(b, h.Init(0))
	}
}

// TestHead_HighConcurrencyReadAndWrite generates 1000 series with a step of 15s and fills a whole block with samples;
// in total that is 4000 chunks, because with a step of 15s there are 4 chunks per block per series.
// While appending the samples to the head it concurrently queries them from multiple goroutines and verifies that the
// returned results are correct.
func TestHead_HighConcurrencyReadAndWrite(t *testing.T) {
	head, _ := newTestHead(t, DefaultBlockDuration, wlog.CompressionNone, false)
	defer func() {
		require.NoError(t, head.Close())
	}()

	seriesCnt := 1000
	readConcurrency := 2
	writeConcurrency := 10
	startTs := uint64(DefaultBlockDuration) // start at the second block relative to the unix epoch.
	qryRange := uint64(5 * time.Minute.Milliseconds())
	step := uint64(15 * time.Second / time.Millisecond)
	endTs := startTs + uint64(DefaultBlockDuration)
	labelSets := make([]labels.Labels, seriesCnt)
	for i := 0; i < seriesCnt; i++ {
		labelSets[i] = labels.FromStrings("seriesId", strconv.Itoa(i))
	}

	head.Init(0)

	g, ctx := errgroup.WithContext(context.Background())
	whileNotCanceled := func(f func() (bool, error)) error {
		for ctx.Err() == nil {
			cont, err := f()
			if err != nil {
				return err
			}
			if !cont {
				return nil
			}
		}
		return nil
	}

	// Create one channel for each write worker, the channels will be used by the coordinator
	// go routine to coordinate which timestamps each write worker has to write.
	writerTsCh := make([]chan uint64, writeConcurrency)
	for writerTsChIdx := range writerTsCh {
		writerTsCh[writerTsChIdx] = make(chan uint64)
	}

	// workerReadyWg is used to synchronize the start of the test,
	// we only start the test once all workers signal that they're ready.
	var workerReadyWg sync.WaitGroup
	workerReadyWg.Add(writeConcurrency + readConcurrency)

	// Start the write workers.
	for wid := 0; wid < writeConcurrency; wid++ {
		// Create copy of workerID to be used by worker routine.
		workerID := wid

		g.Go(func() error {
			// The label sets which this worker will write.
			workerLabelSets := labelSets[(seriesCnt/writeConcurrency)*workerID : (seriesCnt/writeConcurrency)*(workerID+1)]

			// Signal that this worker is ready.
			workerReadyWg.Done()

			return whileNotCanceled(func() (bool, error) {
				ts, ok := <-writerTsCh[workerID]
				if !ok {
					return false, nil
				}

				app := head.Appender(ctx)
				for i := 0; i < len(workerLabelSets); i++ {
					// We also use the timestamp as the sample value.
					_, err := app.Append(0, workerLabelSets[i], int64(ts), float64(ts))
					if err != nil {
						return false, fmt.Errorf("error when appending to head: %w", err)
					}
				}

				return true, app.Commit()
			})
		})
	}

	// queryHead is a helper to query the head for a given time range and labelset.
	queryHead := func(mint, maxt uint64, label labels.Label) (map[string][]chunks.Sample, error) {
		q, err := NewBlockQuerier(head, int64(mint), int64(maxt))
		if err != nil {
			return nil, err
		}
		return query(t, q, labels.MustNewMatcher(labels.MatchEqual, label.Name, label.Value)), nil
	}

	// readerTsCh will be used by the coordinator go routine to coordinate which timestamps the reader should read.
	readerTsCh := make(chan uint64)

	// Start the read workers.
	for wid := 0; wid < readConcurrency; wid++ {
		// Create copy of threadID to be used by worker routine.
		workerID := wid

		g.Go(func() error {
			querySeriesRef := (seriesCnt / readConcurrency) * workerID

			// Signal that this worker is ready.
			workerReadyWg.Done()

			return whileNotCanceled(func() (bool, error) {
				ts, ok := <-readerTsCh
				if !ok {
					return false, nil
				}

				querySeriesRef = (querySeriesRef + 1) % seriesCnt
				lbls := labelSets[querySeriesRef]
				// lbls has a single entry; extract it so we can run a query.
				var lbl labels.Label
				lbls.Range(func(l labels.Label) {
					lbl = l
				})
				samples, err := queryHead(ts-qryRange, ts, lbl)
				if err != nil {
					return false, err
				}

				if len(samples) != 1 {
					return false, fmt.Errorf("expected 1 series, got %d", len(samples))
				}

				series := lbls.String()
				expectSampleCnt := qryRange/step + 1
				if expectSampleCnt != uint64(len(samples[series])) {
					return false, fmt.Errorf("expected %d samples, got %d", expectSampleCnt, len(samples[series]))
				}

				for sampleIdx, sample := range samples[series] {
					expectedValue := ts - qryRange + (uint64(sampleIdx) * step)
					if sample.T() != int64(expectedValue) {
						return false, fmt.Errorf("expected sample %d to have ts %d, got %d", sampleIdx, expectedValue, sample.T())
					}
					if sample.F() != float64(expectedValue) {
						return false, fmt.Errorf("expected sample %d to have value %d, got %f", sampleIdx, expectedValue, sample.F())
					}
				}

				return true, nil
			})
		})
	}

	// Start the coordinator go routine.
	g.Go(func() error {
		currTs := startTs

		defer func() {
			// End of the test, close all channels to stop the workers.
			for _, ch := range writerTsCh {
				close(ch)
			}
			close(readerTsCh)
		}()

		// Wait until all workers are ready to start the test.
		workerReadyWg.Wait()
		return whileNotCanceled(func() (bool, error) {
			// Send the current timestamp to each of the writers.
			for _, ch := range writerTsCh {
				select {
				case ch <- currTs:
				case <-ctx.Done():
					return false, nil
				}
			}

			// Once data for at least <qryRange> has been ingested, send the current timestamp to the readers.
			if currTs > startTs+qryRange {
				select {
				case readerTsCh <- currTs - step:
				case <-ctx.Done():
					return false, nil
				}
			}

			currTs += step
			if currTs > endTs {
				return false, nil
			}

			return true, nil
		})
	})

	require.NoError(t, g.Wait())
}
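
// TestHead_ReadWAL replays a hand-crafted set of WAL records and verifies that series, samples,
// tombstones, and exemplars are restored in the Head as expected.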
func TestHead_ReadWAL(t *testing.T) {
	for _, compress := range []wlog.CompressionType{wlog.CompressionNone, wlog.CompressionSnappy, wlog.CompressionZstd} {
		t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) {
			entries := []interface{}{
				[]record.RefSeries{
					{Ref: 10, Labels: labels.FromStrings("a", "1")},
					{Ref: 11, Labels: labels.FromStrings("a", "2")},
					{Ref: 100, Labels: labels.FromStrings("a", "3")},
				},
				[]record.RefSample{
					{Ref: 0, T: 99, V: 1},
					{Ref: 10, T: 100, V: 2},
					{Ref: 100, T: 100, V: 3},
				},
				[]record.RefSeries{
					{Ref: 50, Labels: labels.FromStrings("a", "4")},
					// This series has two refs pointing to it.
					{Ref: 101, Labels: labels.FromStrings("a", "3")},
				},
				[]record.RefSample{
					{Ref: 10, T: 101, V: 5},
					{Ref: 50, T: 101, V: 6},
					{Ref: 101, T: 101, V: 7},
				},
				[]tombstones.Stone{
					{Ref: 0, Intervals: []tombstones.Interval{{Mint: 99, Maxt: 101}}},
				},
				[]record.RefExemplar{
					{Ref: 10, T: 100, V: 1, Labels: labels.FromStrings("trace_id", "asdf")},
				},
			}

			head, w := newTestHead(t, 1000, compress, false)
			defer func() {
				require.NoError(t, head.Close())
			}()

			populateTestWL(t, w, entries)

			require.NoError(t, head.Init(math.MinInt64))
			require.Equal(t, uint64(101), head.lastSeriesID.Load())

			s10 := head.series.getByID(10)
			s11 := head.series.getByID(11)
			s50 := head.series.getByID(50)
			s100 := head.series.getByID(100)

			testutil.RequireEqual(t, labels.FromStrings("a", "1"), s10.lset)
			require.Nil(t, s11) // Series without samples should be garbage collected at head.Init().
			testutil.RequireEqual(t, labels.FromStrings("a", "4"), s50.lset)
			testutil.RequireEqual(t, labels.FromStrings("a", "3"), s100.lset)

			expandChunk := func(c chunkenc.Iterator) (x []sample) {
				for c.Next() == chunkenc.ValFloat {
					t, v := c.At()
					x = append(x, sample{t: t, f: v})
				}
				require.NoError(t, c.Err())
				return x
			}
			c, _, _, err := s10.chunk(0, head.chunkDiskMapper, &head.memChunkPool)
			require.NoError(t, err)
			require.Equal(t, []sample{{100, 2, nil, nil}, {101, 5, nil, nil}}, expandChunk(c.chunk.Iterator(nil)))
			c, _, _, err = s50.chunk(0, head.chunkDiskMapper, &head.memChunkPool)
			require.NoError(t, err)
			require.Equal(t, []sample{{101, 6, nil, nil}}, expandChunk(c.chunk.Iterator(nil)))

			// The samples before the new series record should be discarded since a duplicate record
			// is only possible when old samples were compacted.
			c, _, _, err = s100.chunk(0, head.chunkDiskMapper, &head.memChunkPool)
			require.NoError(t, err)
			require.Equal(t, []sample{{101, 7, nil, nil}}, expandChunk(c.chunk.Iterator(nil)))

			q, err := head.ExemplarQuerier(context.Background())
			require.NoError(t, err)
			e, err := q.Select(0, 1000, []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "1")})
			require.NoError(t, err)
			require.True(t, exemplar.Exemplar{Ts: 100, Value: 1, Labels: labels.FromStrings("trace_id", "asdf")}.Equals(e[0].Exemplars[0]))
		})
	}
}
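
// TestHead_WALMultiRef verifies that when a series is re-created with a new ref after Head truncation,
// replaying the WAL only restores the samples belonging to the newest ref.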
func TestHead_WALMultiRef(t *testing.T) {
	head, w := newTestHead(t, 1000, wlog.CompressionNone, false)

	require.NoError(t, head.Init(0))

	app := head.Appender(context.Background())
	ref1, err := app.Append(0, labels.FromStrings("foo", "bar"), 100, 1)
	require.NoError(t, err)
	require.NoError(t, app.Commit())
	require.Equal(t, 1.0, prom_testutil.ToFloat64(head.metrics.chunksCreated))

	// Add another sample outside chunk range to mmap a chunk.
	app = head.Appender(context.Background())
	_, err = app.Append(0, labels.FromStrings("foo", "bar"), 1500, 2)
	require.NoError(t, err)
	require.NoError(t, app.Commit())
	require.Equal(t, 2.0, prom_testutil.ToFloat64(head.metrics.chunksCreated))

	require.NoError(t, head.Truncate(1600))

	app = head.Appender(context.Background())
	ref2, err := app.Append(0, labels.FromStrings("foo", "bar"), 1700, 3)
	require.NoError(t, err)
	require.NoError(t, app.Commit())
	require.Equal(t, 3.0, prom_testutil.ToFloat64(head.metrics.chunksCreated))

	// Add another sample outside chunk range to mmap a chunk.
	app = head.Appender(context.Background())
	_, err = app.Append(0, labels.FromStrings("foo", "bar"), 2000, 4)
	require.NoError(t, err)
	require.NoError(t, app.Commit())
	require.Equal(t, 4.0, prom_testutil.ToFloat64(head.metrics.chunksCreated))

	require.NotEqual(t, ref1, ref2, "Refs are the same")
	require.NoError(t, head.Close())

	w, err = wlog.New(nil, nil, w.Dir(), wlog.CompressionNone)
	require.NoError(t, err)

	opts := DefaultHeadOptions()
	opts.ChunkRange = 1000
	opts.ChunkDirRoot = w.Dir()
	head, err = NewHead(nil, nil, w, nil, opts, nil)
	require.NoError(t, err)
	require.NoError(t, head.Init(0))
	defer func() {
		require.NoError(t, head.Close())
	}()

	q, err := NewBlockQuerier(head, 0, 2100)
	require.NoError(t, err)
	series := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
	// The samples before the new ref should be discarded since Head truncation
	// happens only after compacting the Head.
	require.Equal(t, map[string][]chunks.Sample{`{foo="bar"}`: {
		sample{1700, 3, nil, nil},
		sample{2000, 4, nil, nil},
	}}, series)
}
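
// TestHead_ActiveAppenders checks that the active-appenders metric is incremented when an Appender is
// created and decremented again on both Commit and Rollback.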
func TestHead_ActiveAppenders(t *testing.T) {
	head, _ := newTestHead(t, 1000, wlog.CompressionNone, false)
	defer head.Close()

	require.NoError(t, head.Init(0))

	// First rollback with no samples.
	app := head.Appender(context.Background())
	require.Equal(t, 1.0, prom_testutil.ToFloat64(head.metrics.activeAppenders))
	require.NoError(t, app.Rollback())
	require.Equal(t, 0.0, prom_testutil.ToFloat64(head.metrics.activeAppenders))

	// Then commit with no samples.
	app = head.Appender(context.Background())
	require.NoError(t, app.Commit())
	require.Equal(t, 0.0, prom_testutil.ToFloat64(head.metrics.activeAppenders))

	// Now rollback with one sample.
	app = head.Appender(context.Background())
	_, err := app.Append(0, labels.FromStrings("foo", "bar"), 100, 1)
	require.NoError(t, err)
	require.Equal(t, 1.0, prom_testutil.ToFloat64(head.metrics.activeAppenders))
	require.NoError(t, app.Rollback())
	require.Equal(t, 0.0, prom_testutil.ToFloat64(head.metrics.activeAppenders))

	// Now commit with one sample.
	app = head.Appender(context.Background())
	_, err = app.Append(0, labels.FromStrings("foo", "bar"), 100, 1)
	require.NoError(t, err)
	require.NoError(t, app.Commit())
	require.Equal(t, 0.0, prom_testutil.ToFloat64(head.metrics.activeAppenders))
}
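
// TestHead_UnknownWALRecord ensures that an unknown record type in the WAL does not fail Head initialization.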
func TestHead_UnknownWALRecord(t *testing.T) {
	head, w := newTestHead(t, 1000, wlog.CompressionNone, false)
	w.Log([]byte{255, 42})
	require.NoError(t, head.Init(0))
	require.NoError(t, head.Close())
}

// BenchmarkHead_Truncate is quite heavy, so consider running it with
// -benchtime=10x or similar to get more stable and comparable results.
func BenchmarkHead_Truncate(b *testing.B) {
	const total = 1e6

	prepare := func(b *testing.B, churn int) *Head {
		h, _ := newTestHead(b, 1000, wlog.CompressionNone, false)
		b.Cleanup(func() {
			require.NoError(b, h.Close())
		})

		h.initTime(0)

		internedItoa := map[int]string{}
		var mtx sync.RWMutex
		itoa := func(i int) string {
			mtx.RLock()
			s, ok := internedItoa[i]
			mtx.RUnlock()
			if ok {
				return s
			}
			mtx.Lock()
			s = strconv.Itoa(i)
			internedItoa[i] = s
			mtx.Unlock()
			return s
		}

		allSeries := [total]labels.Labels{}
		nameValues := make([]string, 0, 100)
		for i := 0; i < total; i++ {
			nameValues = nameValues[:0]

			// A thousand labels like lbl_x_of_1000, each with total/1000 values.
			thousand := "lbl_" + itoa(i%1000) + "_of_1000"
			nameValues = append(nameValues, thousand, itoa(i/1000))
			// A hundred labels like lbl_x_of_100, each with total/100 values.
			hundred := "lbl_" + itoa(i%100) + "_of_100"
			nameValues = append(nameValues, hundred, itoa(i/100))

			if i%13 == 0 {
				ten := "lbl_" + itoa(i%10) + "_of_10"
				nameValues = append(nameValues, ten, itoa(i%10))
			}

			allSeries[i] = labels.FromStrings(append(nameValues, "first", "a", "second", "a", "third", "a")...)
			s, _, _ := h.getOrCreate(allSeries[i].Hash(), allSeries[i])
			s.mmappedChunks = []*mmappedChunk{
				{minTime: 1000 * int64(i/churn), maxTime: 999 + 1000*int64(i/churn)},
			}
		}

		return h
	}

	for _, churn := range []int{10, 100, 1000} {
		b.Run(fmt.Sprintf("churn=%d", churn), func(b *testing.B) {
			if b.N > total/churn {
				// Just to make sure that benchmark still makes sense.
				panic("benchmark not prepared")
			}
			h := prepare(b, churn)
			b.ResetTimer()

			for i := 0; i < b.N; i++ {
				require.NoError(b, h.Truncate(1000*int64(i)))
				// Make sure the benchmark is meaningful and it's actually truncating the expected amount of series.
				require.Equal(b, total-churn*i, int(h.NumSeries()))
			}
		})
	}
}
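
// TestHead_Truncate checks that truncating the Head drops mmapped chunks, series, postings, and symbols
// that lie entirely below the truncation time.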
func TestHead_Truncate(t *testing.T) {
	h, _ := newTestHead(t, 1000, wlog.CompressionNone, false)
	defer func() {
		require.NoError(t, h.Close())
	}()

	h.initTime(0)

	ctx := context.Background()

	s1, _, _ := h.getOrCreate(1, labels.FromStrings("a", "1", "b", "1"))
	s2, _, _ := h.getOrCreate(2, labels.FromStrings("a", "2", "b", "1"))
	s3, _, _ := h.getOrCreate(3, labels.FromStrings("a", "1", "b", "2"))
	s4, _, _ := h.getOrCreate(4, labels.FromStrings("a", "2", "b", "2", "c", "1"))

	s1.mmappedChunks = []*mmappedChunk{
		{minTime: 0, maxTime: 999},
		{minTime: 1000, maxTime: 1999},
		{minTime: 2000, maxTime: 2999},
	}
	s2.mmappedChunks = []*mmappedChunk{
		{minTime: 1000, maxTime: 1999},
		{minTime: 2000, maxTime: 2999},
		{minTime: 3000, maxTime: 3999},
	}
	s3.mmappedChunks = []*mmappedChunk{
		{minTime: 0, maxTime: 999},
		{minTime: 1000, maxTime: 1999},
	}
	s4.mmappedChunks = []*mmappedChunk{}

	// Truncation need not be aligned.
	require.NoError(t, h.Truncate(1))

	require.NoError(t, h.Truncate(2000))

	require.Equal(t, []*mmappedChunk{
		{minTime: 2000, maxTime: 2999},
	}, h.series.getByID(s1.ref).mmappedChunks)

	require.Equal(t, []*mmappedChunk{
		{minTime: 2000, maxTime: 2999},
		{minTime: 3000, maxTime: 3999},
	}, h.series.getByID(s2.ref).mmappedChunks)

	require.Nil(t, h.series.getByID(s3.ref))
	require.Nil(t, h.series.getByID(s4.ref))

	postingsA1, _ := index.ExpandPostings(h.postings.Get("a", "1"))
	postingsA2, _ := index.ExpandPostings(h.postings.Get("a", "2"))
	postingsB1, _ := index.ExpandPostings(h.postings.Get("b", "1"))
	postingsB2, _ := index.ExpandPostings(h.postings.Get("b", "2"))
	postingsC1, _ := index.ExpandPostings(h.postings.Get("c", "1"))
	postingsAll, _ := index.ExpandPostings(h.postings.Get("", ""))

	require.Equal(t, []storage.SeriesRef{storage.SeriesRef(s1.ref)}, postingsA1)
	require.Equal(t, []storage.SeriesRef{storage.SeriesRef(s2.ref)}, postingsA2)
	require.Equal(t, []storage.SeriesRef{storage.SeriesRef(s1.ref), storage.SeriesRef(s2.ref)}, postingsB1)
	require.Equal(t, []storage.SeriesRef{storage.SeriesRef(s1.ref), storage.SeriesRef(s2.ref)}, postingsAll)
	require.Nil(t, postingsB2)
	require.Nil(t, postingsC1)

	iter := h.postings.Symbols()
	symbols := []string{}
	for iter.Next() {
		symbols = append(symbols, iter.At())
	}
	require.Equal(t,
		[]string{"" /* from 'all' postings list */, "1", "2", "a", "b"},
		symbols)

	values := map[string]map[string]struct{}{}
	for _, name := range h.postings.LabelNames() {
		ss, ok := values[name]
		if !ok {
			ss = map[string]struct{}{}
			values[name] = ss
		}
		for _, value := range h.postings.LabelValues(ctx, name) {
			ss[value] = struct{}{}
		}
	}
	require.Equal(t, map[string]map[string]struct{}{
		"a": {"1": struct{}{}, "2": struct{}{}},
		"b": {"1": struct{}{}},
	}, values)
}

// Validate various behaviors brought on by firstChunkID accounting for
// garbage collected chunks.
func TestMemSeries_truncateChunks(t *testing.T) {
	dir := t.TempDir()
	// This is usually taken from the Head, but passing manually here.
	chunkDiskMapper, err := chunks.NewChunkDiskMapper(nil, dir, chunkenc.NewPool(), chunks.DefaultWriteBufferSize, chunks.DefaultWriteQueueSize)
	require.NoError(t, err)
	defer func() {
		require.NoError(t, chunkDiskMapper.Close())
	}()

	cOpts := chunkOpts{
		chunkDiskMapper: chunkDiskMapper,
		chunkRange:      2000,
		samplesPerChunk: DefaultSamplesPerChunk,
	}

	memChunkPool := sync.Pool{
		New: func() interface{} {
			return &memChunk{}
		},
	}

	s := newMemSeries(labels.FromStrings("a", "b"), 1, 0, defaultIsolationDisabled)

	for i := 0; i < 4000; i += 5 {
		ok, _ := s.append(int64(i), float64(i), 0, cOpts)
		require.True(t, ok, "sample append failed")
	}

	// memSeries.headChunk is a linked list: the 'open' head chunk that receives new
	// samples sits at the front, while older head chunks that still await m-mapping
	// are linked behind it. M-mapping was removed from the append path because it is
	// blocking and serialised; doing it under the memSeries write lock could stall
	// scrapes and queries. Instead it is triggered manually on a schedule, so trigger
	// it here to turn the older head chunks into mmappedChunks before truncating.
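	// A rough sketch (assumed shape, not the actual memSeries code) of the linked
	// list that headChunks forms, which later assertions walk via len(), oldest()
	// and prev:
	//
	//	type headChunkNode struct {
	//		minTime, maxTime int64
	//		prev             *headChunkNode // next-older chunk; nil for the oldest
	//	}
	//
	//	func (c *headChunkNode) len() int {
	//		n := 0
	//		for ; c != nil; c = c.prev {
	//			n++
	//		}
	//		return n
	//	}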
	s.mmapChunks(chunkDiskMapper)

	// Check that truncate removes half of the chunks, and that the ID of the last
	// chunk still gives us the same chunk afterwards.
	countBefore := len(s.mmappedChunks) + 1 // +1 for the head chunk.
	lastID := s.headChunkID(countBefore - 1)
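	// Note: chunk IDs are assumed to be absolute, i.e. offset by series.firstChunkID
	// rather than being slice positions, which is why lastID must still resolve to
	// the same chunk after the older chunks have been garbage collected below.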
	lastChunk, _, _, err := s.chunk(lastID, chunkDiskMapper, &memChunkPool)
	require.NoError(t, err)
	require.NotNil(t, lastChunk)

	chk, _, _, err := s.chunk(0, chunkDiskMapper, &memChunkPool)
	require.NotNil(t, chk)
	require.NoError(t, err)

	s.truncateChunksBefore(2000, 0)

	require.Equal(t, int64(2000), s.mmappedChunks[0].minTime)

	_, _, _, err = s.chunk(0, chunkDiskMapper, &memChunkPool)
	require.Equal(t, storage.ErrNotFound, err, "first chunks not gone")
	require.Equal(t, countBefore/2, len(s.mmappedChunks)+1) // +1 for the head chunk.

	chk, _, _, err = s.chunk(lastID, chunkDiskMapper, &memChunkPool)
	require.NoError(t, err)
	require.Equal(t, lastChunk, chk)
}

func TestMemSeries_truncateChunks_scenarios(t *testing.T) {
	const chunkRange = 100
	const chunkStep = 5

	tests := []struct {
		name string

		headChunks           int                // the number of head chunks to create on memSeries by appending enough samples
		mmappedChunks        int                // the number of mmapped chunks to create on memSeries by appending enough samples
		truncateBefore       int64              // the mint to pass to truncateChunksBefore()
		expectedTruncated    int                // the number of chunks we expect to be truncated and returned by truncateChunksBefore()
		expectedHead         int                // the expected number of head chunks after truncation
		expectedMmap         int                // the expected number of mmapped chunks after truncation
		expectedFirstChunkID chunks.HeadChunkID // the expected series.firstChunkID after truncation
	}{
		{
			name:           "empty memSeries",
			truncateBefore: chunkRange * 10,
		},
		{
			name:         "single head chunk, not truncated",
			headChunks:   1,
			expectedHead: 1,
		},
		{
			name:                 "single head chunk, truncated",
			headChunks:           1,
			truncateBefore:       chunkRange,
			expectedTruncated:    1,
			expectedHead:         0,
			expectedFirstChunkID: 1,
		},
		{
			name:         "2 head chunks, not truncated",
			headChunks:   2,
			expectedHead: 2,
		},
		{
			name:                 "2 head chunks, first truncated",
			headChunks:           2,
			truncateBefore:       chunkRange,
			expectedTruncated:    1,
			expectedHead:         1,
			expectedFirstChunkID: 1,
		},
		{
			name:                 "2 head chunks, everything truncated",
			headChunks:           2,
			truncateBefore:       chunkRange * 2,
			expectedTruncated:    2,
			expectedHead:         0,
			expectedFirstChunkID: 2,
		},
		{
			name:                 "no head chunks, 3 mmap chunks, second mmap truncated",
			headChunks:           0,
			mmappedChunks:        3,
			truncateBefore:       chunkRange * 2,
			expectedTruncated:    2,
			expectedHead:         0,
			expectedMmap:         1,
			expectedFirstChunkID: 2,
		},
		{
			name:          "single head chunk, single mmap chunk, not truncated",
			headChunks:    1,
			mmappedChunks: 1,
			expectedHead:  1,
			expectedMmap:  1,
		},
		{
			name:                 "single head chunk, single mmap chunk, mmap truncated",
			headChunks:           1,
			mmappedChunks:        1,
			truncateBefore:       chunkRange,
			expectedTruncated:    1,
			expectedHead:         1,
			expectedMmap:         0,
			expectedFirstChunkID: 1,
		},
		{
			name:                 "5 head chunks, 5 mmap chunks, third head truncated",
			headChunks:           5,
			mmappedChunks:        5,
			truncateBefore:       chunkRange * 7,
			expectedTruncated:    7,
			expectedHead:         3,
			expectedMmap:         0,
			expectedFirstChunkID: 7,
		},
		{
			name:                 "2 head chunks, 3 mmap chunks, second mmap truncated",
			headChunks:           2,
			mmappedChunks:        3,
			truncateBefore:       chunkRange * 2,
			expectedTruncated:    2,
			expectedHead:         2,
			expectedMmap:         1,
			expectedFirstChunkID: 2,
		},
	}
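
	// How the expectations above are derived (assumed semantics, matching the
	// assertions below): every chunk covers one chunkRange-wide window, so with
	// chunkRange=100 chunk N spans [N*100, (N+1)*100). truncateChunksBefore(mint)
	// is expected to drop every chunk whose data lies entirely before mint, return
	// how many chunks it dropped, and advance series.firstChunkID by that count so
	// the remaining chunks keep their absolute HeadChunkIDs.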
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			dir := t.TempDir()
			chunkDiskMapper, err := chunks.NewChunkDiskMapper(nil, dir, chunkenc.NewPool(), chunks.DefaultWriteBufferSize, chunks.DefaultWriteQueueSize)
			require.NoError(t, err)
			defer func() {
				require.NoError(t, chunkDiskMapper.Close())
			}()

			series := newMemSeries(labels.EmptyLabels(), 1, 0, true)

			cOpts := chunkOpts{
				chunkDiskMapper: chunkDiskMapper,
				chunkRange:      chunkRange,
				samplesPerChunk: DefaultSamplesPerChunk,
			}

			var headStart int
			if tc.mmappedChunks > 0 {
				headStart = (tc.mmappedChunks + 1) * chunkRange
				for i := 0; i < (tc.mmappedChunks+1)*chunkRange; i += chunkStep {
					ok, _ := series.append(int64(i), float64(i), 0, cOpts)
					require.True(t, ok, "sample append failed")
				}
				series.mmapChunks(chunkDiskMapper)
			}

			if tc.headChunks == 0 {
				series.headChunks = nil
			} else {
				for i := headStart; i < chunkRange*(tc.mmappedChunks+tc.headChunks); i += chunkStep {
					ok, _ := series.append(int64(i), float64(i), 0, cOpts)
					require.True(t, ok, "sample append failed: %d", i)
				}
			}

			if tc.headChunks > 0 {
				require.NotNil(t, series.headChunks, "head chunk is missing")
				require.Equal(t, tc.headChunks, series.headChunks.len(), "wrong number of head chunks")
			} else {
				require.Nil(t, series.headChunks, "head chunk is present")
			}
			require.Len(t, series.mmappedChunks, tc.mmappedChunks, "wrong number of mmapped chunks")

			truncated := series.truncateChunksBefore(tc.truncateBefore, 0)
			require.Equal(t, tc.expectedTruncated, truncated, "wrong number of truncated chunks returned")

			require.Len(t, series.mmappedChunks, tc.expectedMmap, "wrong number of mmappedChunks after truncation")

			if tc.expectedHead > 0 {
				require.NotNil(t, series.headChunks, "headChunks should not be nil after truncation")
				require.Equal(t, tc.expectedHead, series.headChunks.len(), "wrong number of head chunks after truncation")
				require.Nil(t, series.headChunks.oldest().prev, "oldest head chunk must not link to any older chunk")
			} else {
				require.Nil(t, series.headChunks, "headChunks should be nil after truncation")
			}

			if series.headChunks != nil || len(series.mmappedChunks) > 0 {
				require.GreaterOrEqual(t, series.maxTime(), tc.truncateBefore, "wrong value of series.maxTime() after truncation")
			} else {
				require.Equal(t, int64(math.MinInt64), series.maxTime(), "wrong value of series.maxTime() after truncation")
			}

			require.Equal(t, tc.expectedFirstChunkID, series.firstChunkID, "wrong firstChunkID after truncation")
		})
	}
}

func TestHeadDeleteSeriesWithoutSamples(t *testing.T) {
	for _, compress := range []wlog.CompressionType{wlog.CompressionNone, wlog.CompressionSnappy, wlog.CompressionZstd} {
		t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) {
			entries := []interface{}{
				[]record.RefSeries{
					{Ref: 10, Labels: labels.FromStrings("a", "1")},
				},
				[]record.RefSample{},
				[]record.RefSeries{
					{Ref: 50, Labels: labels.FromStrings("a", "2")},
				},
				[]record.RefSample{
					{Ref: 50, T: 80, V: 1},
					{Ref: 50, T: 90, V: 1},
				},
			}
			head, w := newTestHead(t, 1000, compress, false)
			defer func() {
				require.NoError(t, head.Close())
			}()

			populateTestWL(t, w, entries)

			require.NoError(t, head.Init(math.MinInt64))

			require.NoError(t, head.Delete(context.Background(), 0, 100, labels.MustNewMatcher(labels.MatchEqual, "a", "1")))
		})
	}
}

func TestHeadDeleteSimple(t *testing.T) {
	buildSmpls := func(s []int64) []sample {
		ss := make([]sample, 0, len(s))
		for _, t := range s {
			ss = append(ss, sample{t: t, f: float64(t)})
		}
		return ss
	}
	smplsAll := buildSmpls([]int64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
	lblDefault := labels.Label{Name: "a", Value: "b"}
	lblsDefault := labels.FromStrings("a", "b")

	cases := []struct {
		dranges    tombstones.Intervals
		addSamples []sample // Samples to add after delete.
		smplsExp   []sample
	}{
		{
			dranges:  tombstones.Intervals{{Mint: 0, Maxt: 3}},
			smplsExp: buildSmpls([]int64{4, 5, 6, 7, 8, 9}),
		},
		{
			dranges:  tombstones.Intervals{{Mint: 1, Maxt: 3}},
			smplsExp: buildSmpls([]int64{0, 4, 5, 6, 7, 8, 9}),
		},
		{
			dranges:  tombstones.Intervals{{Mint: 1, Maxt: 3}, {Mint: 4, Maxt: 7}},
			smplsExp: buildSmpls([]int64{0, 8, 9}),
		},
		{
			dranges:  tombstones.Intervals{{Mint: 1, Maxt: 3}, {Mint: 4, Maxt: 700}},
			smplsExp: buildSmpls([]int64{0}),
		},
		{ // This case is to ensure that labels and symbols are deleted.
			dranges:  tombstones.Intervals{{Mint: 0, Maxt: 9}},
			smplsExp: buildSmpls([]int64{}),
		},
		{
			dranges:    tombstones.Intervals{{Mint: 1, Maxt: 3}},
			addSamples: buildSmpls([]int64{11, 13, 15}),
			smplsExp:   buildSmpls([]int64{0, 4, 5, 6, 7, 8, 9, 11, 13, 15}),
		},
		{
			// After delete, the appended samples in the deleted range should be visible
			// as the tombstones are clamped to the head min/max time.
			dranges:    tombstones.Intervals{{Mint: 7, Maxt: 20}},
			addSamples: buildSmpls([]int64{11, 13, 15}),
			smplsExp:   buildSmpls([]int64{0, 1, 2, 3, 4, 5, 6, 11, 13, 15}),
		},
	}

	for _, compress := range []wlog.CompressionType{wlog.CompressionNone, wlog.CompressionSnappy, wlog.CompressionZstd} {
		t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) {
			for _, c := range cases {
				head, w := newTestHead(t, 1000, compress, false)
				require.NoError(t, head.Init(0))

				app := head.Appender(context.Background())
				for _, smpl := range smplsAll {
					_, err := app.Append(0, lblsDefault, smpl.t, smpl.f)
					require.NoError(t, err)
				}
				require.NoError(t, app.Commit())

				// Delete the ranges.
				for _, r := range c.dranges {
					require.NoError(t, head.Delete(context.Background(), r.Mint, r.Maxt, labels.MustNewMatcher(labels.MatchEqual, lblDefault.Name, lblDefault.Value)))
				}

				// Add more samples.
				app = head.Appender(context.Background())
				for _, smpl := range c.addSamples {
					_, err := app.Append(0, lblsDefault, smpl.t, smpl.f)
					require.NoError(t, err)
				}
				require.NoError(t, app.Commit())

				// Compare the samples for both heads - before and after the reloadBlocks.
				reloadedW, err := wlog.New(nil, nil, w.Dir(), compress) // Use a new WAL to ensure deleted samples are gone even after a reloadBlocks.
				require.NoError(t, err)
				opts := DefaultHeadOptions()
				opts.ChunkRange = 1000
				opts.ChunkDirRoot = reloadedW.Dir()
				reloadedHead, err := NewHead(nil, nil, reloadedW, nil, opts, nil)
				require.NoError(t, err)
				require.NoError(t, reloadedHead.Init(0))

				// Compare the query results for both heads - before and after the reloadBlocks.
			Outer:
				for _, h := range []*Head{head, reloadedHead} {
					q, err := NewBlockQuerier(h, h.MinTime(), h.MaxTime())
					require.NoError(t, err)
					actSeriesSet := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, lblDefault.Name, lblDefault.Value))
					require.NoError(t, q.Close())
					expSeriesSet := newMockSeriesSet([]storage.Series{
						storage.NewListSeries(lblsDefault, func() []chunks.Sample {
							ss := make([]chunks.Sample, 0, len(c.smplsExp))
							for _, s := range c.smplsExp {
								ss = append(ss, s)
							}
							return ss
						}(),
						),
					})

					for {
						eok, rok := expSeriesSet.Next(), actSeriesSet.Next()
						require.Equal(t, eok, rok)

						if !eok {
							require.NoError(t, h.Close())
							require.NoError(t, actSeriesSet.Err())
							require.Empty(t, actSeriesSet.Warnings())
							continue Outer
						}
						expSeries := expSeriesSet.At()
						actSeries := actSeriesSet.At()

						require.Equal(t, expSeries.Labels(), actSeries.Labels())

						smplExp, errExp := storage.ExpandSamples(expSeries.Iterator(nil), nil)
						smplRes, errRes := storage.ExpandSamples(actSeries.Iterator(nil), nil)

						require.Equal(t, errExp, errRes)
						require.Equal(t, smplExp, smplRes)
					}
				}
			}
		})
	}
}

func TestDeleteUntilCurMax(t *testing.T) {
	hb, _ := newTestHead(t, 1000000, wlog.CompressionNone, false)
	defer func() {
		require.NoError(t, hb.Close())
	}()

	numSamples := int64(10)
	app := hb.Appender(context.Background())
	smpls := make([]float64, numSamples)
	for i := int64(0); i < numSamples; i++ {
		smpls[i] = rand.Float64()
		_, err := app.Append(0, labels.FromStrings("a", "b"), i, smpls[i])
		require.NoError(t, err)
	}
	require.NoError(t, app.Commit())
	require.NoError(t, hb.Delete(context.Background(), 0, 10000, labels.MustNewMatcher(labels.MatchEqual, "a", "b")))

	// Test that the series returns no samples. The series is cleared only after compaction.
	q, err := NewBlockQuerier(hb, 0, 100000)
	require.NoError(t, err)
	res := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
	require.True(t, res.Next(), "series is not present")
	s := res.At()
	it := s.Iterator(nil)
	require.Equal(t, chunkenc.ValNone, it.Next(), "expected no samples")
	for res.Next() {
	}
	require.NoError(t, res.Err())
	require.Empty(t, res.Warnings())

	// Add a sample again and test for its presence.
	app = hb.Appender(context.Background())
	_, err = app.Append(0, labels.FromStrings("a", "b"), 11, 1)
	require.NoError(t, err)
	require.NoError(t, app.Commit())

	q, err = NewBlockQuerier(hb, 0, 100000)
	require.NoError(t, err)
	res = q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
	require.True(t, res.Next(), "series does not exist")
	exps := res.At()
	it = exps.Iterator(nil)
	resSamples, err := storage.ExpandSamples(it, newSample)
	require.NoError(t, err)
	require.Equal(t, []chunks.Sample{sample{11, 1, nil, nil}}, resSamples)
	for res.Next() {
	}
	require.NoError(t, res.Err())
	require.Empty(t, res.Warnings())
}

func TestDeletedSamplesAndSeriesStillInWALAfterCheckpoint(t *testing.T) {
	numSamples := 10000

	// Enough samples to cause a checkpoint.
	hb, w := newTestHead(t, int64(numSamples)*10, wlog.CompressionNone, false)

	for i := 0; i < numSamples; i++ {
		app := hb.Appender(context.Background())
		_, err := app.Append(0, labels.FromStrings("a", "b"), int64(i), 0)
		require.NoError(t, err)
		require.NoError(t, app.Commit())
	}
	require.NoError(t, hb.Delete(context.Background(), 0, int64(numSamples), labels.MustNewMatcher(labels.MatchEqual, "a", "b")))
	require.NoError(t, hb.Truncate(1))
	require.NoError(t, hb.Close())

	// Confirm there's been a checkpoint.
	cdir, _, err := wlog.LastCheckpoint(w.Dir())
	require.NoError(t, err)

	// Read in checkpoint and WAL.
	recs := readTestWAL(t, cdir)
	recs = append(recs, readTestWAL(t, w.Dir())...)

	var series, samples, stones, metadata int
	for _, rec := range recs {
		switch rec.(type) {
		case []record.RefSeries:
			series++
		case []record.RefSample:
			samples++
		case []tombstones.Stone:
			stones++
		case []record.RefMetadata:
			metadata++
		default:
			require.Fail(t, "unknown record type")
		}
	}
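
	// Expected counts (assuming the checkpoint keeps series that still have chunks
	// in the Head, keeps tombstones, and drops WAL samples older than the truncation
	// time): the single series record and the tombstone from Delete survive, and of
	// the 10000 one-sample records only the one at t=0 (below Truncate(1)) is
	// dropped, leaving 9999.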
	require.Equal(t, 1, series)
	require.Equal(t, 9999, samples)
	require.Equal(t, 1, stones)
	require.Equal(t, 0, metadata)
}

func TestDelete_e2e(t *testing.T) {
	numDatapoints := 1000
	numRanges := 1000
	timeInterval := int64(2)
	// Create 8 series with 1000 data-points of different ranges, delete and run queries.
	lbls := [][]labels.Label{
		{
			{Name: "a", Value: "b"},
			{Name: "instance", Value: "localhost:9090"},
			{Name: "job", Value: "prometheus"},
		},
		{
			{Name: "a", Value: "b"},
			{Name: "instance", Value: "127.0.0.1:9090"},
			{Name: "job", Value: "prometheus"},
		},
		{
			{Name: "a", Value: "b"},
			{Name: "instance", Value: "127.0.0.1:9090"},
			{Name: "job", Value: "prom-k8s"},
		},
		{
			{Name: "a", Value: "b"},
			{Name: "instance", Value: "localhost:9090"},
			{Name: "job", Value: "prom-k8s"},
		},
		{
			{Name: "a", Value: "c"},
			{Name: "instance", Value: "localhost:9090"},
			{Name: "job", Value: "prometheus"},
		},
		{
			{Name: "a", Value: "c"},
			{Name: "instance", Value: "127.0.0.1:9090"},
			{Name: "job", Value: "prometheus"},
		},
		{
			{Name: "a", Value: "c"},
			{Name: "instance", Value: "127.0.0.1:9090"},
			{Name: "job", Value: "prom-k8s"},
		},
		{
			{Name: "a", Value: "c"},
			{Name: "instance", Value: "localhost:9090"},
			{Name: "job", Value: "prom-k8s"},
		},
	}
	seriesMap := map[string][]chunks.Sample{}
	for _, l := range lbls {
		seriesMap[labels.New(l...).String()] = []chunks.Sample{}
	}

	hb, _ := newTestHead(t, 100000, wlog.CompressionNone, false)
	defer func() {
		require.NoError(t, hb.Close())
	}()

	app := hb.Appender(context.Background())
	for _, l := range lbls {
		ls := labels.New(l...)
		series := []chunks.Sample{}
		ts := rand.Int63n(300)
		for i := 0; i < numDatapoints; i++ {
			v := rand.Float64()
			_, err := app.Append(0, ls, ts, v)
			require.NoError(t, err)
			series = append(series, sample{ts, v, nil, nil})
			ts += rand.Int63n(timeInterval) + 1
		}
		seriesMap[labels.New(l...).String()] = series
	}
	require.NoError(t, app.Commit())

	// Delete a time-range from each selector.
	dels := []struct {
		ms     []*labels.Matcher
		drange tombstones.Intervals
	}{
		{
			ms:     []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "b")},
			drange: tombstones.Intervals{{Mint: 300, Maxt: 500}, {Mint: 600, Maxt: 670}},
		},
		{
			ms: []*labels.Matcher{
				labels.MustNewMatcher(labels.MatchEqual, "a", "b"),
				labels.MustNewMatcher(labels.MatchEqual, "job", "prom-k8s"),
			},
			drange: tombstones.Intervals{{Mint: 300, Maxt: 500}, {Mint: 100, Maxt: 670}},
		},
		{
			ms: []*labels.Matcher{
				labels.MustNewMatcher(labels.MatchEqual, "a", "c"),
				labels.MustNewMatcher(labels.MatchEqual, "instance", "localhost:9090"),
				labels.MustNewMatcher(labels.MatchEqual, "job", "prometheus"),
			},
			drange: tombstones.Intervals{{Mint: 300, Maxt: 400}, {Mint: 100, Maxt: 6700}},
		},
		// TODO: Add Regexp Matchers.
	}

	for _, del := range dels {
		for _, r := range del.drange {
			require.NoError(t, hb.Delete(context.Background(), r.Mint, r.Maxt, del.ms...))
		}
		matched := labels.Slice{}
		for _, l := range lbls {
			s := labels.Selector(del.ms)
			ls := labels.New(l...)
			if s.Matches(ls) {
				matched = append(matched, ls)
			}
		}
		sort.Sort(matched)
		for i := 0; i < numRanges; i++ {
			q, err := NewBlockQuerier(hb, 0, 100000)
			require.NoError(t, err)
			defer q.Close()

			ss := q.Select(context.Background(), true, nil, del.ms...)
			// Build the mockSeriesSet.
			matchedSeries := make([]storage.Series, 0, len(matched))
			for _, m := range matched {
				smpls := seriesMap[m.String()]
				smpls = deletedSamples(smpls, del.drange)
				// Only append those series for which samples exist, as mockSeriesSet
				// doesn't skip series with no samples.
				// TODO: But sometimes SeriesSet returns an empty chunkenc.Iterator.
				if len(smpls) > 0 {
					matchedSeries = append(matchedSeries, storage.NewListSeries(m, smpls))
				}
			}
			expSs := newMockSeriesSet(matchedSeries)

			// Compare both SeriesSets.
			for {
				eok, rok := expSs.Next(), ss.Next()
				// Skip a series if its iterator is empty.
				if rok {
					for ss.At().Iterator(nil).Next() == chunkenc.ValNone {
						rok = ss.Next()
						if !rok {
							break
						}
					}
				}
				require.Equal(t, eok, rok)
				if !eok {
					break
				}
				sexp := expSs.At()
				sres := ss.At()
				require.Equal(t, sexp.Labels(), sres.Labels())
				smplExp, errExp := storage.ExpandSamples(sexp.Iterator(nil), nil)
				smplRes, errRes := storage.ExpandSamples(sres.Iterator(nil), nil)
				require.Equal(t, errExp, errRes)
				require.Equal(t, smplExp, smplRes)
			}
			require.NoError(t, ss.Err())
			require.Empty(t, ss.Warnings())
		}
	}
}

func boundedSamples(full []chunks.Sample, mint, maxt int64) []chunks.Sample {
	for len(full) > 0 {
		if full[0].T() >= mint {
			break
		}
		full = full[1:]
	}
	for i, s := range full {
		// Terminate on the first sample larger than maxt.
		if s.T() > maxt {
			return full[:i]
		}
	}
	// maxt is after the highest sample.
	return full
}

func deletedSamples(full []chunks.Sample, dranges tombstones.Intervals) []chunks.Sample {
	ds := make([]chunks.Sample, 0, len(full))
Outer:
	for _, s := range full {
		for _, r := range dranges {
			if r.InBounds(s.T()) {
				continue Outer
			}
		}
		ds = append(ds, s)
	}
	return ds
}

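// TestComputeChunkEndTime exercises the heuristic used to decide where the open
// head chunk should end. A sketch of the assumed behaviour, reconstructed from the
// cases below (the real implementation is not part of this file):
//
//	n := float64(max-start) / (float64(cur-start+1) * ratioToFull)
//	if n <= 1 {
//		return max // less than one full chunk fits, so cut at the range boundary
//	}
//	return start + (max-start)/int64(math.Floor(n))
//
// In words: extrapolate how many chunks of the current fill rate fit into
// [start, max), then cut the current chunk so the window divides into that many
// equally sized chunks, falling back to max when the division would yield zero.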
func TestComputeChunkEndTime(t *testing.T) {
	cases := map[string]struct {
		start, cur, max int64
		ratioToFull     float64
		res             int64
	}{
		"exactly 1/4 full, even increment": {
			start:       0,
			cur:         250,
			max:         1000,
			ratioToFull: 4,
			res:         1000,
		},
		"exactly 1/4 full, uneven increment": {
			start:       100,
			cur:         200,
			max:         1000,
			ratioToFull: 4,
			res:         550,
		},
		"decimal ratio to full": {
			start:       5000,
			cur:         5110,
			max:         10000,
			ratioToFull: 4.2,
			res:         5500,
		},
		// Case where we fit floored 0 chunks. Must catch division by 0
		// and default to maximum time.
		"fit floored 0 chunks": {
			start:       0,
			cur:         500,
			max:         1000,
			ratioToFull: 4,
			res:         1000,
		},
		// Catch division by zero for cur == start. Strictly not a possible case.
		"cur == start": {
			start:       100,
			cur:         100,
			max:         1000,
			ratioToFull: 4,
			res:         104,
		},
	}

	for testName, tc := range cases {
		t.Run(testName, func(t *testing.T) {
			got := computeChunkEndTime(tc.start, tc.cur, tc.max, tc.ratioToFull)
			require.Equal(t, tc.res, got, "(start: %d, cur: %d, max: %d)", tc.start, tc.cur, tc.max)
		})
	}
}

func TestMemSeries_append(t *testing.T) {
	dir := t.TempDir()
	// This is usually taken from the Head, but passing manually here.
	chunkDiskMapper, err := chunks.NewChunkDiskMapper(nil, dir, chunkenc.NewPool(), chunks.DefaultWriteBufferSize, chunks.DefaultWriteQueueSize)
	require.NoError(t, err)
	defer func() {
		require.NoError(t, chunkDiskMapper.Close())
	}()
	cOpts := chunkOpts{
		chunkDiskMapper: chunkDiskMapper,
		chunkRange:      500,
		samplesPerChunk: DefaultSamplesPerChunk,
	}

	s := newMemSeries(labels.Labels{}, 1, 0, defaultIsolationDisabled)

	// Add first two samples at the very end of a chunk range and the next two
	// on and after it.
	// New chunk must correctly be cut at 1000.
	ok, chunkCreated := s.append(998, 1, 0, cOpts)
	require.True(t, ok, "append failed")
	require.True(t, chunkCreated, "first sample created chunk")

	ok, chunkCreated = s.append(999, 2, 0, cOpts)
	require.True(t, ok, "append failed")
	require.False(t, chunkCreated, "second sample should use same chunk")

	s.mmapChunks(chunkDiskMapper)

	ok, chunkCreated = s.append(1000, 3, 0, cOpts)
	require.True(t, ok, "append failed")
	require.True(t, chunkCreated, "expected new chunk on boundary")

	ok, chunkCreated = s.append(1001, 4, 0, cOpts)
	require.True(t, ok, "append failed")
	require.False(t, chunkCreated, "second sample should use same chunk")

	s.mmapChunks(chunkDiskMapper)
	require.Len(t, s.mmappedChunks, 1, "there should be only 1 mmapped chunk")
	require.Equal(t, int64(998), s.mmappedChunks[0].minTime, "wrong chunk range")
	require.Equal(t, int64(999), s.mmappedChunks[0].maxTime, "wrong chunk range")
    require.Equal(t, int64(1000), s.headChunks.minTime, "wrong chunk range")
    require.Equal(t, int64(1001), s.headChunks.maxTime, "wrong chunk range")
2017-10-25 00:32:06 -07:00
    // Fill the range [1000,2000) with many samples. Intermediate chunks should be cut
    // at approximately 120 samples per chunk.
    for i := 1; i < 1000; i++ {
2023-05-25 13:12:32 -07:00
        ok, _ := s.append(1001+int64(i), float64(i), 0, cOpts)
2020-10-29 02:43:23 -07:00
        require.True(t, ok, "append failed")
2017-10-25 00:32:06 -07:00
    }
2023-07-31 02:10:24 -07:00
    s.mmapChunks(chunkDiskMapper)
2017-10-25 00:32:06 -07:00
2020-10-29 02:43:23 -07:00
    require.Greater(t, len(s.mmappedChunks)+1, 7, "expected intermediate chunks")
2017-10-25 00:32:06 -07:00
    // All chunks but the first and last should now be moderately full.
2020-05-06 08:30:00 -07:00
    for i, c := range s.mmappedChunks[1:] {
        chk, err := chunkDiskMapper.Chunk(c.ref)
2020-10-29 02:43:23 -07:00
        require.NoError(t, err)
        require.Greater(t, chk.NumSamples(), 100, "unexpected small chunk %d of length %d", i, chk.NumSamples())
2017-10-25 00:32:06 -07:00
    }
}
2017-12-13 12:58:21 -08:00
2022-07-06 09:44:17 -07:00
func TestMemSeries_appendHistogram(t *testing.T) {
    dir := t.TempDir()
    // This is usually taken from the Head, but passing manually here.
    chunkDiskMapper, err := chunks.NewChunkDiskMapper(nil, dir, chunkenc.NewPool(), chunks.DefaultWriteBufferSize, chunks.DefaultWriteQueueSize)
    require.NoError(t, err)
    defer func() {
        require.NoError(t, chunkDiskMapper.Close())
    }()
2023-05-25 13:12:32 -07:00
    cOpts := chunkOpts{
        chunkDiskMapper: chunkDiskMapper,
        chunkRange:      int64(1000),
        samplesPerChunk: DefaultSamplesPerChunk,
    }
2022-07-06 09:44:17 -07:00
2024-01-29 03:57:27 -08:00
    s := newMemSeries(labels.Labels{}, 1, 0, defaultIsolationDisabled)
2022-07-06 09:44:17 -07:00
2023-02-10 03:39:33 -08:00
    histograms := tsdbutil.GenerateTestHistograms(4)
2022-07-06 09:44:17 -07:00
    histogramWithOneMoreBucket := histograms[3].Copy()
    histogramWithOneMoreBucket.Count++
    histogramWithOneMoreBucket.Sum += 1.23
    histogramWithOneMoreBucket.PositiveSpans[1].Length = 3
    histogramWithOneMoreBucket.PositiveBuckets = append(histogramWithOneMoreBucket.PositiveBuckets, 1)
    // Add first two samples at the very end of a chunk range and the next two
    // on and after it.
    // New chunk must correctly be cut at 1000.
2023-05-25 13:12:32 -07:00
    ok, chunkCreated := s.appendHistogram(998, histograms[0], 0, cOpts)
2022-07-06 09:44:17 -07:00
    require.True(t, ok, "append failed")
    require.True(t, chunkCreated, "first sample created chunk")
2023-05-25 13:12:32 -07:00
    ok, chunkCreated = s.appendHistogram(999, histograms[1], 0, cOpts)
2022-07-06 09:44:17 -07:00
    require.True(t, ok, "append failed")
    require.False(t, chunkCreated, "second sample should use same chunk")
2023-05-25 13:12:32 -07:00
    ok, chunkCreated = s.appendHistogram(1000, histograms[2], 0, cOpts)
2022-07-06 09:44:17 -07:00
    require.True(t, ok, "append failed")
    require.True(t, chunkCreated, "expected new chunk on boundary")
2023-05-25 13:12:32 -07:00
    ok, chunkCreated = s.appendHistogram(1001, histograms[3], 0, cOpts)
2022-07-06 09:44:17 -07:00
    require.True(t, ok, "append failed")
    require.False(t, chunkCreated, "second sample should use same chunk")
2023-07-31 02:10:24 -07:00
    s.mmapChunks(chunkDiskMapper)
2023-12-07 03:35:01 -08:00
    require.Len(t, s.mmappedChunks, 1, "there should be only 1 mmapped chunk")
2022-07-06 09:44:17 -07:00
    require.Equal(t, int64(998), s.mmappedChunks[0].minTime, "wrong chunk range")
    require.Equal(t, int64(999), s.mmappedChunks[0].maxTime, "wrong chunk range")
2023-07-31 02:10:24 -07:00
    require.Equal(t, int64(1000), s.headChunks.minTime, "wrong chunk range")
    require.Equal(t, int64(1001), s.headChunks.maxTime, "wrong chunk range")
2022-07-06 09:44:17 -07:00
2023-05-25 13:12:32 -07:00
    ok, chunkCreated = s.appendHistogram(1002, histogramWithOneMoreBucket, 0, cOpts)
2022-07-06 09:44:17 -07:00
    require.True(t, ok, "append failed")
    require.False(t, chunkCreated, "third sample should trigger a re-encoded chunk")
2023-07-31 02:10:24 -07:00
    s.mmapChunks(chunkDiskMapper)
2023-12-07 03:35:01 -08:00
    require.Len(t, s.mmappedChunks, 1, "there should be only 1 mmapped chunk")
2022-07-06 09:44:17 -07:00
    require.Equal(t, int64(998), s.mmappedChunks[0].minTime, "wrong chunk range")
    require.Equal(t, int64(999), s.mmappedChunks[0].maxTime, "wrong chunk range")
2023-07-31 02:10:24 -07:00
    require.Equal(t, int64(1000), s.headChunks.minTime, "wrong chunk range")
    require.Equal(t, int64(1002), s.headChunks.maxTime, "wrong chunk range")
2022-07-06 09:44:17 -07:00
}
2022-04-20 05:54:20 -07:00
func TestMemSeries_append_atVariableRate(t *testing.T) {
    const samplesPerChunk = 120
    dir := t.TempDir()
    // This is usually taken from the Head, but passing manually here.
    chunkDiskMapper, err := chunks.NewChunkDiskMapper(nil, dir, chunkenc.NewPool(), chunks.DefaultWriteBufferSize, chunks.DefaultWriteQueueSize)
    require.NoError(t, err)
    t.Cleanup(func() {
        require.NoError(t, chunkDiskMapper.Close())
    })
2023-05-25 13:12:32 -07:00
    cOpts := chunkOpts{
        chunkDiskMapper: chunkDiskMapper,
        chunkRange:      DefaultBlockDuration,
        samplesPerChunk: samplesPerChunk,
    }
2022-04-20 05:54:20 -07:00
2024-01-29 03:57:27 -08:00
    s := newMemSeries(labels.Labels{}, 1, 0, defaultIsolationDisabled)
2022-04-20 05:54:20 -07:00
    // At this slow rate, we will fill the chunk in two block durations.
    slowRate := (DefaultBlockDuration * 2) / samplesPerChunk
    var nextTs int64
    var totalAppendedSamples int
    for i := 0; i < samplesPerChunk/4; i++ {
2023-05-25 13:12:32 -07:00
        ok, _ := s.append(nextTs, float64(i), 0, cOpts)
2022-04-20 05:54:20 -07:00
        require.Truef(t, ok, "slow sample %d was not appended", i)
        nextTs += slowRate
        totalAppendedSamples++
    }
    require.Equal(t, DefaultBlockDuration, s.nextAt, "after appending a samplesPerChunk/4 samples at a slow rate, we should aim to cut a new block at the default block duration %d, but it's set to %d", DefaultBlockDuration, s.nextAt)
    // Suddenly, the rate increases and we receive a sample every millisecond.
    for i := 0; i < math.MaxUint16; i++ {
2023-05-25 13:12:32 -07:00
        ok, _ := s.append(nextTs, float64(i), 0, cOpts)
2022-04-20 05:54:20 -07:00
        require.Truef(t, ok, "quick sample %d was not appended", i)
        nextTs++
        totalAppendedSamples++
    }
2023-05-25 13:12:32 -07:00
    ok, chunkCreated := s.append(DefaultBlockDuration, float64(0), 0, cOpts)
2022-04-20 05:54:20 -07:00
    require.True(t, ok, "new chunk sample was not appended")
    require.True(t, chunkCreated, "sample at block duration timestamp should create a new chunk")
2023-07-31 02:10:24 -07:00
    s.mmapChunks(chunkDiskMapper)
2022-04-20 05:54:20 -07:00
    var totalSamplesInChunks int
    for i, c := range s.mmappedChunks {
        totalSamplesInChunks += int(c.numSamples)
        require.LessOrEqualf(t, c.numSamples, uint16(2*samplesPerChunk), "mmapped chunk %d has more than %d samples", i, 2*samplesPerChunk)
    }
    require.Equal(t, totalAppendedSamples, totalSamplesInChunks, "wrong number of samples in %d mmapped chunks", len(s.mmappedChunks))
}
2017-12-13 12:58:21 -08:00
func TestGCChunkAccess(t *testing.T) {
    // Put a chunk, select it. GC it and then access it.
2022-09-27 01:22:22 -07:00
    const chunkRange = 1000
2023-07-11 05:57:57 -07:00
    h, _ := newTestHead(t, chunkRange, wlog.CompressionNone, false)
2020-05-06 08:30:00 -07:00
    defer func() {
2020-10-29 02:43:23 -07:00
        require.NoError(t, h.Close())
2020-05-06 08:30:00 -07:00
    }()
2017-12-13 12:58:21 -08:00
2023-05-25 13:12:32 -07:00
    cOpts := chunkOpts{
        chunkDiskMapper: h.chunkDiskMapper,
        chunkRange:      chunkRange,
        samplesPerChunk: DefaultSamplesPerChunk,
    }
2017-12-13 12:58:21 -08:00
    h.initTime(0)
2020-05-20 06:22:08 -07:00
    s, _, _ := h.getOrCreate(1, labels.FromStrings("a", "1"))
2020-02-06 04:55:00 -08:00
    // Appending 2 samples for the first chunk.
2023-05-25 13:12:32 -07:00
    ok, chunkCreated := s.append(0, 0, 0, cOpts)
2020-10-29 02:43:23 -07:00
    require.True(t, ok, "series append failed")
    require.True(t, chunkCreated, "chunks was not created")
2023-05-25 13:12:32 -07:00
    ok, chunkCreated = s.append(999, 999, 0, cOpts)
2020-10-29 02:43:23 -07:00
    require.True(t, ok, "series append failed")
    require.False(t, chunkCreated, "chunks was created")
2020-02-06 04:55:00 -08:00
    // A new chunks should be created here as it's beyond the chunk range.
2023-05-25 13:12:32 -07:00
    ok, chunkCreated = s.append(1000, 1000, 0, cOpts)
2020-10-29 02:43:23 -07:00
    require.True(t, ok, "series append failed")
    require.True(t, chunkCreated, "chunks was not created")
2023-05-25 13:12:32 -07:00
    ok, chunkCreated = s.append(1999, 1999, 0, cOpts)
2020-10-29 02:43:23 -07:00
    require.True(t, ok, "series append failed")
    require.False(t, chunkCreated, "chunks was created")
2017-12-13 12:58:21 -08:00
    idx := h.indexRange(0, 1500)
    var (
2022-06-28 08:03:26 -07:00
        chunks  []chunks.Meta
        builder labels.ScratchBuilder
2017-12-13 12:58:21 -08:00
    )
2022-12-15 10:19:15 -08:00
    require.NoError(t, idx.Series(1, &builder, &chunks))
2017-12-13 12:58:21 -08:00
2022-12-15 10:19:15 -08:00
    require.Equal(t, labels.FromStrings("a", "1"), builder.Labels())
2023-12-07 03:35:01 -08:00
    require.Len(t, chunks, 2)
2017-12-13 12:58:21 -08:00
2020-05-22 02:03:23 -07:00
    cr, err := h.chunksRange(0, 1500, nil)
2020-10-29 02:43:23 -07:00
    require.NoError(t, err)
2023-11-28 02:14:29 -08:00
    _, _, err = cr.ChunkOrIterable(chunks[0])
2020-10-29 02:43:23 -07:00
    require.NoError(t, err)
2023-11-28 02:14:29 -08:00
    _, _, err = cr.ChunkOrIterable(chunks[1])
2020-10-29 02:43:23 -07:00
    require.NoError(t, err)
2017-12-13 12:58:21 -08:00
2020-10-29 02:43:23 -07:00
    require.NoError(t, h.Truncate(1500)) // Remove a chunk.
2017-12-13 12:58:21 -08:00
2023-11-28 02:14:29 -08:00
    _, _, err = cr.ChunkOrIterable(chunks[0])
2020-10-29 02:43:23 -07:00
    require.Equal(t, storage.ErrNotFound, err)
2023-11-28 02:14:29 -08:00
    _, _, err = cr.ChunkOrIterable(chunks[1])
2020-10-29 02:43:23 -07:00
    require.NoError(t, err)
2017-12-13 12:58:21 -08:00
}
func TestGCSeriesAccess(t *testing.T) {
    // Put a series, select it. GC it and then access it.
2022-09-27 01:22:22 -07:00
    const chunkRange = 1000
2023-07-11 05:57:57 -07:00
    h, _ := newTestHead(t, chunkRange, wlog.CompressionNone, false)
2020-05-06 08:30:00 -07:00
    defer func() {
2020-10-29 02:43:23 -07:00
        require.NoError(t, h.Close())
2020-05-06 08:30:00 -07:00
    }()
2017-12-13 12:58:21 -08:00
2023-05-25 13:12:32 -07:00
    cOpts := chunkOpts{
        chunkDiskMapper: h.chunkDiskMapper,
        chunkRange:      chunkRange,
        samplesPerChunk: DefaultSamplesPerChunk,
    }
2017-12-13 12:58:21 -08:00
    h.initTime(0)
2020-05-20 06:22:08 -07:00
    s, _, _ := h.getOrCreate(1, labels.FromStrings("a", "1"))
2020-02-06 04:55:00 -08:00
    // Appending 2 samples for the first chunk.
2023-05-25 13:12:32 -07:00
    ok, chunkCreated := s.append(0, 0, 0, cOpts)
2020-10-29 02:43:23 -07:00
    require.True(t, ok, "series append failed")
    require.True(t, chunkCreated, "chunks was not created")
2023-05-25 13:12:32 -07:00
    ok, chunkCreated = s.append(999, 999, 0, cOpts)
2020-10-29 02:43:23 -07:00
    require.True(t, ok, "series append failed")
    require.False(t, chunkCreated, "chunks was created")
2020-02-06 04:55:00 -08:00
    // A new chunks should be created here as it's beyond the chunk range.
2023-05-25 13:12:32 -07:00
    ok, chunkCreated = s.append(1000, 1000, 0, cOpts)
2020-10-29 02:43:23 -07:00
    require.True(t, ok, "series append failed")
    require.True(t, chunkCreated, "chunks was not created")
2023-05-25 13:12:32 -07:00
    ok, chunkCreated = s.append(1999, 1999, 0, cOpts)
2020-10-29 02:43:23 -07:00
    require.True(t, ok, "series append failed")
    require.False(t, chunkCreated, "chunks was created")
2017-12-13 12:58:21 -08:00
    idx := h.indexRange(0, 2000)
    var (
2022-06-28 08:03:26 -07:00
        chunks  []chunks.Meta
        builder labels.ScratchBuilder
2017-12-13 12:58:21 -08:00
    )
2022-12-15 10:19:15 -08:00
    require.NoError(t, idx.Series(1, &builder, &chunks))
2017-12-13 12:58:21 -08:00
2022-12-15 10:19:15 -08:00
    require.Equal(t, labels.FromStrings("a", "1"), builder.Labels())
2023-12-07 03:35:01 -08:00
    require.Len(t, chunks, 2)
2017-12-13 12:58:21 -08:00
2020-05-22 02:03:23 -07:00
    cr, err := h.chunksRange(0, 2000, nil)
2020-10-29 02:43:23 -07:00
    require.NoError(t, err)
2023-11-28 02:14:29 -08:00
    _, _, err = cr.ChunkOrIterable(chunks[0])
2020-10-29 02:43:23 -07:00
    require.NoError(t, err)
2023-11-28 02:14:29 -08:00
    _, _, err = cr.ChunkOrIterable(chunks[1])
2020-10-29 02:43:23 -07:00
    require.NoError(t, err)
2017-12-13 12:58:21 -08:00
2020-10-29 02:43:23 -07:00
    require.NoError(t, h.Truncate(2000)) // Remove the series.
2017-12-13 12:58:21 -08:00
2020-10-29 02:43:23 -07:00
    require.Equal(t, (*memSeries)(nil), h.series.getByID(1))
2017-12-13 12:58:21 -08:00
2023-11-28 02:14:29 -08:00
    _, _, err = cr.ChunkOrIterable(chunks[0])
2020-10-29 02:43:23 -07:00
    require.Equal(t, storage.ErrNotFound, err)
2023-11-28 02:14:29 -08:00
    _, _, err = cr.ChunkOrIterable(chunks[1])
2020-10-29 02:43:23 -07:00
    require.Equal(t, storage.ErrNotFound, err)
2017-12-13 12:58:21 -08:00
}
2018-06-28 06:04:07 -07:00
2018-09-17 09:58:42 -07:00
func TestUncommittedSamplesNotLostOnTruncate(t *testing.T) {
2023-07-11 05:57:57 -07:00
    h, _ := newTestHead(t, 1000, wlog.CompressionNone, false)
2020-05-06 08:30:00 -07:00
    defer func() {
2020-10-29 02:43:23 -07:00
        require.NoError(t, h.Close())
2020-05-06 08:30:00 -07:00
    }()
2018-09-17 09:58:42 -07:00
    h.initTime(0)
2020-07-30 04:48:47 -07:00
    app := h.appender()
2018-09-17 09:58:42 -07:00
    lset := labels.FromStrings("a", "1")
2021-02-18 04:07:00 -08:00
    _, err := app.Append(0, lset, 2100, 1)
2020-10-29 02:43:23 -07:00
    require.NoError(t, err)
2018-09-17 09:58:42 -07:00
2020-10-29 02:43:23 -07:00
    require.NoError(t, h.Truncate(2000))
    require.NotNil(t, h.series.getByHash(lset.Hash(), lset), "series should not have been garbage collected")
2018-09-17 09:58:42 -07:00
2020-10-29 02:43:23 -07:00
    require.NoError(t, app.Commit())
2018-09-17 09:58:42 -07:00
    q, err := NewBlockQuerier(h, 1500, 2500)
2020-10-29 02:43:23 -07:00
    require.NoError(t, err)
2018-09-17 09:58:42 -07:00
    defer q.Close()
2023-09-12 03:37:38 -07:00
    ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "1"))
2023-12-07 03:35:01 -08:00
    require.True(t, ss.Next())
2020-06-09 09:57:31 -07:00
    for ss.Next() {
    }
2020-10-29 02:43:23 -07:00
    require.NoError(t, ss.Err())
2023-12-07 03:35:01 -08:00
    require.Empty(t, ss.Warnings())
2018-09-17 09:58:42 -07:00
}
func TestRemoveSeriesAfterRollbackAndTruncate(t *testing.T) {
2023-07-11 05:57:57 -07:00
    h, _ := newTestHead(t, 1000, wlog.CompressionNone, false)
2020-05-06 08:30:00 -07:00
    defer func() {
2020-10-29 02:43:23 -07:00
        require.NoError(t, h.Close())
2020-05-06 08:30:00 -07:00
    }()
2018-09-17 09:58:42 -07:00
    h.initTime(0)
2020-07-30 04:48:47 -07:00
    app := h.appender()
2018-09-17 09:58:42 -07:00
    lset := labels.FromStrings("a", "1")
2021-02-18 04:07:00 -08:00
    _, err := app.Append(0, lset, 2100, 1)
2020-10-29 02:43:23 -07:00
    require.NoError(t, err)
2018-09-17 09:58:42 -07:00
2020-10-29 02:43:23 -07:00
    require.NoError(t, h.Truncate(2000))
    require.NotNil(t, h.series.getByHash(lset.Hash(), lset), "series should not have been garbage collected")
2018-09-17 09:58:42 -07:00
2020-10-29 02:43:23 -07:00
    require.NoError(t, app.Rollback())
2018-09-17 09:58:42 -07:00
    q, err := NewBlockQuerier(h, 1500, 2500)
2020-10-29 02:43:23 -07:00
    require.NoError(t, err)
2018-09-17 09:58:42 -07:00
2023-09-12 03:37:38 -07:00
    ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "1"))
2023-12-07 03:35:01 -08:00
    require.False(t, ss.Next())
    require.Empty(t, ss.Warnings())
2021-07-20 01:47:20 -07:00
    require.NoError(t, q.Close())
2018-09-17 09:58:42 -07:00
    // Truncate again, this time the series should be deleted
2020-10-29 02:43:23 -07:00
    require.NoError(t, h.Truncate(2050))
    require.Equal(t, (*memSeries)(nil), h.series.getByHash(lset.Hash(), lset))
2018-09-17 09:58:42 -07:00
}
2018-06-28 06:04:07 -07:00
func TestHead_LogRollback(t *testing.T) {
2023-07-11 05:57:57 -07:00
    for _, compress := range []wlog.CompressionType{wlog.CompressionNone, wlog.CompressionSnappy, wlog.CompressionZstd} {
        t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) {
2022-09-20 10:05:50 -07:00
            h, w := newTestHead(t, 1000, compress, false)
2019-06-19 06:46:24 -07:00
            defer func() {
2020-10-29 02:43:23 -07:00
                require.NoError(t, h.Close())
2019-06-19 06:46:24 -07:00
            }()
2018-05-17 06:04:32 -07:00
2020-07-30 04:11:13 -07:00
            app := h.Appender(context.Background())
2021-02-18 04:07:00 -08:00
            _, err := app.Append(0, labels.FromStrings("a", "b"), 1, 2)
2020-10-29 02:43:23 -07:00
            require.NoError(t, err)
2018-06-28 06:04:07 -07:00
2020-10-29 02:43:23 -07:00
            require.NoError(t, app.Rollback())
2019-06-19 06:46:24 -07:00
            recs := readTestWAL(t, w.Dir())
2018-05-17 06:04:32 -07:00
2023-12-07 03:35:01 -08:00
            require.Len(t, recs, 1)
2018-06-28 06:04:07 -07:00
2019-09-19 02:15:41 -07:00
            series, ok := recs[0].([]record.RefSeries)
2020-10-29 02:43:23 -07:00
            require.True(t, ok, "expected series record but got %+v", recs[0])
            require.Equal(t, []record.RefSeries{{Ref: 1, Labels: labels.FromStrings("a", "b")}}, series)
2019-06-19 06:46:24 -07:00
        })
    }
2018-06-28 06:04:07 -07:00
}
2018-11-30 03:37:04 -08:00
2019-06-12 07:10:37 -07:00
// TestWalRepair_DecodingError ensures that a repair is run for an error
// when decoding a record.
func TestWalRepair_DecodingError(t *testing.T) {
2019-09-19 02:15:41 -07:00
    var enc record.Encoder
2018-11-30 03:37:04 -08:00
    for name, test := range map[string]struct {
        corrFunc  func(rec []byte) []byte // Func that applies the corruption to a record.
        rec       []byte
        totalRecs int
        expRecs   int
    }{
        "decode_series": {
            func(rec []byte) []byte {
                return rec[:3]
            },
2019-09-19 02:15:41 -07:00
            enc.Series([]record.RefSeries{{Ref: 1, Labels: labels.FromStrings("a", "b")}}, []byte{}),
2018-11-30 03:37:04 -08:00
            9,
            5,
        },
        "decode_samples": {
            func(rec []byte) []byte {
                return rec[:3]
            },
2019-09-19 02:15:41 -07:00
            enc.Samples([]record.RefSample{{Ref: 0, T: 99, V: 1}}, []byte{}),
2018-11-30 03:37:04 -08:00
            9,
            5,
        },
        "decode_tombstone": {
            func(rec []byte) []byte {
                return rec[:3]
            },
2019-09-19 02:15:41 -07:00
            enc.Tombstones([]tombstones.Stone{{Ref: 1, Intervals: tombstones.Intervals{}}}, []byte{}),
2018-11-30 03:37:04 -08:00
            9,
            5,
        },
    } {
2023-07-11 05:57:57 -07:00
        for _, compress := range []wlog.CompressionType{wlog.CompressionNone, wlog.CompressionSnappy, wlog.CompressionZstd} {
            t.Run(fmt.Sprintf("%s,compress=%s", name, compress), func(t *testing.T) {
2022-01-22 01:55:01 -08:00
                dir := t.TempDir()
2018-11-30 03:37:04 -08:00
2019-06-19 06:46:24 -07:00
                // Fill the wal and corrupt it.
                {
2022-10-10 08:08:46 -07:00
                    w, err := wlog.New(nil, nil, filepath.Join(dir, "wal"), compress)
2020-10-29 02:43:23 -07:00
                    require.NoError(t, err)
2019-06-19 06:46:24 -07:00
                    for i := 1; i <= test.totalRecs; i++ {
                        // At this point insert a corrupted record.
                        if i-1 == test.expRecs {
2020-10-29 02:43:23 -07:00
                            require.NoError(t, w.Log(test.corrFunc(test.rec)))
2019-06-19 06:46:24 -07:00
                            continue
                        }
2020-10-29 02:43:23 -07:00
                        require.NoError(t, w.Log(test.rec))
2019-06-14 08:39:22 -07:00
                    }
2021-02-09 06:12:48 -08:00
                    opts := DefaultHeadOptions()
                    opts.ChunkRange = 1
                    opts.ChunkDirRoot = w.Dir()
2022-09-20 10:05:50 -07:00
                    h, err := NewHead(nil, nil, w, nil, opts, nil)
2020-10-29 02:43:23 -07:00
                    require.NoError(t, err)
                    require.Equal(t, 0.0, prom_testutil.ToFloat64(h.metrics.walCorruptionsTotal))
2019-06-19 06:46:24 -07:00
                    initErr := h.Init(math.MinInt64)
2019-06-14 08:39:22 -07:00
2023-11-16 10:54:41 -08:00
                    var cerr *wlog.CorruptionErr
                    require.ErrorAs(t, initErr, &cerr, "reading the wal didn't return corruption error")
2021-10-25 23:06:25 -07:00
                    require.NoError(t, h.Close()) // Head will close the wal as well.
2019-06-19 06:46:24 -07:00
                }
2018-11-30 03:37:04 -08:00
2019-06-19 06:46:24 -07:00
                // Open the db to trigger a repair.
                {
2021-06-05 07:29:32 -07:00
                    db, err := Open(dir, nil, nil, DefaultOptions(), nil)
2020-10-29 02:43:23 -07:00
                    require.NoError(t, err)
2019-06-19 06:46:24 -07:00
                    defer func() {
2020-10-29 02:43:23 -07:00
                        require.NoError(t, db.Close())
2019-06-19 06:46:24 -07:00
                    }()
2020-10-29 02:43:23 -07:00
                    require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.walCorruptionsTotal))
2019-06-19 06:46:24 -07:00
                }
2018-11-30 03:37:04 -08:00
2019-06-19 06:46:24 -07:00
                // Read the wal content after the repair.
                {
2022-10-10 08:08:46 -07:00
                    sr, err := wlog.NewSegmentsReader(filepath.Join(dir, "wal"))
2020-10-29 02:43:23 -07:00
                    require.NoError(t, err)
2019-06-19 06:46:24 -07:00
                    defer sr.Close()
2022-10-10 08:08:46 -07:00
                    r := wlog.NewReader(sr)
2018-11-30 03:37:04 -08:00
2019-06-19 06:46:24 -07:00
                    var actRec int
                    for r.Next() {
                        actRec++
                    }
2020-10-29 02:43:23 -07:00
                    require.NoError(t, r.Err())
                    require.Equal(t, test.expRecs, actRec, "Wrong number of intact records")
2019-06-14 08:39:22 -07:00
                }
2019-06-19 06:46:24 -07:00
            })
        }
2018-11-30 03:37:04 -08:00
    }
}
2019-06-07 03:35:02 -07:00
2023-10-13 05:21:35 -07:00
// TestWblRepair_DecodingError ensures that a repair is run for an error
// when decoding a record.
func TestWblRepair_DecodingError(t *testing.T) {
    var enc record.Encoder
    corrFunc := func(rec []byte) []byte {
        return rec[:3]
    }
    rec := enc.Samples([]record.RefSample{{Ref: 0, T: 99, V: 1}}, []byte{})
    totalRecs := 9
    expRecs := 5
    dir := t.TempDir()
    // Fill the wbl and corrupt it.
    {
        wal, err := wlog.New(nil, nil, filepath.Join(dir, "wal"), wlog.CompressionNone)
        require.NoError(t, err)
        wbl, err := wlog.New(nil, nil, filepath.Join(dir, "wbl"), wlog.CompressionNone)
        require.NoError(t, err)
        for i := 1; i <= totalRecs; i++ {
            // At this point insert a corrupted record.
            if i-1 == expRecs {
                require.NoError(t, wbl.Log(corrFunc(rec)))
                continue
            }
            require.NoError(t, wbl.Log(rec))
        }
        opts := DefaultHeadOptions()
        opts.ChunkRange = 1
        opts.ChunkDirRoot = wal.Dir()
        opts.OutOfOrderCapMax.Store(30)
        opts.OutOfOrderTimeWindow.Store(1000 * time.Minute.Milliseconds())
        h, err := NewHead(nil, nil, wal, wbl, opts, nil)
        require.NoError(t, err)
        require.Equal(t, 0.0, prom_testutil.ToFloat64(h.metrics.walCorruptionsTotal))
        initErr := h.Init(math.MinInt64)
2023-11-16 10:54:41 -08:00
        var elb *errLoadWbl
        require.ErrorAs(t, initErr, &elb) // Wbl errors are wrapped into errLoadWbl, make sure we can unwrap it.
2023-10-13 05:21:35 -07:00
2023-11-16 10:54:41 -08:00
        var cerr *wlog.CorruptionErr
        require.ErrorAs(t, initErr, &cerr, "reading the wal didn't return corruption error")
2023-10-13 05:21:35 -07:00
        require.NoError(t, h.Close()) // Head will close the wal as well.
    }
    // Open the db to trigger a repair.
    {
        db, err := Open(dir, nil, nil, DefaultOptions(), nil)
        require.NoError(t, err)
        defer func() {
            require.NoError(t, db.Close())
        }()
        require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.walCorruptionsTotal))
    }
    // Read the wbl content after the repair.
    {
        sr, err := wlog.NewSegmentsReader(filepath.Join(dir, "wbl"))
        require.NoError(t, err)
        defer sr.Close()
        r := wlog.NewReader(sr)
        var actRec int
        for r.Next() {
            actRec++
        }
        require.NoError(t, r.Err())
        require.Equal(t, expRecs, actRec, "Wrong number of intact records")
    }
}
2020-05-06 08:30:00 -07:00
func TestHeadReadWriterRepair(t *testing.T) {
2022-01-22 01:55:01 -08:00
    dir := t.TempDir()
2019-06-07 03:35:02 -07:00
2020-05-29 07:38:41 -07:00
    const chunkRange = 1000
2020-05-06 08:30:00 -07:00
    walDir := filepath.Join(dir, "wal")
    // Fill the chunk segments and corrupt it.
    {
2023-07-11 05:57:57 -07:00
        w, err := wlog.New(nil, nil, walDir, wlog.CompressionNone)
2020-10-29 02:43:23 -07:00
        require.NoError(t, err)
2020-05-06 08:30:00 -07:00
2021-02-09 06:12:48 -08:00
        opts := DefaultHeadOptions()
        opts.ChunkRange = chunkRange
        opts.ChunkDirRoot = dir
2022-09-20 10:05:50 -07:00
        opts.ChunkWriteQueueSize = 1 // We need to set this option so that we use the async queue. Upstream prometheus uses the queue directly.
        h, err := NewHead(nil, nil, w, nil, opts, nil)
2020-10-29 02:43:23 -07:00
        require.NoError(t, err)
        require.Equal(t, 0.0, prom_testutil.ToFloat64(h.metrics.mmapChunkCorruptionTotal))
        require.NoError(t, h.Init(math.MinInt64))
2020-05-06 08:30:00 -07:00
2023-05-25 13:12:32 -07:00
        cOpts := chunkOpts{
            chunkDiskMapper: h.chunkDiskMapper,
            chunkRange:      chunkRange,
            samplesPerChunk: DefaultSamplesPerChunk,
        }
2020-05-20 06:22:08 -07:00
        s, created, _ := h.getOrCreate(1, labels.FromStrings("a", "1"))
2020-10-29 02:43:23 -07:00
        require.True(t, created, "series was not created")
2020-05-06 08:30:00 -07:00
        for i := 0; i < 7; i++ {
2023-05-25 13:12:32 -07:00
            ok, chunkCreated := s.append(int64(i*chunkRange), float64(i*chunkRange), 0, cOpts)
2020-10-29 02:43:23 -07:00
            require.True(t, ok, "series append failed")
            require.True(t, chunkCreated, "chunk was not created")
2023-05-25 13:12:32 -07:00
            ok, chunkCreated = s.append(int64(i*chunkRange)+chunkRange-1, float64(i*chunkRange), 0, cOpts)
2020-10-29 02:43:23 -07:00
            require.True(t, ok, "series append failed")
            require.False(t, chunkCreated, "chunk was created")
2022-01-10 05:36:45 -08:00
            h.chunkDiskMapper.CutNewFile()
2023-07-31 02:10:24 -07:00
            s.mmapChunks(h.chunkDiskMapper)
2020-05-06 08:30:00 -07:00
        }
2020-10-29 02:43:23 -07:00
        require.NoError(t, h.Close())
2020-05-06 08:30:00 -07:00
2022-01-10 05:36:45 -08:00
        // Verify that there are 6 segment files.
        // It should only be 6 because the last call to .CutNewFile() won't
        // take effect without another chunk being written.
2022-04-27 02:24:36 -07:00
        files, err := os.ReadDir(mmappedChunksDir(dir))
2020-10-29 02:43:23 -07:00
        require.NoError(t, err)
2023-12-07 03:35:01 -08:00
        require.Len(t, files, 6)
2020-05-06 08:30:00 -07:00
        // Corrupt the 4th file by writing a random byte to series ref.
2021-10-22 01:06:44 -07:00
        f, err := os.OpenFile(filepath.Join(mmappedChunksDir(dir), files[3].Name()), os.O_WRONLY, 0o666)
2020-10-29 02:43:23 -07:00
        require.NoError(t, err)
2020-05-06 08:30:00 -07:00
        n, err := f.WriteAt([]byte{67, 88}, chunks.HeadChunkFileHeaderSize+2)
2020-10-29 02:43:23 -07:00
        require.NoError(t, err)
        require.Equal(t, 2, n)
        require.NoError(t, f.Close())
2020-05-06 08:30:00 -07:00
    }
    // Open the db to trigger a repair.
    {
2021-06-05 07:29:32 -07:00
        db, err := Open(dir, nil, nil, DefaultOptions(), nil)
2020-10-29 02:43:23 -07:00
        require.NoError(t, err)
2020-05-06 08:30:00 -07:00
        defer func() {
2020-10-29 02:43:23 -07:00
            require.NoError(t, db.Close())
2020-05-06 08:30:00 -07:00
        }()
2020-10-29 02:43:23 -07:00
        require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.mmapChunkCorruptionTotal))
2020-05-06 08:30:00 -07:00
    }
    // Verify that there are 3 segment files after the repair.
    // The segments from the corrupt segment should be removed.
    {
2022-04-27 02:24:36 -07:00
        files, err := os.ReadDir(mmappedChunksDir(dir))
2020-10-29 02:43:23 -07:00
        require.NoError(t, err)
2023-12-07 03:35:01 -08:00
        require.Len(t, files, 3)
2020-05-06 08:30:00 -07:00
    }
}
func TestNewWalSegmentOnTruncate(t *testing.T) {
2023-07-11 05:57:57 -07:00
    h, wal := newTestHead(t, 1000, wlog.CompressionNone, false)
2020-05-06 08:30:00 -07:00
    defer func() {
2020-10-29 02:43:23 -07:00
        require.NoError(t, h.Close())
2020-05-06 08:30:00 -07:00
    }()
2019-06-07 03:35:02 -07:00
    add := func(ts int64) {
2020-07-30 04:11:13 -07:00
        app := h.Appender(context.Background())
2022-03-09 14:17:29 -08:00
        _, err := app.Append(0, labels.FromStrings("a", "b"), ts, 0)
2020-10-29 02:43:23 -07:00
        require.NoError(t, err)
        require.NoError(t, app.Commit())
2019-06-07 03:35:02 -07:00
    }
    add(0)
2022-10-10 08:08:46 -07:00
    _, last, err := wlog.Segments(wal.Dir())
2020-10-29 02:43:23 -07:00
    require.NoError(t, err)
    require.Equal(t, 0, last)
2019-06-07 03:35:02 -07:00
    add(1)
2020-10-29 02:43:23 -07:00
    require.NoError(t, h.Truncate(1))
2022-10-10 08:08:46 -07:00
    _, last, err = wlog.Segments(wal.Dir())
2020-10-29 02:43:23 -07:00
    require.NoError(t, err)
    require.Equal(t, 1, last)
2019-06-07 03:35:02 -07:00
    add(2)
2020-10-29 02:43:23 -07:00
    require.NoError(t, h.Truncate(2))
2022-10-10 08:08:46 -07:00
    _, last, err = wlog.Segments(wal.Dir())
2020-10-29 02:43:23 -07:00
    require.NoError(t, err)
    require.Equal(t, 2, last)
2019-06-07 03:35:02 -07:00
}
2020-01-20 03:05:27 -08:00
func TestAddDuplicateLabelName(t *testing.T) {
2023-07-11 05:57:57 -07:00
    h, _ := newTestHead(t, 1000, wlog.CompressionNone, false)
2020-01-20 03:05:27 -08:00
    defer func() {
2020-10-29 02:43:23 -07:00
        require.NoError(t, h.Close())
2020-01-20 03:05:27 -08:00
    }()
    add := func(labels labels.Labels, labelName string) {
2020-07-30 04:11:13 -07:00
        app := h.Appender(context.Background())
2021-02-18 04:07:00 -08:00
        _, err := app.Append(0, labels, 0, 0)
2020-10-29 02:43:23 -07:00
        require.Error(t, err)
        require.Equal(t, fmt.Sprintf(`label name "%s" is not unique: invalid sample`, labelName), err.Error())
2020-01-20 03:05:27 -08:00
    }
2022-03-09 14:17:29 -08:00
    add(labels.FromStrings("a", "c", "a", "b"), "a")
    add(labels.FromStrings("a", "c", "a", "c"), "a")
    add(labels.FromStrings("__name__", "up", "job", "prometheus", "le", "500", "le", "400", "unit", "s"), "le")
2020-01-20 03:05:27 -08:00
}
2020-01-21 11:30:20 -08:00
2020-02-17 10:37:09 -08:00
func TestMemSeriesIsolation(t *testing.T) {
2021-11-19 02:11:32 -08:00
    if defaultIsolationDisabled {
        t.Skip("skipping test since tsdb isolation is disabled")
    }
2020-02-17 10:37:09 -08:00
    // Put a series, select it. GC it and then access it.
2020-05-06 08:30:00 -07:00
    lastValue := func(h *Head, maxAppendID uint64) int {
        idx, err := h.Index()
2020-10-29 02:43:23 -07:00
        require.NoError(t, err)
2020-02-17 10:37:09 -08:00
2021-07-20 01:47:20 -07:00
        iso := h.iso.State(math.MinInt64, math.MaxInt64)
2020-02-12 11:22:27 -08:00
        iso.maxAppendID = maxAppendID
2020-02-17 10:37:09 -08:00
2020-05-22 02:03:23 -07:00
        chunks, err := h.chunksRange(math.MinInt64, math.MaxInt64, iso)
2020-10-29 02:43:23 -07:00
        require.NoError(t, err)
2020-07-31 08:03:02 -07:00
        // Hm.. here direct block chunk querier might be required?
        querier := blockQuerier{
            blockBaseQuerier: &blockBaseQuerier{
                index:      idx,
                chunks:     chunks,
                tombstones: tombstones.NewMemTombstones(),
                mint:       0,
                maxt:       10000,
            },
2020-02-17 10:37:09 -08:00
        }
2020-10-29 02:43:23 -07:00
        require.NoError(t, err)
2020-02-17 10:37:09 -08:00
        defer querier.Close()
2023-09-12 03:37:38 -07:00
        ss := querier.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
2020-06-09 09:57:31 -07:00
        _, seriesSet, ws, err := expandSeriesSet(ss)
2020-10-29 02:43:23 -07:00
        require.NoError(t, err)
2023-12-07 03:35:01 -08:00
        require.Empty(t, ws)
2020-02-17 10:37:09 -08:00
        for _, series := range seriesSet {
2023-03-30 10:50:13 -07:00
            return int(series[len(series)-1].f)
2020-02-17 10:37:09 -08:00
        }
        return -1
    }
2020-05-06 08:30:00 -07:00
    addSamples := func(h *Head) int {
        i := 1
        for ; i <= 1000; i++ {
            var app storage.Appender
            // To initialize bounds.
            if h.MinTime() == math.MaxInt64 {
                app = &initAppender{head: h}
            } else {
2020-07-30 04:48:47 -07:00
                a := h.appender()
2020-05-06 08:30:00 -07:00
                a.cleanupAppendIDsBelow = 0
                app = a
            }
2021-02-18 04:07:00 -08:00
            _, err := app.Append(0, labels.FromStrings("foo", "bar"), int64(i), float64(i))
2020-10-29 02:43:23 -07:00
            require.NoError(t, err)
            require.NoError(t, app.Commit())
2023-07-31 02:10:24 -07:00
h . mmapHeadChunks ( )
2020-02-17 10:37:09 -08:00
}
2020-05-06 08:30:00 -07:00
return i
}
2020-02-17 10:37:09 -08:00
2020-05-06 08:30:00 -07:00
testIsolation := func ( h * Head , i int ) {
2020-02-17 10:37:09 -08:00
}
2020-05-06 08:30:00 -07:00
// Test isolation without restart of Head.
2023-07-11 05:57:57 -07:00
hb , _ := newTestHead ( t , 1000 , wlog . CompressionNone , false )
2020-05-06 08:30:00 -07:00
i := addSamples ( hb )
testIsolation ( hb , i )
2020-02-12 11:22:27 -08:00
// Test simple cases in different chunks when no appendID cleanup has been performed.
2020-10-29 02:43:23 -07:00
require . Equal ( t , 10 , lastValue ( hb , 10 ) )
require . Equal ( t , 130 , lastValue ( hb , 130 ) )
require . Equal ( t , 160 , lastValue ( hb , 160 ) )
require . Equal ( t , 240 , lastValue ( hb , 240 ) )
require . Equal ( t , 500 , lastValue ( hb , 500 ) )
require . Equal ( t , 750 , lastValue ( hb , 750 ) )
require . Equal ( t , 995 , lastValue ( hb , 995 ) )
require . Equal ( t , 999 , lastValue ( hb , 999 ) )
2020-02-17 10:37:09 -08:00
2020-02-12 11:22:27 -08:00
// Cleanup appendIDs below 500.
2020-07-30 04:48:47 -07:00
app := hb . appender ( )
2020-04-17 11:51:03 -07:00
app . cleanupAppendIDsBelow = 500
2021-02-18 04:07:00 -08:00
_ , err := app . Append ( 0 , labels . FromStrings ( "foo" , "bar" ) , int64 ( i ) , float64 ( i ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
2020-02-17 10:37:09 -08:00
i ++
2020-02-12 11:22:27 -08:00
// We should not get queries with a maxAppendID below 500 after the cleanup,
// but they only take the remaining appendIDs into account.
2020-10-29 02:43:23 -07:00
require . Equal ( t , 499 , lastValue ( hb , 10 ) )
require . Equal ( t , 499 , lastValue ( hb , 130 ) )
require . Equal ( t , 499 , lastValue ( hb , 160 ) )
require . Equal ( t , 499 , lastValue ( hb , 240 ) )
require . Equal ( t , 500 , lastValue ( hb , 500 ) )
require . Equal ( t , 995 , lastValue ( hb , 995 ) )
require . Equal ( t , 999 , lastValue ( hb , 999 ) )
2020-02-17 10:37:09 -08:00
2020-02-12 11:22:27 -08:00
// Cleanup appendIDs below 1000, which means the sample buffer is
// the only thing with appendIDs.
2020-07-30 04:48:47 -07:00
app = hb . appender ( )
2020-04-17 11:51:03 -07:00
app . cleanupAppendIDsBelow = 1000
2021-02-18 04:07:00 -08:00
_ , err = app . Append ( 0 , labels . FromStrings ( "foo" , "bar" ) , int64 ( i ) , float64 ( i ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
require . Equal ( t , 999 , lastValue ( hb , 998 ) )
require . Equal ( t , 999 , lastValue ( hb , 999 ) )
require . Equal ( t , 1000 , lastValue ( hb , 1000 ) )
require . Equal ( t , 1001 , lastValue ( hb , 1001 ) )
require . Equal ( t , 1002 , lastValue ( hb , 1002 ) )
require . Equal ( t , 1002 , lastValue ( hb , 1003 ) )
2020-02-12 11:22:27 -08:00
i ++
// Cleanup appendIDs below 1001, but with a rollback.
2020-07-30 04:48:47 -07:00
app = hb . appender ( )
2020-04-17 11:51:03 -07:00
app . cleanupAppendIDsBelow = 1001
2021-02-18 04:07:00 -08:00
_ , err = app . Append ( 0 , labels . FromStrings ( "foo" , "bar" ) , int64 ( i ) , float64 ( i ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . NoError ( t , app . Rollback ( ) )
require . Equal ( t , 1000 , lastValue ( hb , 999 ) )
require . Equal ( t , 1000 , lastValue ( hb , 1000 ) )
require . Equal ( t , 1001 , lastValue ( hb , 1001 ) )
require . Equal ( t , 1002 , lastValue ( hb , 1002 ) )
require . Equal ( t , 1002 , lastValue ( hb , 1003 ) )
2020-05-06 08:30:00 -07:00
2020-10-29 02:43:23 -07:00
require . NoError ( t , hb . Close ( ) )
2020-05-06 08:30:00 -07:00
// Test isolation with restart of Head. This is to verify the num samples of chunks after m-map chunk replay.
2023-07-11 05:57:57 -07:00
hb , w := newTestHead ( t , 1000 , wlog . CompressionNone , false )
2020-05-06 08:30:00 -07:00
i = addSamples ( hb )
2020-10-29 02:43:23 -07:00
require . NoError ( t , hb . Close ( ) )
2020-05-06 08:30:00 -07:00
2023-07-11 05:57:57 -07:00
wal , err := wlog . NewSize ( nil , nil , w . Dir ( ) , 32768 , wlog . CompressionNone )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2021-02-09 06:12:48 -08:00
opts := DefaultHeadOptions ( )
opts . ChunkRange = 1000
2022-10-10 08:08:46 -07:00
opts . ChunkDirRoot = wal . Dir ( )
hb , err = NewHead ( nil , nil , wal , nil , opts , nil )
2020-10-29 02:43:23 -07:00
defer func ( ) { require . NoError ( t , hb . Close ( ) ) } ( )
require . NoError ( t , err )
require . NoError ( t , hb . Init ( 0 ) )
2020-05-06 08:30:00 -07:00
// No appends after restarting. Hence all should return the last value.
2020-10-29 02:43:23 -07:00
require . Equal ( t , 1000 , lastValue ( hb , 10 ) )
require . Equal ( t , 1000 , lastValue ( hb , 130 ) )
require . Equal ( t , 1000 , lastValue ( hb , 160 ) )
require . Equal ( t , 1000 , lastValue ( hb , 240 ) )
require . Equal ( t , 1000 , lastValue ( hb , 500 ) )
2020-05-06 08:30:00 -07:00
// Cleanup appendIDs below 1000, which means the sample buffer is
// the only thing with appendIDs.
2020-07-30 04:48:47 -07:00
app = hb . appender ( )
2021-02-18 04:07:00 -08:00
_ , err = app . Append ( 0 , labels . FromStrings ( "foo" , "bar" ) , int64 ( i ) , float64 ( i ) )
2020-05-06 08:30:00 -07:00
i ++
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
require . Equal ( t , 1001 , lastValue ( hb , 998 ) )
require . Equal ( t , 1001 , lastValue ( hb , 999 ) )
require . Equal ( t , 1001 , lastValue ( hb , 1000 ) )
require . Equal ( t , 1001 , lastValue ( hb , 1001 ) )
require . Equal ( t , 1001 , lastValue ( hb , 1002 ) )
require . Equal ( t , 1001 , lastValue ( hb , 1003 ) )
2020-05-06 08:30:00 -07:00
// Clean up appendIDs below 1002, but with a rollback.
2020-07-30 04:48:47 -07:00
app = hb . appender ( )
2021-02-18 04:07:00 -08:00
_ , err = app . Append ( 0 , labels . FromStrings ( "foo" , "bar" ) , int64 ( i ) , float64 ( i ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . NoError ( t , app . Rollback ( ) )
require . Equal ( t , 1001 , lastValue ( hb , 999 ) )
require . Equal ( t , 1001 , lastValue ( hb , 1000 ) )
require . Equal ( t , 1001 , lastValue ( hb , 1001 ) )
require . Equal ( t , 1001 , lastValue ( hb , 1002 ) )
require . Equal ( t , 1001 , lastValue ( hb , 1003 ) )
2020-02-17 10:37:09 -08:00
}
2020-02-12 11:22:27 -08:00
func TestIsolationRollback ( t * testing . T ) {
2021-11-19 02:11:32 -08:00
if defaultIsolationDisabled {
t . Skip ( "skipping test since tsdb isolation is disabled" )
}
2020-02-12 11:22:27 -08:00
// Rollback after a failed append and test if the low watermark has progressed anyway.
2023-07-11 05:57:57 -07:00
hb , _ := newTestHead ( t , 1000 , wlog . CompressionNone , false )
2020-05-06 08:30:00 -07:00
defer func ( ) {
2020-10-29 02:43:23 -07:00
require . NoError ( t , hb . Close ( ) )
2020-05-06 08:30:00 -07:00
} ( )
2020-02-17 10:37:09 -08:00
2020-07-30 04:11:13 -07:00
app := hb . Appender ( context . Background ( ) )
2021-02-18 04:07:00 -08:00
_ , err := app . Append ( 0 , labels . FromStrings ( "foo" , "bar" ) , 0 , 0 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
require . Equal ( t , uint64 ( 1 ) , hb . iso . lowWatermark ( ) )
2020-02-17 10:37:09 -08:00
2020-07-30 04:11:13 -07:00
app = hb . Appender ( context . Background ( ) )
2021-02-18 04:07:00 -08:00
_ , err = app . Append ( 0 , labels . FromStrings ( "foo" , "bar" ) , 1 , 1 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2021-02-18 04:07:00 -08:00
_ , err = app . Append ( 0 , labels . FromStrings ( "foo" , "bar" , "foo" , "baz" ) , 2 , 2 )
2020-10-29 02:43:23 -07:00
require . Error ( t , err )
require . NoError ( t , app . Rollback ( ) )
require . Equal ( t , uint64 ( 2 ) , hb . iso . lowWatermark ( ) )
2020-02-17 10:37:09 -08:00
2020-07-30 04:11:13 -07:00
app = hb . Appender ( context . Background ( ) )
2021-02-18 04:07:00 -08:00
_ , err = app . Append ( 0 , labels . FromStrings ( "foo" , "bar" ) , 3 , 3 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
require . Equal ( t , uint64 ( 3 ) , hb . iso . lowWatermark ( ) , "Low watermark should proceed to 3 even if append #2 was rolled back." )
2020-02-12 11:22:27 -08:00
}
2020-02-17 10:37:09 -08:00
2020-02-12 11:22:27 -08:00
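// TestIsolationLowWatermarkMonotonous verifies that the isolation low watermark only moves
// forward and is held back by the oldest open appender or read (isolation state) until that
// appender or read finishes.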
func TestIsolationLowWatermarkMonotonous ( t * testing . T ) {
2021-11-19 02:11:32 -08:00
if defaultIsolationDisabled {
t . Skip ( "skipping test since tsdb isolation is disabled" )
}
2023-07-11 05:57:57 -07:00
hb , _ := newTestHead ( t , 1000 , wlog . CompressionNone , false )
2020-05-06 08:30:00 -07:00
defer func ( ) {
2020-10-29 02:43:23 -07:00
require . NoError ( t , hb . Close ( ) )
2020-05-06 08:30:00 -07:00
} ( )
2020-02-17 10:37:09 -08:00
2020-07-30 04:11:13 -07:00
app1 := hb . Appender ( context . Background ( ) )
2021-02-18 04:07:00 -08:00
_ , err := app1 . Append ( 0 , labels . FromStrings ( "foo" , "bar" ) , 0 , 0 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . NoError ( t , app1 . Commit ( ) )
require . Equal ( t , uint64 ( 1 ) , hb . iso . lowWatermark ( ) , "Low watermark should be 1 after 1st append." )
2020-02-12 11:22:27 -08:00
2020-07-30 04:11:13 -07:00
app1 = hb . Appender ( context . Background ( ) )
2021-02-18 04:07:00 -08:00
_ , err = app1 . Append ( 0 , labels . FromStrings ( "foo" , "bar" ) , 1 , 1 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . Equal ( t , uint64 ( 2 ) , hb . iso . lowWatermark ( ) , "Low watermark should be two, even if append is not committed yet." )
2020-02-12 11:22:27 -08:00
2020-07-30 04:11:13 -07:00
app2 := hb . Appender ( context . Background ( ) )
2021-02-18 04:07:00 -08:00
_ , err = app2 . Append ( 0 , labels . FromStrings ( "foo" , "baz" ) , 1 , 1 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . NoError ( t , app2 . Commit ( ) )
require . Equal ( t , uint64 ( 2 ) , hb . iso . lowWatermark ( ) , "Low watermark should stay two because app1 is not committed yet." )
2020-02-12 11:22:27 -08:00
2021-07-20 01:47:20 -07:00
is := hb . iso . State ( math . MinInt64 , math . MaxInt64 )
2020-10-29 02:43:23 -07:00
require . Equal ( t , uint64 ( 2 ) , hb . iso . lowWatermark ( ) , "After simulated read (iso state retrieved), low watermark should stay at 2." )
2020-02-17 10:37:09 -08:00
2020-10-29 02:43:23 -07:00
require . NoError ( t , app1 . Commit ( ) )
require . Equal ( t , uint64 ( 2 ) , hb . iso . lowWatermark ( ) , "Even after app1 is committed, low watermark should stay at 2 because read is still ongoing." )
2020-01-21 11:30:20 -08:00
2020-02-12 11:22:27 -08:00
is . Close ( )
2020-10-29 02:43:23 -07:00
require . Equal ( t , uint64 ( 3 ) , hb . iso . lowWatermark ( ) , "After read has finished (iso state closed), low watermark should jump to three." )
2020-01-21 11:30:20 -08:00
}
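// A minimal sketch (hypothetical helper, not used by the tests in this file) of the read
// pattern that TestIsolationLowWatermarkMonotonous simulates: acquiring an isolation state
// pins the low watermark for the duration of a read, and closing it lets the watermark
// advance again.
func exampleIsolatedRead(h *Head, mint, maxt int64) {
	is := h.iso.State(mint, maxt) // Pins the low watermark while the read is in flight.
	defer is.Close()              // Closing the state allows the low watermark to move on.
	// ... perform the actual read against the Head here ...
}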
2020-02-28 17:39:26 -08:00
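// TestIsolationAppendIDZeroIsNoop verifies that appending with appendID 0 does not record an
// isolation transaction on the series.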
func TestIsolationAppendIDZeroIsNoop ( t * testing . T ) {
2021-11-19 02:11:32 -08:00
if defaultIsolationDisabled {
t . Skip ( "skipping test since tsdb isolation is disabled" )
}
2023-07-11 05:57:57 -07:00
h , _ := newTestHead ( t , 1000 , wlog . CompressionNone , false )
2020-05-06 08:30:00 -07:00
defer func ( ) {
2020-10-29 02:43:23 -07:00
require . NoError ( t , h . Close ( ) )
2020-05-06 08:30:00 -07:00
} ( )
2020-02-28 17:39:26 -08:00
h . initTime ( 0 )
2023-05-25 13:12:32 -07:00
cOpts := chunkOpts {
chunkDiskMapper : h . chunkDiskMapper ,
chunkRange : h . chunkRange . Load ( ) ,
samplesPerChunk : DefaultSamplesPerChunk ,
}
2020-05-20 06:22:08 -07:00
s , _ , _ := h . getOrCreate ( 1 , labels . FromStrings ( "a" , "1" ) )
2020-02-28 17:39:26 -08:00
2023-05-25 13:12:32 -07:00
ok , _ := s . append ( 0 , 0 , 0 , cOpts )
2020-10-29 02:43:23 -07:00
require . True ( t , ok , "Series append failed." )
2023-10-21 05:38:01 -07:00
require . Equal ( t , 0 , int ( s . txs . txIDCount ) , "Series should not have an appendID after append with appendID=0." )
2020-02-28 17:39:26 -08:00
}
2020-03-16 05:59:22 -07:00
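// TestHeadSeriesChunkRace repeats the race scenario many times to increase the chance of
// hitting a race between Head garbage collection and a concurrent query.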
func TestHeadSeriesChunkRace ( t * testing . T ) {
for i := 0 ; i < 1000 ; i ++ {
testHeadSeriesChunkRace ( t )
}
}
2020-04-17 11:51:03 -07:00
func TestIsolationWithoutAdd ( t * testing . T ) {
2021-11-19 02:11:32 -08:00
if defaultIsolationDisabled {
t . Skip ( "skipping test since tsdb isolation is disabled" )
}
2023-07-11 05:57:57 -07:00
hb , _ := newTestHead ( t , 1000 , wlog . CompressionNone , false )
2020-05-06 08:30:00 -07:00
defer func ( ) {
2020-10-29 02:43:23 -07:00
require . NoError ( t , hb . Close ( ) )
2020-05-06 08:30:00 -07:00
} ( )
2020-04-17 11:51:03 -07:00
2020-07-30 04:11:13 -07:00
app := hb . Appender ( context . Background ( ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , app . Commit ( ) )
2020-04-17 11:51:03 -07:00
2020-07-30 04:11:13 -07:00
app = hb . Appender ( context . Background ( ) )
2021-02-18 04:07:00 -08:00
_ , err := app . Append ( 0 , labels . FromStrings ( "foo" , "baz" ) , 1 , 1 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
2020-04-17 11:51:03 -07:00
2020-10-29 02:43:23 -07:00
require . Equal ( t , hb . iso . lastAppendID ( ) , hb . iso . lowWatermark ( ) , "High watermark should be equal to the low watermark" )
2020-04-17 11:51:03 -07:00
}
2020-05-06 08:30:00 -07:00
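// TestOutOfOrderSamplesMetric verifies, for every sample type scenario, that rejected
// out-of-order and out-of-bounds appends increment the corresponding Head metrics.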
func TestOutOfOrderSamplesMetric ( t * testing . T ) {
2024-03-03 11:44:12 -08:00
for name , scenario := range sampleTypeScenarios {
t . Run ( name , func ( t * testing . T ) {
testOutOfOrderSamplesMetric ( t , scenario )
} )
}
}
func testOutOfOrderSamplesMetric ( t * testing . T , scenario sampleTypeScenario ) {
2022-01-22 01:55:01 -08:00
dir := t . TempDir ( )
2020-05-06 08:30:00 -07:00
2021-06-05 07:29:32 -07:00
db , err := Open ( dir , nil , nil , DefaultOptions ( ) , nil )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2020-05-06 08:30:00 -07:00
defer func ( ) {
2020-10-29 02:43:23 -07:00
require . NoError ( t , db . Close ( ) )
2020-05-06 08:30:00 -07:00
} ( )
db . DisableCompactions ( )
2024-03-03 11:44:12 -08:00
appendSample := func ( appender storage . Appender , ts int64 ) ( storage . SeriesRef , error ) {
ref , _ , err := scenario . appendFunc ( appender , labels . FromStrings ( "a" , "b" ) , ts , 99 )
return ref , err
}
2020-07-24 07:10:51 -07:00
ctx := context . Background ( )
app := db . Appender ( ctx )
2020-05-06 08:30:00 -07:00
for i := 1 ; i <= 5 ; i ++ {
2024-03-03 11:44:12 -08:00
_ , err = appendSample ( app , int64 ( i ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2020-05-06 08:30:00 -07:00
}
2020-10-29 02:43:23 -07:00
require . NoError ( t , app . Commit ( ) )
2020-05-06 08:30:00 -07:00
// Test out of order metric.
2024-03-03 11:44:12 -08:00
require . Equal ( t , 0.0 , prom_testutil . ToFloat64 ( db . head . metrics . outOfOrderSamples . WithLabelValues ( scenario . sampleType ) ) )
2020-07-24 07:10:51 -07:00
app = db . Appender ( ctx )
2024-03-03 11:44:12 -08:00
_ , err = appendSample ( app , 2 )
2020-10-29 02:43:23 -07:00
require . Equal ( t , storage . ErrOutOfOrderSample , err )
2024-03-03 11:44:12 -08:00
require . Equal ( t , 1.0 , prom_testutil . ToFloat64 ( db . head . metrics . outOfOrderSamples . WithLabelValues ( scenario . sampleType ) ) )
2020-05-06 08:30:00 -07:00
2024-03-03 11:44:12 -08:00
_ , err = appendSample ( app , 3 )
2020-10-29 02:43:23 -07:00
require . Equal ( t , storage . ErrOutOfOrderSample , err )
2024-03-03 11:44:12 -08:00
require . Equal ( t , 2.0 , prom_testutil . ToFloat64 ( db . head . metrics . outOfOrderSamples . WithLabelValues ( scenario . sampleType ) ) )
2020-05-06 08:30:00 -07:00
2024-03-03 11:44:12 -08:00
_ , err = appendSample ( app , 4 )
2020-10-29 02:43:23 -07:00
require . Equal ( t , storage . ErrOutOfOrderSample , err )
2024-03-03 11:44:12 -08:00
require . Equal ( t , 3.0 , prom_testutil . ToFloat64 ( db . head . metrics . outOfOrderSamples . WithLabelValues ( scenario . sampleType ) ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , app . Commit ( ) )
2020-05-06 08:30:00 -07:00
// Compact the Head to test the out-of-bounds metric.
2020-07-24 07:10:51 -07:00
app = db . Appender ( ctx )
2024-03-03 11:44:12 -08:00
_ , err = appendSample ( app , DefaultBlockDuration * 2 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
2020-05-06 08:30:00 -07:00
2020-10-29 02:43:23 -07:00
require . Equal ( t , int64 ( math . MinInt64 ) , db . head . minValidTime . Load ( ) )
2023-09-13 08:45:06 -07:00
require . NoError ( t , db . Compact ( ctx ) )
2024-08-18 02:27:04 -07:00
require . Positive ( t , db . head . minValidTime . Load ( ) )
2020-05-06 08:30:00 -07:00
2020-07-24 07:10:51 -07:00
app = db . Appender ( ctx )
2024-03-03 11:44:12 -08:00
_ , err = appendSample ( app , db . head . minValidTime . Load ( ) - 2 )
2020-10-29 02:43:23 -07:00
require . Equal ( t , storage . ErrOutOfBounds , err )
2024-03-03 11:44:12 -08:00
require . Equal ( t , 1.0 , prom_testutil . ToFloat64 ( db . head . metrics . outOfBoundSamples . WithLabelValues ( scenario . sampleType ) ) )
2020-05-06 08:30:00 -07:00
2024-03-03 11:44:12 -08:00
_ , err = appendSample ( app , db . head . minValidTime . Load ( ) - 1 )
2020-10-29 02:43:23 -07:00
require . Equal ( t , storage . ErrOutOfBounds , err )
2024-03-03 11:44:12 -08:00
require . Equal ( t , 2.0 , prom_testutil . ToFloat64 ( db . head . metrics . outOfBoundSamples . WithLabelValues ( scenario . sampleType ) ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , app . Commit ( ) )
2020-05-06 08:30:00 -07:00
// Some more valid samples for out of order.
2020-07-24 07:10:51 -07:00
app = db . Appender ( ctx )
2020-05-06 08:30:00 -07:00
for i := 1 ; i <= 5 ; i ++ {
2024-03-03 11:44:12 -08:00
_ , err = appendSample ( app , db . head . minValidTime . Load ( ) + DefaultBlockDuration + int64 ( i ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2020-05-06 08:30:00 -07:00
}
2020-10-29 02:43:23 -07:00
require . NoError ( t , app . Commit ( ) )
2020-05-06 08:30:00 -07:00
// Test out of order metric.
2020-07-24 07:10:51 -07:00
app = db . Appender ( ctx )
2024-03-03 11:44:12 -08:00
_ , err = appendSample ( app , db . head . minValidTime . Load ( ) + DefaultBlockDuration + 2 )
2020-10-29 02:43:23 -07:00
require . Equal ( t , storage . ErrOutOfOrderSample , err )
2024-03-03 11:44:12 -08:00
require . Equal ( t , 4.0 , prom_testutil . ToFloat64 ( db . head . metrics . outOfOrderSamples . WithLabelValues ( scenario . sampleType ) ) )
2020-05-06 08:30:00 -07:00
2024-03-03 11:44:12 -08:00
_ , err = appendSample ( app , db . head . minValidTime . Load ( ) + DefaultBlockDuration + 3 )
2020-10-29 02:43:23 -07:00
require . Equal ( t , storage . ErrOutOfOrderSample , err )
2024-03-03 11:44:12 -08:00
require . Equal ( t , 5.0 , prom_testutil . ToFloat64 ( db . head . metrics . outOfOrderSamples . WithLabelValues ( scenario . sampleType ) ) )
2020-05-06 08:30:00 -07:00
2024-03-03 11:44:12 -08:00
_ , err = appendSample ( app , db . head . minValidTime . Load ( ) + DefaultBlockDuration + 4 )
2020-10-29 02:43:23 -07:00
require . Equal ( t , storage . ErrOutOfOrderSample , err )
2024-03-03 11:44:12 -08:00
require . Equal ( t , 6.0 , prom_testutil . ToFloat64 ( db . head . metrics . outOfOrderSamples . WithLabelValues ( scenario . sampleType ) ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , app . Commit ( ) )
2020-05-06 08:30:00 -07:00
}
func testHeadSeriesChunkRace ( t * testing . T ) {
2023-07-11 05:57:57 -07:00
h , _ := newTestHead ( t , 1000 , wlog . CompressionNone , false )
2020-05-06 08:30:00 -07:00
defer func ( ) {
2020-10-29 02:43:23 -07:00
require . NoError ( t , h . Close ( ) )
2020-05-06 08:30:00 -07:00
} ( )
2020-10-29 02:43:23 -07:00
require . NoError ( t , h . Init ( 0 ) )
2020-07-30 04:11:13 -07:00
app := h . Appender ( context . Background ( ) )
2020-03-16 05:59:22 -07:00
2021-02-18 04:07:00 -08:00
s2 , err := app . Append ( 0 , labels . FromStrings ( "foo2" , "bar" ) , 5 , 0 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2020-03-16 05:59:22 -07:00
for ts := int64 ( 6 ) ; ts < 11 ; ts ++ {
2022-03-09 14:17:29 -08:00
_ , err = app . Append ( s2 , labels . EmptyLabels ( ) , ts , 0 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2020-03-16 05:59:22 -07:00
}
2020-10-29 02:43:23 -07:00
require . NoError ( t , app . Commit ( ) )
2020-03-16 05:59:22 -07:00
var wg sync . WaitGroup
matcher := labels . MustNewMatcher ( labels . MatchEqual , "" , "" )
q , err := NewBlockQuerier ( h , 18 , 22 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2020-03-16 05:59:22 -07:00
defer q . Close ( )
wg . Add ( 1 )
go func ( ) {
h . updateMinMaxTime ( 20 , 25 )
h . gc ( )
wg . Done ( )
} ( )
2023-09-12 03:37:38 -07:00
ss := q . Select ( context . Background ( ) , false , nil , matcher )
2020-06-09 09:57:31 -07:00
for ss . Next ( ) {
}
2020-10-29 02:43:23 -07:00
require . NoError ( t , ss . Err ( ) )
2020-03-16 05:59:22 -07:00
wg . Wait ( )
}
2020-05-06 08:30:00 -07:00
2020-05-30 05:50:09 -07:00
func TestHeadLabelNamesValuesWithMinMaxRange ( t * testing . T ) {
2023-07-11 05:57:57 -07:00
head , _ := newTestHead ( t , 1000 , wlog . CompressionNone , false )
2020-05-30 05:50:09 -07:00
defer func ( ) {
2020-10-29 02:43:23 -07:00
require . NoError ( t , head . Close ( ) )
2020-05-30 05:50:09 -07:00
} ( )
const (
firstSeriesTimestamp int64 = 100
secondSeriesTimestamp int64 = 200
lastSeriesTimestamp int64 = 300
)
var (
2021-10-22 01:06:44 -07:00
seriesTimestamps = [ ] int64 {
firstSeriesTimestamp ,
2020-05-30 05:50:09 -07:00
secondSeriesTimestamp ,
lastSeriesTimestamp ,
}
expectedLabelNames = [ ] string { "a" , "b" , "c" }
expectedLabelValues = [ ] string { "d" , "e" , "f" }
2023-09-14 01:39:51 -07:00
ctx = context . Background ( )
2020-05-30 05:50:09 -07:00
)
2023-09-14 07:02:04 -07:00
app := head . Appender ( ctx )
2020-05-30 05:50:09 -07:00
for i , name := range expectedLabelNames {
2022-03-09 14:17:29 -08:00
_ , err := app . Append ( 0 , labels . FromStrings ( name , expectedLabelValues [ i ] ) , seriesTimestamps [ i ] , 0 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2020-05-30 05:50:09 -07:00
}
2020-10-29 02:43:23 -07:00
require . NoError ( t , app . Commit ( ) )
2023-12-07 03:35:01 -08:00
require . Equal ( t , firstSeriesTimestamp , head . MinTime ( ) )
require . Equal ( t , lastSeriesTimestamp , head . MaxTime ( ) )
2020-05-30 05:50:09 -07:00
2021-10-22 01:06:44 -07:00
testCases := [ ] struct {
2020-05-30 05:50:09 -07:00
name string
mint int64
maxt int64
expectedNames [ ] string
expectedValues [ ] string
} {
{ "maxt less than head min" , head . MaxTime ( ) - 10 , head . MinTime ( ) - 10 , [ ] string { } , [ ] string { } } ,
{ "mint less than head max" , head . MaxTime ( ) + 10 , head . MinTime ( ) + 10 , [ ] string { } , [ ] string { } } ,
{ "mint and maxt outside head" , head . MaxTime ( ) + 10 , head . MinTime ( ) - 10 , [ ] string { } , [ ] string { } } ,
{ "mint and maxt within head" , head . MaxTime ( ) - 10 , head . MinTime ( ) + 10 , expectedLabelNames , expectedLabelValues } ,
}
for _ , tt := range testCases {
t . Run ( tt . name , func ( t * testing . T ) {
headIdxReader := head . indexRange ( tt . mint , tt . maxt )
2023-09-14 01:39:51 -07:00
actualLabelNames , err := headIdxReader . LabelNames ( ctx )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . Equal ( t , tt . expectedNames , actualLabelNames )
2020-05-30 05:50:09 -07:00
if len ( tt . expectedValues ) > 0 {
for i , name := range expectedLabelNames {
2023-09-14 07:02:04 -07:00
actualLabelValue , err := headIdxReader . SortedLabelValues ( ctx , name )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . Equal ( t , [ ] string { tt . expectedValues [ i ] } , actualLabelValue )
2020-05-30 05:50:09 -07:00
}
}
} )
}
}
2020-07-22 02:57:38 -07:00
2021-02-09 09:38:35 -08:00
func TestHeadLabelValuesWithMatchers ( t * testing . T ) {
2023-07-11 05:57:57 -07:00
head , _ := newTestHead ( t , 1000 , wlog . CompressionNone , false )
2021-07-20 05:38:08 -07:00
t . Cleanup ( func ( ) { require . NoError ( t , head . Close ( ) ) } )
2021-02-09 09:38:35 -08:00
2023-09-14 07:02:04 -07:00
ctx := context . Background ( )
2021-02-09 09:38:35 -08:00
app := head . Appender ( context . Background ( ) )
for i := 0 ; i < 100 ; i ++ {
2022-03-09 14:17:29 -08:00
_ , err := app . Append ( 0 , labels . FromStrings (
"tens" , fmt . Sprintf ( "value%d" , i / 10 ) ,
"unique" , fmt . Sprintf ( "value%d" , i ) ,
) , 100 , 0 )
2021-02-09 09:38:35 -08:00
require . NoError ( t , err )
}
require . NoError ( t , app . Commit ( ) )
2024-01-24 01:47:56 -08:00
var uniqueWithout30s [ ] string
for i := 0 ; i < 100 ; i ++ {
if i / 10 != 3 {
uniqueWithout30s = append ( uniqueWithout30s , fmt . Sprintf ( "value%d" , i ) )
}
}
sort . Strings ( uniqueWithout30s )
2021-10-22 01:06:44 -07:00
testCases := [ ] struct {
2021-02-09 09:38:35 -08:00
name string
labelName string
matchers [ ] * labels . Matcher
expectedValues [ ] string
} {
{
name : "get tens based on unique id" ,
labelName : "tens" ,
matchers : [ ] * labels . Matcher { labels . MustNewMatcher ( labels . MatchEqual , "unique" , "value35" ) } ,
expectedValues : [ ] string { "value3" } ,
} , {
name : "get unique ids based on a ten" ,
labelName : "unique" ,
matchers : [ ] * labels . Matcher { labels . MustNewMatcher ( labels . MatchEqual , "tens" , "value1" ) } ,
expectedValues : [ ] string { "value10" , "value11" , "value12" , "value13" , "value14" , "value15" , "value16" , "value17" , "value18" , "value19" } ,
} , {
name : "get tens by pattern matching on unique id" ,
labelName : "tens" ,
matchers : [ ] * labels . Matcher { labels . MustNewMatcher ( labels . MatchRegexp , "unique" , "value[5-7]5" ) } ,
expectedValues : [ ] string { "value5" , "value6" , "value7" } ,
} , {
2024-01-24 01:47:56 -08:00
name : "get tens by matching for presence of unique label" ,
2021-02-09 09:38:35 -08:00
labelName : "tens" ,
matchers : [ ] * labels . Matcher { labels . MustNewMatcher ( labels . MatchNotEqual , "unique" , "" ) } ,
expectedValues : [ ] string { "value0" , "value1" , "value2" , "value3" , "value4" , "value5" , "value6" , "value7" , "value8" , "value9" } ,
2024-01-24 01:47:56 -08:00
} , {
name : "get unique IDs based on tens not being equal to a certain value, while not empty" ,
labelName : "unique" ,
matchers : [ ] * labels . Matcher {
labels . MustNewMatcher ( labels . MatchNotEqual , "tens" , "value3" ) ,
labels . MustNewMatcher ( labels . MatchNotEqual , "tens" , "" ) ,
} ,
expectedValues : uniqueWithout30s ,
2021-02-09 09:38:35 -08:00
} ,
}
for _ , tt := range testCases {
t . Run ( tt . name , func ( t * testing . T ) {
headIdxReader := head . indexRange ( 0 , 200 )
2023-09-14 07:02:04 -07:00
actualValues , err := headIdxReader . SortedLabelValues ( ctx , tt . labelName , tt . matchers ... )
2021-02-09 09:38:35 -08:00
require . NoError ( t , err )
require . Equal ( t , tt . expectedValues , actualValues )
2023-09-14 07:02:04 -07:00
actualValues , err = headIdxReader . LabelValues ( ctx , tt . labelName , tt . matchers ... )
2021-02-09 09:38:35 -08:00
sort . Strings ( actualValues )
require . NoError ( t , err )
require . Equal ( t , tt . expectedValues , actualValues )
} )
}
}
2021-07-20 05:38:08 -07:00
func TestHeadLabelNamesWithMatchers ( t * testing . T ) {
2023-07-11 05:57:57 -07:00
head , _ := newTestHead ( t , 1000 , wlog . CompressionNone , false )
2021-07-20 05:38:08 -07:00
defer func ( ) {
require . NoError ( t , head . Close ( ) )
} ( )
app := head . Appender ( context . Background ( ) )
for i := 0 ; i < 100 ; i ++ {
2022-03-09 14:17:29 -08:00
_ , err := app . Append ( 0 , labels . FromStrings (
"unique" , fmt . Sprintf ( "value%d" , i ) ,
) , 100 , 0 )
2021-07-20 05:38:08 -07:00
require . NoError ( t , err )
if i % 10 == 0 {
2022-03-09 14:17:29 -08:00
_ , err := app . Append ( 0 , labels . FromStrings (
"tens" , fmt . Sprintf ( "value%d" , i / 10 ) ,
"unique" , fmt . Sprintf ( "value%d" , i ) ,
) , 100 , 0 )
2021-07-20 05:38:08 -07:00
require . NoError ( t , err )
}
if i % 20 == 0 {
2022-03-09 14:17:29 -08:00
_ , err := app . Append ( 0 , labels . FromStrings (
"tens" , fmt . Sprintf ( "value%d" , i / 10 ) ,
"twenties" , fmt . Sprintf ( "value%d" , i / 20 ) ,
"unique" , fmt . Sprintf ( "value%d" , i ) ,
) , 100 , 0 )
2021-07-20 05:38:08 -07:00
require . NoError ( t , err )
}
}
require . NoError ( t , app . Commit ( ) )
testCases := [ ] struct {
name string
labelName string
matchers [ ] * labels . Matcher
expectedNames [ ] string
} {
{
name : "get with non-empty unique: all" ,
matchers : [ ] * labels . Matcher { labels . MustNewMatcher ( labels . MatchNotEqual , "unique" , "" ) } ,
expectedNames : [ ] string { "tens" , "twenties" , "unique" } ,
} , {
name : "get with unique ending in 1: only unique" ,
matchers : [ ] * labels . Matcher { labels . MustNewMatcher ( labels . MatchRegexp , "unique" , "value.*1" ) } ,
expectedNames : [ ] string { "unique" } ,
} , {
name : "get with unique = value20: all" ,
matchers : [ ] * labels . Matcher { labels . MustNewMatcher ( labels . MatchEqual , "unique" , "value20" ) } ,
expectedNames : [ ] string { "tens" , "twenties" , "unique" } ,
} , {
name : "get tens = 1: unique & tens" ,
matchers : [ ] * labels . Matcher { labels . MustNewMatcher ( labels . MatchEqual , "tens" , "value1" ) } ,
expectedNames : [ ] string { "tens" , "unique" } ,
} ,
}
for _ , tt := range testCases {
t . Run ( tt . name , func ( t * testing . T ) {
headIdxReader := head . indexRange ( 0 , 200 )
2023-09-14 01:39:51 -07:00
actualNames , err := headIdxReader . LabelNames ( context . Background ( ) , tt . matchers ... )
2021-07-20 05:38:08 -07:00
require . NoError ( t , err )
require . Equal ( t , tt . expectedNames , actualNames )
} )
}
}
2024-01-29 03:57:27 -08:00
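// TestHeadShardedPostings verifies that ShardedPostings splits a postings list into disjoint
// shards whose union equals the unsharded postings, with shard membership derived from the
// stable hash of the series labels.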
func TestHeadShardedPostings ( t * testing . T ) {
headOpts := newTestHeadDefaultOptions ( 1000 , false )
headOpts . EnableSharding = true
head , _ := newTestHeadWithOptions ( t , wlog . CompressionNone , headOpts )
defer func ( ) {
require . NoError ( t , head . Close ( ) )
} ( )
ctx := context . Background ( )
// Append some series.
app := head . Appender ( ctx )
for i := 0 ; i < 100 ; i ++ {
_ , err := app . Append ( 0 , labels . FromStrings ( "unique" , fmt . Sprintf ( "value%d" , i ) , "const" , "1" ) , 100 , 0 )
require . NoError ( t , err )
}
require . NoError ( t , app . Commit ( ) )
ir := head . indexRange ( 0 , 200 )
// List all postings for a given label value. This is what we expect to get
// in output from all shards.
p , err := ir . Postings ( ctx , "const" , "1" )
require . NoError ( t , err )
var expected [ ] storage . SeriesRef
for p . Next ( ) {
expected = append ( expected , p . At ( ) )
}
require . NoError ( t , p . Err ( ) )
require . NotEmpty ( t , expected )
// Query the same postings for each shard.
const shardCount = uint64 ( 4 )
actualShards := make ( map [ uint64 ] [ ] storage . SeriesRef )
actualPostings := make ( [ ] storage . SeriesRef , 0 , len ( expected ) )
for shardIndex := uint64 ( 0 ) ; shardIndex < shardCount ; shardIndex ++ {
p , err = ir . Postings ( ctx , "const" , "1" )
require . NoError ( t , err )
p = ir . ShardedPostings ( p , shardIndex , shardCount )
for p . Next ( ) {
ref := p . At ( )
actualShards [ shardIndex ] = append ( actualShards [ shardIndex ] , ref )
actualPostings = append ( actualPostings , ref )
}
require . NoError ( t , p . Err ( ) )
}
// We expect the postings merged from all shards to be exactly the same as the non-sharded ones.
require . ElementsMatch ( t , expected , actualPostings )
// We expect each series to land in the shard given by the hash of its labels.
for shardIndex , ids := range actualShards {
for _ , id := range ids {
var lbls labels . ScratchBuilder
require . NoError ( t , ir . Series ( id , & lbls , nil ) )
require . Equal ( t , shardIndex , labels . StableHash ( lbls . Labels ( ) ) % shardCount )
}
}
}
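// A minimal sketch (hypothetical helper, not part of the test above) of the shard-membership
// rule asserted in TestHeadShardedPostings: a series belongs to the shard given by the stable
// hash of its labels modulo the shard count.
func exampleShardForSeries(lbls labels.Labels, shardCount uint64) uint64 {
	return labels.StableHash(lbls) % shardCount
}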
2020-07-22 02:57:38 -07:00
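// TestErrReuseAppender verifies that an appender cannot be reused: once it has been committed
// or rolled back, any further Commit or Rollback returns an error.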
func TestErrReuseAppender ( t * testing . T ) {
2023-07-11 05:57:57 -07:00
head , _ := newTestHead ( t , 1000 , wlog . CompressionNone , false )
2020-07-22 02:57:38 -07:00
defer func ( ) {
2020-10-29 02:43:23 -07:00
require . NoError ( t , head . Close ( ) )
2020-07-22 02:57:38 -07:00
} ( )
2020-07-30 04:11:13 -07:00
app := head . Appender ( context . Background ( ) )
2022-03-09 14:17:29 -08:00
_ , err := app . Append ( 0 , labels . FromStrings ( "test" , "test" ) , 0 , 0 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
require . Error ( t , app . Commit ( ) )
require . Error ( t , app . Rollback ( ) )
2020-07-22 02:57:38 -07:00
2020-07-30 04:11:13 -07:00
app = head . Appender ( context . Background ( ) )
2022-03-09 14:17:29 -08:00
_ , err = app . Append ( 0 , labels . FromStrings ( "test" , "test" ) , 1 , 0 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . NoError ( t , app . Rollback ( ) )
require . Error ( t , app . Rollback ( ) )
require . Error ( t , app . Commit ( ) )
2020-07-22 02:57:38 -07:00
2020-07-30 04:11:13 -07:00
app = head . Appender ( context . Background ( ) )
2022-03-09 14:17:29 -08:00
_ , err = app . Append ( 0 , labels . FromStrings ( "test" , "test" ) , 2 , 0 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
require . Error ( t , app . Rollback ( ) )
require . Error ( t , app . Commit ( ) )
2020-07-22 02:57:38 -07:00
2020-07-30 04:11:13 -07:00
app = head . Appender ( context . Background ( ) )
2022-03-09 14:17:29 -08:00
_ , err = app . Append ( 0 , labels . FromStrings ( "test" , "test" ) , 3 , 0 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . NoError ( t , app . Rollback ( ) )
require . Error ( t , app . Commit ( ) )
require . Error ( t , app . Rollback ( ) )
2020-07-22 02:57:38 -07:00
}
2020-11-25 05:03:30 -08:00
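// TestHeadMintAfterTruncation checks how Head.Truncate adjusts the head min time, depending on
// whether the truncation time and the actual min time fall inside or outside the appendable window.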
func TestHeadMintAfterTruncation ( t * testing . T ) {
chunkRange := int64 ( 2000 )
2023-07-11 05:57:57 -07:00
head , _ := newTestHead ( t , chunkRange , wlog . CompressionNone , false )
2020-11-25 05:03:30 -08:00
app := head . Appender ( context . Background ( ) )
2022-03-09 14:17:29 -08:00
_ , err := app . Append ( 0 , labels . FromStrings ( "a" , "b" ) , 100 , 100 )
2020-11-25 05:03:30 -08:00
require . NoError ( t , err )
2022-03-09 14:17:29 -08:00
_ , err = app . Append ( 0 , labels . FromStrings ( "a" , "b" ) , 4000 , 200 )
2020-11-25 05:03:30 -08:00
require . NoError ( t , err )
2022-03-09 14:17:29 -08:00
_ , err = app . Append ( 0 , labels . FromStrings ( "a" , "b" ) , 8000 , 300 )
2020-11-25 05:03:30 -08:00
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
// Truncating outside the appendable window, with the actual mint also outside the
// appendable window, should leave mint at the actual mint.
require . NoError ( t , head . Truncate ( 3500 ) )
require . Equal ( t , int64 ( 4000 ) , head . MinTime ( ) )
require . Equal ( t , int64 ( 4000 ) , head . minValidTime . Load ( ) )
2021-05-13 18:34:11 -07:00
// After truncation outside the appendable window if the actual min time
2020-11-25 05:03:30 -08:00
// is in the appendable window then we should leave mint at the start of appendable window.
require . NoError ( t , head . Truncate ( 5000 ) )
require . Equal ( t , head . appendableMinValidTime ( ) , head . MinTime ( ) )
require . Equal ( t , head . appendableMinValidTime ( ) , head . minValidTime . Load ( ) )
// If the truncation time is inside the appendable window, then the min time
// should be the truncation time.
require . NoError ( t , head . Truncate ( 7500 ) )
require . Equal ( t , int64 ( 7500 ) , head . MinTime ( ) )
require . Equal ( t , int64 ( 7500 ) , head . minValidTime . Load ( ) )
require . NoError ( t , head . Close ( ) )
2021-03-16 02:47:45 -07:00
}
func TestHeadExemplars ( t * testing . T ) {
chunkRange := int64 ( 2000 )
2023-07-11 05:57:57 -07:00
head , _ := newTestHead ( t , chunkRange , wlog . CompressionNone , false )
2021-03-16 02:47:45 -07:00
app := head . Appender ( context . Background ( ) )
2024-02-15 06:19:54 -08:00
l := labels . FromStrings ( "trace_id" , "123" )
2021-03-16 02:47:45 -07:00
// It is perfectly valid to add Exemplars before the current start time -
// histogram buckets that haven't been updated in a while could still be
// exporting exemplars from an hour ago.
2022-03-09 14:17:29 -08:00
ref , err := app . Append ( 0 , labels . FromStrings ( "a" , "b" ) , 100 , 100 )
2021-03-16 02:47:45 -07:00
require . NoError ( t , err )
_ , err = app . AppendExemplar ( ref , l , exemplar . Exemplar {
Labels : l ,
HasTs : true ,
Ts : - 1000 ,
Value : 1 ,
} )
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
require . NoError ( t , head . Close ( ) )
2020-11-25 05:03:30 -08:00
}
2021-02-09 09:38:35 -08:00
func BenchmarkHeadLabelValuesWithMatchers ( b * testing . B ) {
chunkRange := int64 ( 2000 )
2023-07-11 05:57:57 -07:00
head , _ := newTestHead ( b , chunkRange , wlog . CompressionNone , false )
2021-02-09 09:38:35 -08:00
b . Cleanup ( func ( ) { require . NoError ( b , head . Close ( ) ) } )
2023-09-14 07:02:04 -07:00
ctx := context . Background ( )
2021-02-09 09:38:35 -08:00
app := head . Appender ( context . Background ( ) )
metricCount := 1000000
for i := 0 ; i < metricCount ; i ++ {
2022-03-09 14:17:29 -08:00
_ , err := app . Append ( 0 , labels . FromStrings (
"a_unique" , fmt . Sprintf ( "value%d" , i ) ,
"b_tens" , fmt . Sprintf ( "value%d" , i / ( metricCount / 10 ) ) ,
"c_ninety" , fmt . Sprintf ( "value%d" , i / ( metricCount / 10 ) / 9 ) , // "0" for the first 90%, then "1"
) , 100 , 0 )
2021-02-09 09:38:35 -08:00
require . NoError ( b , err )
}
require . NoError ( b , app . Commit ( ) )
headIdxReader := head . indexRange ( 0 , 200 )
2022-05-04 14:41:36 -07:00
matchers := [ ] * labels . Matcher { labels . MustNewMatcher ( labels . MatchEqual , "c_ninety" , "value0" ) }
2021-02-09 09:38:35 -08:00
b . ResetTimer ( )
b . ReportAllocs ( )
for benchIdx := 0 ; benchIdx < b . N ; benchIdx ++ {
2023-09-14 07:02:04 -07:00
actualValues , err := headIdxReader . LabelValues ( ctx , "b_tens" , matchers ... )
2021-02-09 09:38:35 -08:00
require . NoError ( b , err )
2023-12-07 03:35:01 -08:00
require . Len ( b , actualValues , 9 )
2021-02-09 09:38:35 -08:00
}
}
2021-04-26 15:43:22 -07:00
2022-09-27 07:02:05 -07:00
func TestIteratorSeekIntoBuffer ( t * testing . T ) {
2022-01-22 01:55:01 -08:00
dir := t . TempDir ( )
2021-04-26 15:43:22 -07:00
// This is usually taken from the Head, but we pass it manually here.
2022-01-10 05:36:45 -08:00
chunkDiskMapper , err := chunks . NewChunkDiskMapper ( nil , dir , chunkenc . NewPool ( ) , chunks . DefaultWriteBufferSize , chunks . DefaultWriteQueueSize )
2021-04-26 15:43:22 -07:00
require . NoError ( t , err )
defer func ( ) {
require . NoError ( t , chunkDiskMapper . Close ( ) )
} ( )
2023-05-25 13:12:32 -07:00
cOpts := chunkOpts {
chunkDiskMapper : chunkDiskMapper ,
chunkRange : 500 ,
samplesPerChunk : DefaultSamplesPerChunk ,
}
2021-04-26 15:43:22 -07:00
2024-01-29 03:57:27 -08:00
s := newMemSeries ( labels . Labels { } , 1 , 0 , defaultIsolationDisabled )
2021-04-26 15:43:22 -07:00
for i := 0 ; i < 7 ; i ++ {
2023-05-25 13:12:32 -07:00
ok , _ := s . append ( int64 ( i ) , float64 ( i ) , 0 , cOpts )
2021-04-26 15:43:22 -07:00
require . True ( t , ok , "sample append failed" )
}
// Note: memSeries.headChunk is a linked list of head chunks; older, not-yet-mmapped head
// chunks stay linked to the open one so that mmapping can happen outside the append path.
2023-07-31 02:10:24 -07:00
c , _ , _ , err := s . chunk ( 0 , chunkDiskMapper , & sync . Pool {
2023-02-21 01:30:11 -08:00
New : func ( ) interface { } {
return & memChunk { }
} ,
} )
require . NoError ( t , err )
it := c . chunk . Iterator ( nil )
2021-04-26 15:43:22 -07:00
// First point.
2021-11-28 23:54:23 -08:00
require . Equal ( t , chunkenc . ValFloat , it . Seek ( 0 ) )
2021-04-26 15:43:22 -07:00
ts , val := it . At ( )
require . Equal ( t , int64 ( 0 ) , ts )
require . Equal ( t , float64 ( 0 ) , val )
// Advance one point.
2021-11-28 23:54:23 -08:00
require . Equal ( t , chunkenc . ValFloat , it . Next ( ) )
2021-04-26 15:43:22 -07:00
ts , val = it . At ( )
require . Equal ( t , int64 ( 1 ) , ts )
require . Equal ( t , float64 ( 1 ) , val )
// Seeking an older timestamp shouldn't cause the iterator to go backwards.
2021-11-28 23:54:23 -08:00
require . Equal ( t , chunkenc . ValFloat , it . Seek ( 0 ) )
2021-04-26 15:43:22 -07:00
ts , val = it . At ( )
require . Equal ( t , int64 ( 1 ) , ts )
require . Equal ( t , float64 ( 1 ) , val )
// Seek into the buffer.
2021-11-28 23:54:23 -08:00
require . Equal ( t , chunkenc . ValFloat , it . Seek ( 3 ) )
2021-04-26 15:43:22 -07:00
ts , val = it . At ( )
require . Equal ( t , int64 ( 3 ) , ts )
require . Equal ( t , float64 ( 3 ) , val )
// Iterate through the rest of the buffer.
for i := 4 ; i < 7 ; i ++ {
2021-11-28 23:54:23 -08:00
require . Equal ( t , chunkenc . ValFloat , it . Next ( ) )
2021-04-26 15:43:22 -07:00
ts , val = it . At ( )
require . Equal ( t , int64 ( i ) , ts )
require . Equal ( t , float64 ( i ) , val )
}
// Run out of elements in the iterator.
2021-11-28 23:54:23 -08:00
require . Equal ( t , chunkenc . ValNone , it . Next ( ) )
require . Equal ( t , chunkenc . ValNone , it . Seek ( 7 ) )
2021-04-26 15:43:22 -07:00
}
2021-06-30 07:48:13 -07:00
2021-07-20 01:47:20 -07:00
// Tests https://github.com/prometheus/prometheus/issues/8221.
func TestChunkNotFoundHeadGCRace ( t * testing . T ) {
db := newTestDB ( t )
db . DisableCompactions ( )
2023-09-13 08:45:06 -07:00
ctx := context . Background ( )
2021-07-20 01:47:20 -07:00
var (
app = db . Appender ( context . Background ( ) )
2021-11-06 03:10:04 -07:00
ref = storage . SeriesRef ( 0 )
2021-07-20 01:47:20 -07:00
mint , maxt = int64 ( 0 ) , int64 ( 0 )
err error
)
// Appends samples to span over 1.5 block ranges.
// 7 chunks with 15s scrape interval.
for i := int64 ( 0 ) ; i <= 120 * 7 ; i ++ {
ts := i * DefaultBlockDuration / ( 4 * 120 )
ref , err = app . Append ( ref , labels . FromStrings ( "a" , "b" ) , ts , float64 ( i ) )
require . NoError ( t , err )
maxt = ts
}
require . NoError ( t , app . Commit ( ) )
// Get a querier before compaction (or when compaction is about to begin).
2023-09-12 03:37:38 -07:00
q , err := db . Querier ( mint , maxt )
2021-07-20 01:47:20 -07:00
require . NoError ( t , err )
// Query the compacted range and get the first series before compaction.
2023-09-12 03:37:38 -07:00
ss := q . Select ( context . Background ( ) , true , nil , labels . MustNewMatcher ( labels . MatchEqual , "a" , "b" ) )
2021-07-20 01:47:20 -07:00
require . True ( t , ss . Next ( ) )
s := ss . At ( )
var wg sync . WaitGroup
wg . Add ( 1 )
go func ( ) {
defer wg . Done ( )
// Compacting head while the querier spans the compaction time.
2023-09-13 08:45:06 -07:00
require . NoError ( t , db . Compact ( ctx ) )
2023-12-07 03:35:01 -08:00
require . NotEmpty ( t , db . Blocks ( ) )
2021-07-20 01:47:20 -07:00
} ( )
// Give enough time for compaction to finish.
// We expect it to be blocked until querier is closed.
<- time . After ( 3 * time . Second )
// Now consume after compaction when it's gone.
2022-09-20 10:16:45 -07:00
it := s . Iterator ( nil )
2021-11-28 23:54:23 -08:00
for it . Next ( ) == chunkenc . ValFloat {
2021-07-20 01:47:20 -07:00
_ , _ = it . At ( )
}
// It should error here without any fix for the mentioned issue.
require . NoError ( t , it . Err ( ) )
for ss . Next ( ) {
s = ss . At ( )
2022-09-20 10:16:45 -07:00
it = s . Iterator ( it )
2021-11-28 23:54:23 -08:00
for it . Next ( ) == chunkenc . ValFloat {
2021-07-20 01:47:20 -07:00
_ , _ = it . At ( )
}
require . NoError ( t , it . Err ( ) )
}
require . NoError ( t , ss . Err ( ) )
require . NoError ( t , q . Close ( ) )
wg . Wait ( )
}
// Tests https://github.com/prometheus/prometheus/issues/9079.
func TestDataMissingOnQueryDuringCompaction ( t * testing . T ) {
db := newTestDB ( t )
db . DisableCompactions ( )
2023-09-13 08:45:06 -07:00
ctx := context . Background ( )
2021-07-20 01:47:20 -07:00
var (
app = db . Appender ( context . Background ( ) )
2021-11-06 03:10:04 -07:00
ref = storage . SeriesRef ( 0 )
2021-07-20 01:47:20 -07:00
mint , maxt = int64 ( 0 ) , int64 ( 0 )
err error
)
// Appends samples to span over 1.5 block ranges.
2023-08-24 06:21:17 -07:00
expSamples := make ( [ ] chunks . Sample , 0 )
2021-07-20 01:47:20 -07:00
// 7 chunks with 15s scrape interval.
for i := int64 ( 0 ) ; i <= 120 * 7 ; i ++ {
ts := i * DefaultBlockDuration / ( 4 * 120 )
ref , err = app . Append ( ref , labels . FromStrings ( "a" , "b" ) , ts , float64 ( i ) )
require . NoError ( t , err )
maxt = ts
2021-11-28 23:54:23 -08:00
expSamples = append ( expSamples , sample { ts , float64 ( i ) , nil , nil } )
2021-07-20 01:47:20 -07:00
}
require . NoError ( t , app . Commit ( ) )
// Get a querier before compaction (or when compaction is about to begin).
2023-09-12 03:37:38 -07:00
q , err := db . Querier ( mint , maxt )
2021-07-20 01:47:20 -07:00
require . NoError ( t , err )
var wg sync . WaitGroup
wg . Add ( 1 )
go func ( ) {
defer wg . Done ( )
// Compacting head while the querier spans the compaction time.
2023-09-13 08:45:06 -07:00
require . NoError ( t , db . Compact ( ctx ) )
2023-12-07 03:35:01 -08:00
require . NotEmpty ( t , db . Blocks ( ) )
2021-07-20 01:47:20 -07:00
} ( )
// Give enough time for compaction to finish.
// We expect it to be blocked until querier is closed.
<- time . After ( 3 * time . Second )
// Query using the querier that was obtained before compaction.
series := query ( t , q , labels . MustNewMatcher ( labels . MatchEqual , "a" , "b" ) )
2023-08-24 06:21:17 -07:00
require . Equal ( t , map [ string ] [ ] chunks . Sample { ` { a="b"} ` : expSamples } , series )
2021-07-20 01:47:20 -07:00
wg . Wait ( )
}
func TestIsQuerierCollidingWithTruncation ( t * testing . T ) {
db := newTestDB ( t )
db . DisableCompactions ( )
var (
app = db . Appender ( context . Background ( ) )
2021-11-06 03:10:04 -07:00
ref = storage . SeriesRef ( 0 )
2021-07-20 01:47:20 -07:00
err error
)
for i := int64 ( 0 ) ; i <= 3000 ; i ++ {
ref , err = app . Append ( ref , labels . FromStrings ( "a" , "b" ) , i , float64 ( i ) )
require . NoError ( t , err )
}
require . NoError ( t , app . Commit ( ) )
// This mocks truncation.
db . head . memTruncationInProcess . Store ( true )
db . head . lastMemoryTruncationTime . Store ( 2000 )
// Test that IsQuerierValid suggests correct querier ranges.
cases := [ ] struct {
mint , maxt int64 // For the querier.
expShouldClose , expGetNew bool
expNewMint int64
} {
{ - 200 , - 100 , true , false , 0 } ,
{ - 200 , 300 , true , false , 0 } ,
{ 100 , 1900 , true , false , 0 } ,
{ 1900 , 2200 , true , true , 2000 } ,
{ 2000 , 2500 , false , false , 0 } ,
}
for _ , c := range cases {
t . Run ( fmt . Sprintf ( "mint=%d,maxt=%d" , c . mint , c . maxt ) , func ( t * testing . T ) {
shouldClose , getNew , newMint := db . head . IsQuerierCollidingWithTruncation ( c . mint , c . maxt )
require . Equal ( t , c . expShouldClose , shouldClose )
require . Equal ( t , c . expGetNew , getNew )
if getNew {
require . Equal ( t , c . expNewMint , newMint )
}
} )
}
}
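// TestWaitForPendingReadersInTimeRange verifies that WaitForPendingReadersInTimeRange blocks
// only while an open querier overlaps the given time range and returns once that querier is closed.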
func TestWaitForPendingReadersInTimeRange ( t * testing . T ) {
db := newTestDB ( t )
db . DisableCompactions ( )
sampleTs := func ( i int64 ) int64 { return i * DefaultBlockDuration / ( 4 * 120 ) }
var (
app = db . Appender ( context . Background ( ) )
2021-11-06 03:10:04 -07:00
ref = storage . SeriesRef ( 0 )
2021-07-20 01:47:20 -07:00
err error
)
for i := int64 ( 0 ) ; i <= 3000 ; i ++ {
ts := sampleTs ( i )
ref , err = app . Append ( ref , labels . FromStrings ( "a" , "b" ) , ts , float64 ( i ) )
require . NoError ( t , err )
}
require . NoError ( t , app . Commit ( ) )
truncMint , truncMaxt := int64 ( 1000 ) , int64 ( 2000 )
cases := [ ] struct {
mint , maxt int64
shouldWait bool
} {
{ 0 , 500 , false } , // Before truncation range.
{ 500 , 1500 , true } , // Overlaps with truncation at the start.
{ 1200 , 1700 , true } , // Within truncation range.
{ 1800 , 2500 , true } , // Overlaps with truncation at the end.
{ 2000 , 2500 , false } , // After truncation range.
{ 2100 , 2500 , false } , // After truncation range.
}
for _ , c := range cases {
t . Run ( fmt . Sprintf ( "mint=%d,maxt=%d,shouldWait=%t" , c . mint , c . maxt , c . shouldWait ) , func ( t * testing . T ) {
checkWaiting := func ( cl io . Closer ) {
var waitOver atomic . Bool
go func ( ) {
db . head . WaitForPendingReadersInTimeRange ( truncMint , truncMaxt )
waitOver . Store ( true )
} ( )
<- time . After ( 550 * time . Millisecond )
require . Equal ( t , ! c . shouldWait , waitOver . Load ( ) )
require . NoError ( t , cl . Close ( ) )
<- time . After ( 550 * time . Millisecond )
require . True ( t , waitOver . Load ( ) )
}
2023-09-12 03:37:38 -07:00
q , err := db . Querier ( c . mint , c . maxt )
2021-07-20 01:47:20 -07:00
require . NoError ( t , err )
checkWaiting ( q )
2023-09-12 03:37:38 -07:00
cq , err := db . ChunkQuerier ( c . mint , c . maxt )
2021-07-20 01:47:20 -07:00
require . NoError ( t , err )
checkWaiting ( cq )
} )
}
}
2021-08-04 23:46:08 -07:00
2024-09-05 09:17:42 -07:00
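// TestQueryOOOHeadDuringTruncate verifies that a querier opened while the Head is being
// truncated during compaction still returns the expected label names, label values, and both
// in-order and out-of-order samples.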
func TestQueryOOOHeadDuringTruncate ( t * testing . T ) {
2024-09-09 03:43:02 -07:00
testQueryOOOHeadDuringTruncate ( t ,
func ( db * DB , minT , maxT int64 ) ( storage . LabelQuerier , error ) {
return db . Querier ( minT , maxT )
} ,
func ( t * testing . T , lq storage . LabelQuerier , minT , _ int64 ) {
// Samples
q , ok := lq . ( storage . Querier )
require . True ( t , ok )
ss := q . Select ( context . Background ( ) , false , nil , labels . MustNewMatcher ( labels . MatchEqual , "a" , "b" ) )
require . True ( t , ss . Next ( ) )
s := ss . At ( )
require . False ( t , ss . Next ( ) ) // One series.
it := s . Iterator ( nil )
require . NotEqual ( t , chunkenc . ValNone , it . Next ( ) ) // Has some data.
2024-09-09 03:51:02 -07:00
require . Equal ( t , minT , it . AtT ( ) ) // It is an in-order sample.
2024-09-09 03:43:02 -07:00
require . NotEqual ( t , chunkenc . ValNone , it . Next ( ) ) // Has some data.
2024-09-09 03:51:02 -07:00
require . Equal ( t , minT + 50 , it . AtT ( ) ) // it is an out-of-order sample.
2024-09-09 03:43:02 -07:00
require . NoError ( t , it . Err ( ) )
} ,
)
}
func TestChunkQueryOOOHeadDuringTruncate ( t * testing . T ) {
testQueryOOOHeadDuringTruncate ( t ,
func ( db * DB , minT , maxT int64 ) ( storage . LabelQuerier , error ) {
return db . ChunkQuerier ( minT , maxT )
} ,
func ( t * testing . T , lq storage . LabelQuerier , minT , _ int64 ) {
// Chunks
q , ok := lq . ( storage . ChunkQuerier )
require . True ( t , ok )
ss := q . Select ( context . Background ( ) , false , nil , labels . MustNewMatcher ( labels . MatchEqual , "a" , "b" ) )
require . True ( t , ss . Next ( ) )
s := ss . At ( )
require . False ( t , ss . Next ( ) ) // One series.
metaIt := s . Iterator ( nil )
require . True ( t , metaIt . Next ( ) )
meta := metaIt . At ( )
// Samples
it := meta . Chunk . Iterator ( nil )
require . NotEqual ( t , chunkenc . ValNone , it . Next ( ) ) // Has some data.
2024-09-09 03:51:02 -07:00
require . Equal ( t , minT , it . AtT ( ) ) // It is an in-order sample.
2024-09-09 03:43:02 -07:00
require . NotEqual ( t , chunkenc . ValNone , it . Next ( ) ) // Has some data.
2024-09-09 03:51:02 -07:00
require . Equal ( t , minT + 50 , it . AtT ( ) ) // it is an out-of-order sample.
2024-09-09 03:43:02 -07:00
require . NoError ( t , it . Err ( ) )
} ,
)
}
func testQueryOOOHeadDuringTruncate ( t * testing . T , makeQuerier func ( db * DB , minT , maxT int64 ) ( storage . LabelQuerier , error ) , verify func ( t * testing . T , q storage . LabelQuerier , minT , maxT int64 ) ) {
2024-09-05 09:17:42 -07:00
const maxT int64 = 6000
dir := t . TempDir ( )
opts := DefaultOptions ( )
opts . EnableNativeHistograms = true
opts . OutOfOrderTimeWindow = maxT
opts . MinBlockDuration = maxT / 2 // So that head will compact up to 3000.
db , err := Open ( dir , nil , nil , opts , nil )
require . NoError ( t , err )
t . Cleanup ( func ( ) {
require . NoError ( t , db . Close ( ) )
} )
db . DisableCompactions ( )
var (
ref = storage . SeriesRef ( 0 )
app = db . Appender ( context . Background ( ) )
)
// Add in-order samples at every 100ms starting at 0ms.
for i := int64 ( 0 ) ; i < maxT ; i += 100 {
_ , err := app . Append ( ref , labels . FromStrings ( "a" , "b" ) , i , 0 )
require . NoError ( t , err )
}
// Add out-of-order samples at every 100ms starting at 50ms.
for i := int64 ( 50 ) ; i < maxT ; i += 100 {
_ , err := app . Append ( ref , labels . FromStrings ( "a" , "b" ) , i , 0 )
require . NoError ( t , err )
}
require . NoError ( t , app . Commit ( ) )
requireEqualOOOSamples ( t , int ( maxT / 100 - 1 ) , db )
// Synchronization points.
allowQueryToStart := make ( chan struct { } )
queryStarted := make ( chan struct { } )
compactionFinished := make ( chan struct { } )
db . head . memTruncationCallBack = func ( ) {
// Compaction has started; let the query begin, and wait until it has actually started, to simulate the race condition.
allowQueryToStart <- struct { } { }
<- queryStarted
}
go func ( ) {
db . Compact ( context . Background ( ) ) // Compact and write blocks up to 3000 (maxT/2).
compactionFinished <- struct { } { }
} ( )
// Wait for the compaction to start.
<- allowQueryToStart
2024-09-09 03:43:02 -07:00
q , err := makeQuerier ( db , 1500 , 2500 )
2024-09-05 09:17:42 -07:00
require . NoError ( t , err )
queryStarted <- struct { } { } // Unblock the compaction.
ctx := context . Background ( )
// Label names.
res , annots , err := q . LabelNames ( ctx , nil , labels . MustNewMatcher ( labels . MatchEqual , "a" , "b" ) )
require . NoError ( t , err )
require . Empty ( t , annots )
require . Equal ( t , [ ] string { "a" } , res )
// Label values.
res , annots , err = q . LabelValues ( ctx , "a" , nil , labels . MustNewMatcher ( labels . MatchEqual , "a" , "b" ) )
require . NoError ( t , err )
require . Empty ( t , annots )
require . Equal ( t , [ ] string { "b" } , res )
2024-09-09 03:43:02 -07:00
verify ( t , q , 1500 , 2500 )
2024-09-05 09:17:42 -07:00
require . NoError ( t , q . Close ( ) ) // Cannot be deferred as the compaction waits for queries to close before finishing.
<- compactionFinished // Wait for compaction otherwise Go test finds stray goroutines.
}
2021-06-30 07:48:13 -07:00
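// TestAppendHistogram appends counter and gauge histograms, both integer and float, and
// verifies that they can be queried back from the Head unchanged.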
func TestAppendHistogram ( t * testing . T ) {
2022-03-09 14:17:29 -08:00
l := labels . FromStrings ( "a" , "b" )
2021-06-30 07:48:13 -07:00
for _ , numHistograms := range [ ] int { 1 , 10 , 150 , 200 , 250 , 300 } {
2024-05-13 08:36:19 -07:00
t . Run ( strconv . Itoa ( numHistograms ) , func ( t * testing . T ) {
2023-07-11 05:57:57 -07:00
head , _ := newTestHead ( t , 1000 , wlog . CompressionNone , false )
2021-06-30 07:48:13 -07:00
t . Cleanup ( func ( ) {
require . NoError ( t , head . Close ( ) )
} )
require . NoError ( t , head . Init ( 0 ) )
2022-12-28 00:55:07 -08:00
ingestTs := int64 ( 0 )
2021-06-30 07:48:13 -07:00
app := head . Appender ( context . Background ( ) )
2023-08-24 06:21:17 -07:00
expHistograms := make ( [ ] chunks . Sample , 0 , 2 * numHistograms )
2023-01-10 15:49:13 -08:00
// Counter integer histograms.
2023-02-10 03:39:33 -08:00
for _ , h := range tsdbutil . GenerateTestHistograms ( numHistograms ) {
2022-12-28 00:55:07 -08:00
_ , err := app . AppendHistogram ( 0 , l , ingestTs , h , nil )
2021-06-30 07:48:13 -07:00
require . NoError ( t , err )
2023-01-18 08:59:29 -08:00
expHistograms = append ( expHistograms , sample { t : ingestTs , h : h } )
2022-12-28 00:55:07 -08:00
ingestTs ++
if ingestTs % 50 == 0 {
require . NoError ( t , app . Commit ( ) )
app = head . Appender ( context . Background ( ) )
}
}
2023-01-10 15:49:13 -08:00
// Gauge integer histograms.
2023-02-10 03:39:33 -08:00
for _ , h := range tsdbutil . GenerateTestGaugeHistograms ( numHistograms ) {
2023-01-10 15:49:13 -08:00
_ , err := app . AppendHistogram ( 0 , l , ingestTs , h , nil )
require . NoError ( t , err )
2023-01-18 08:59:29 -08:00
expHistograms = append ( expHistograms , sample { t : ingestTs , h : h } )
2023-01-10 15:49:13 -08:00
ingestTs ++
if ingestTs % 50 == 0 {
require . NoError ( t , app . Commit ( ) )
app = head . Appender ( context . Background ( ) )
}
}
2023-08-24 06:21:17 -07:00
expFloatHistograms := make ( [ ] chunks . Sample , 0 , 2 * numHistograms )
2023-01-10 15:49:13 -08:00
// Counter float histograms.
2023-02-10 03:39:33 -08:00
for _ , fh := range tsdbutil . GenerateTestFloatHistograms ( numHistograms ) {
2022-12-28 00:55:07 -08:00
_ , err := app . AppendHistogram ( 0 , l , ingestTs , nil , fh )
require . NoError ( t , err )
2023-01-18 08:59:29 -08:00
expFloatHistograms = append ( expFloatHistograms , sample { t : ingestTs , fh : fh } )
2022-12-28 00:55:07 -08:00
ingestTs ++
if ingestTs % 50 == 0 {
require . NoError ( t , app . Commit ( ) )
app = head . Appender ( context . Background ( ) )
}
2021-06-30 07:48:13 -07:00
}
2023-01-04 02:24:15 -08:00
2023-01-10 15:49:13 -08:00
// Gauge float histograms.
2023-02-10 03:39:33 -08:00
for _ , fh := range tsdbutil . GenerateTestGaugeFloatHistograms ( numHistograms ) {
2023-01-04 02:24:15 -08:00
_ , err := app . AppendHistogram ( 0 , l , ingestTs , nil , fh )
require . NoError ( t , err )
2023-01-18 08:59:29 -08:00
expFloatHistograms = append ( expFloatHistograms , sample { t : ingestTs , fh : fh } )
2023-01-04 02:24:15 -08:00
ingestTs ++
if ingestTs % 50 == 0 {
require . NoError ( t , app . Commit ( ) )
app = head . Appender ( context . Background ( ) )
}
}
2023-01-10 15:49:13 -08:00
2021-06-30 07:48:13 -07:00
require . NoError ( t , app . Commit ( ) )
q , err := NewBlockQuerier ( head , head . MinTime ( ) , head . MaxTime ( ) )
require . NoError ( t , err )
2021-07-04 03:42:37 -07:00
t . Cleanup ( func ( ) {
require . NoError ( t , q . Close ( ) )
} )
2021-06-30 07:48:13 -07:00
2023-09-12 03:37:38 -07:00
ss := q . Select ( context . Background ( ) , false , nil , labels . MustNewMatcher ( labels . MatchEqual , "a" , "b" ) )
2021-06-30 07:48:13 -07:00
require . True ( t , ss . Next ( ) )
s := ss . At ( )
require . False ( t , ss . Next ( ) )
2022-09-20 10:16:45 -07:00
it := s . Iterator ( nil )
2023-08-24 06:21:17 -07:00
actHistograms := make ( [ ] chunks . Sample , 0 , len ( expHistograms ) )
actFloatHistograms := make ( [ ] chunks . Sample , 0 , len ( expFloatHistograms ) )
2022-12-28 00:55:07 -08:00
for typ := it . Next ( ) ; typ != chunkenc . ValNone ; typ = it . Next ( ) {
2023-04-12 07:14:31 -07:00
switch typ {
case chunkenc . ValHistogram :
2024-01-23 08:02:14 -08:00
ts , h := it . AtHistogram ( nil )
2023-01-18 08:59:29 -08:00
actHistograms = append ( actHistograms , sample { t : ts , h : h } )
2023-04-12 07:14:31 -07:00
				case chunkenc.ValFloatHistogram:
					ts, fh := it.AtFloatHistogram(nil)
					actFloatHistograms = append(actFloatHistograms, sample{t: ts, fh: fh})
				}
			}

			compareSeries(
				t,
				map[string][]chunks.Sample{"dummy": expHistograms},
				map[string][]chunks.Sample{"dummy": actHistograms},
			)
			compareSeries(
				t,
				map[string][]chunks.Sample{"dummy": expFloatHistograms},
				map[string][]chunks.Sample{"dummy": actFloatHistograms},
			)
		})
	}
}
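
// TestHistogramInWALAndMmapChunk appends integer and float histograms (plus some
// float samples) to two series and verifies that the data survives both a restart
// that replays m-mapped chunks and WAL, and a restart that replays the WAL alone.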
func TestHistogramInWALAndMmapChunk(t *testing.T) {
	head, _ := newTestHead(t, 3000, wlog.CompressionNone, false)
	t.Cleanup(func() {
		require.NoError(t, head.Close())
	})
	require.NoError(t, head.Init(0))

	// Series with only histograms.
	s1 := labels.FromStrings("a", "b1")
	k1 := s1.String()
	numHistograms := 300
	exp := map[string][]chunks.Sample{}
	ts := int64(0)
	var app storage.Appender
	for _, gauge := range []bool{true, false} {
		app = head.Appender(context.Background())
		var hists []*histogram.Histogram
		if gauge {
			hists = tsdbutil.GenerateTestGaugeHistograms(numHistograms)
		} else {
			hists = tsdbutil.GenerateTestHistograms(numHistograms)
		}
		for _, h := range hists {
			h.NegativeSpans = h.PositiveSpans
			h.NegativeBuckets = h.PositiveBuckets
			_, err := app.AppendHistogram(0, s1, ts, h, nil)
			require.NoError(t, err)
			exp[k1] = append(exp[k1], sample{t: ts, h: h.Copy()})
			ts++
			if ts%5 == 0 {
				require.NoError(t, app.Commit())
				app = head.Appender(context.Background())
			}
		}
		require.NoError(t, app.Commit())
	}
	for _, gauge := range []bool{true, false} {
		app = head.Appender(context.Background())
		var hists []*histogram.FloatHistogram
		if gauge {
			hists = tsdbutil.GenerateTestGaugeFloatHistograms(numHistograms)
		} else {
			hists = tsdbutil.GenerateTestFloatHistograms(numHistograms)
		}
		for _, h := range hists {
			h.NegativeSpans = h.PositiveSpans
			h.NegativeBuckets = h.PositiveBuckets
			_, err := app.AppendHistogram(0, s1, ts, nil, h)
			require.NoError(t, err)
			exp[k1] = append(exp[k1], sample{t: ts, fh: h.Copy()})
			ts++
			if ts%5 == 0 {
				require.NoError(t, app.Commit())
				app = head.Appender(context.Background())
			}
		}
		require.NoError(t, app.Commit())
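		// m-mapping of completed head chunks no longer happens on the append path;
		// memSeries.headChunk is a linked list of chunks still waiting to be m-mapped,
		// so trigger the m-mapping explicitly here to get those chunks written out.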
		head.mmapHeadChunks()
	}

	// There should be 25 mmap chunks in s1.
	ms := head.series.getByHash(s1.Hash(), s1)
	require.Len(t, ms.mmappedChunks, 25)
	expMmapChunks := make([]*mmappedChunk, 0, 25)
	for _, mmap := range ms.mmappedChunks {
		require.Positive(t, mmap.numSamples)
		cpy := *mmap
		expMmapChunks = append(expMmapChunks, &cpy)
	}
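
	// Remember how many samples the open head chunk holds; after restarting the
	// Head below, the restored head chunk should hold the same number of samples.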
	expHeadChunkSamples := ms.headChunks.chunk.NumSamples()
	require.Positive(t, expHeadChunkSamples)

	// Series with mix of histograms and float.
	s2 := labels.FromStrings("a", "b2")
	k2 := s2.String()
	ts = 0
	for _, gauge := range []bool{true, false} {
		app = head.Appender(context.Background())
		var hists []*histogram.Histogram
		if gauge {
			hists = tsdbutil.GenerateTestGaugeHistograms(100)
		} else {
			hists = tsdbutil.GenerateTestHistograms(100)
		}
		for _, h := range hists {
			ts++
			h.NegativeSpans = h.PositiveSpans
			h.NegativeBuckets = h.PositiveBuckets
			_, err := app.AppendHistogram(0, s2, ts, h, nil)
			require.NoError(t, err)
			eh := h.Copy()
			if !gauge && ts > 30 && (ts-10)%20 == 1 {
				// Need "unknown" hint after float sample.
				eh.CounterResetHint = histogram.UnknownCounterReset
			}
			exp[k2] = append(exp[k2], sample{t: ts, h: eh})
			if ts%20 == 0 {
				require.NoError(t, app.Commit())
				app = head.Appender(context.Background())
				// Add some float samples.
				for i := 0; i < 10; i++ {
					ts++
					_, err := app.Append(0, s2, ts, float64(ts))
					require.NoError(t, err)
					exp[k2] = append(exp[k2], sample{t: ts, f: float64(ts)})
				}
				require.NoError(t, app.Commit())
				app = head.Appender(context.Background())
			}
		}
		require.NoError(t, app.Commit())
	}

	for _, gauge := range []bool{true, false} {
		app = head.Appender(context.Background())
		var hists []*histogram.FloatHistogram
		if gauge {
			hists = tsdbutil.GenerateTestGaugeFloatHistograms(100)
		} else {
			hists = tsdbutil.GenerateTestFloatHistograms(100)
		}
		for _, h := range hists {
			ts++
			h.NegativeSpans = h.PositiveSpans
			h.NegativeBuckets = h.PositiveBuckets
			_, err := app.AppendHistogram(0, s2, ts, nil, h)
			require.NoError(t, err)
			eh := h.Copy()
			if !gauge && ts > 30 && (ts-10)%20 == 1 {
				// Need "unknown" hint after float sample.
				eh.CounterResetHint = histogram.UnknownCounterReset
			}
			exp[k2] = append(exp[k2], sample{t: ts, fh: eh})
			if ts%20 == 0 {
				require.NoError(t, app.Commit())
				app = head.Appender(context.Background())
				// Add some float samples.
				for i := 0; i < 10; i++ {
					ts++
					_, err := app.Append(0, s2, ts, float64(ts))
					require.NoError(t, err)
					exp[k2] = append(exp[k2], sample{t: ts, f: float64(ts)})
				}
				require.NoError(t, app.Commit())
				app = head.Appender(context.Background())
			}
		}
		require.NoError(t, app.Commit())
	}

	// Restart head.
	require.NoError(t, head.Close())
	startHead := func() {
		w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, wlog.CompressionNone)
		require.NoError(t, err)
		head, err = NewHead(nil, nil, w, nil, head.opts, nil)
		require.NoError(t, err)
		require.NoError(t, head.Init(0))
	}
	startHead()

	// Checking contents of s1.
	ms = head.series.getByHash(s1.Hash(), s1)
	require.Equal(t, expMmapChunks, ms.mmappedChunks)
	require.Equal(t, expHeadChunkSamples, ms.headChunks.chunk.NumSamples())

	testQuery := func() {
		q, err := NewBlockQuerier(head, head.MinTime(), head.MaxTime())
		require.NoError(t, err)
		act := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "a", "b.*"))
		compareSeries(t, exp, act)
	}
	testQuery()

	// Restart with no mmap chunks to test WAL replay.
	require.NoError(t, head.Close())
	require.NoError(t, os.RemoveAll(mmappedChunksDir(head.opts.ChunkDirRoot)))
	startHead()
	testQuery()
}
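
// TestChunkSnapshot tests that a chunk snapshot written on shutdown can be replayed
// on startup, together with any WAL data written after the snapshot, restoring float
// samples, histograms, tombstones, and exemplars.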
func TestChunkSnapshot(t *testing.T) {
	head, _ := newTestHead(t, 120*4, wlog.CompressionNone, false)
	defer func() {
		head.opts.EnableMemorySnapshotOnShutdown = false
		require.NoError(t, head.Close())
	}()

	type ex struct {
		seriesLabels labels.Labels
		e            exemplar.Exemplar
	}

	numSeries := 10
	expSeries := make(map[string][]chunks.Sample)
	expHist := make(map[string][]chunks.Sample)
	expFloatHist := make(map[string][]chunks.Sample)
	expTombstones := make(map[storage.SeriesRef]tombstones.Intervals)
	expExemplars := make([]ex, 0)
	histograms := tsdbutil.GenerateTestGaugeHistograms(481)
	floatHistogram := tsdbutil.GenerateTestGaugeFloatHistograms(481)
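	// The timestamps below run from 1 to 480 and index these slices directly,
	// hence the 481 generated entries (index 0 is never used).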

	addExemplar := func(app storage.Appender, ref storage.SeriesRef, lbls labels.Labels, ts int64) {
		e := ex{
			seriesLabels: lbls,
			e: exemplar.Exemplar{
				Labels: labels.FromStrings("trace_id", strconv.Itoa(rand.Int())),
				Value:  rand.Float64(),
				Ts:     ts,
			},
		}
		expExemplars = append(expExemplars, e)
		_, err := app.AppendExemplar(ref, e.seriesLabels, e.e)
		require.NoError(t, err)
	}
	checkSamples := func() {
		q, err := NewBlockQuerier(head, math.MinInt64, math.MaxInt64)
		require.NoError(t, err)
		series := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
		require.Equal(t, expSeries, series)
	}
	checkHistograms := func() {
		q, err := NewBlockQuerier(head, math.MinInt64, math.MaxInt64)
		require.NoError(t, err)
		series := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "hist", "baz.*"))
		require.Equal(t, expHist, series)
	}
	checkFloatHistograms := func() {
		q, err := NewBlockQuerier(head, math.MinInt64, math.MaxInt64)
		require.NoError(t, err)
		series := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "floathist", "bat.*"))
		require.Equal(t, expFloatHist, series)
	}
	checkTombstones := func() {
		tr, err := head.Tombstones()
		require.NoError(t, err)
		actTombstones := make(map[storage.SeriesRef]tombstones.Intervals)
		require.NoError(t, tr.Iter(func(ref storage.SeriesRef, itvs tombstones.Intervals) error {
			for _, itv := range itvs {
				actTombstones[ref].Add(itv)
			}
			return nil
		}))
		require.Equal(t, expTombstones, actTombstones)
	}
	checkExemplars := func() {
		actExemplars := make([]ex, 0, len(expExemplars))
		err := head.exemplars.IterateExemplars(func(seriesLabels labels.Labels, e exemplar.Exemplar) error {
			actExemplars = append(actExemplars, ex{
				seriesLabels: seriesLabels,
				e:            e,
			})
			return nil
		})
		require.NoError(t, err)
		// Verify both that the expected exemplars exist and that they appear in the right order in the buffer.
		testutil.RequireEqualWithOptions(t, expExemplars, actExemplars, []cmp.Option{cmp.AllowUnexported(ex{})})
	}

	var (
		wlast, woffset int
		err            error
	)
	closeHeadAndCheckSnapshot := func() {
		require.NoError(t, head.Close())
		_, sidx, soffset, err := LastChunkSnapshot(head.opts.ChunkDirRoot)
		require.NoError(t, err)
		require.Equal(t, wlast, sidx)
		require.Equal(t, woffset, soffset)
	}
	openHeadAndCheckReplay := func() {
		w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, wlog.CompressionNone)
		require.NoError(t, err)
		head, err = NewHead(nil, nil, w, nil, head.opts, nil)
		require.NoError(t, err)
		require.NoError(t, head.Init(math.MinInt64))
		checkSamples()
		checkHistograms()
		checkFloatHistograms()
		checkTombstones()
		checkExemplars()
	}

	{ // Initial data that goes into snapshot.
		// Add some initial samples with >=1 m-map chunk.
		app := head.Appender(context.Background())
		for i := 1; i <= numSeries; i++ {
			lbls := labels.FromStrings("foo", fmt.Sprintf("bar%d", i))
			lblStr := lbls.String()
			lblsHist := labels.FromStrings("hist", fmt.Sprintf("baz%d", i))
			lblsHistStr := lblsHist.String()
			lblsFloatHist := labels.FromStrings("floathist", fmt.Sprintf("bat%d", i))
			lblsFloatHistStr := lblsFloatHist.String()
			// 240 samples should m-map at least 1 chunk.
			for ts := int64(1); ts <= 240; ts++ {
				val := rand.Float64()
				expSeries[lblStr] = append(expSeries[lblStr], sample{ts, val, nil, nil})
				ref, err := app.Append(0, lbls, ts, val)
				require.NoError(t, err)

				hist := histograms[int(ts)]
				expHist[lblsHistStr] = append(expHist[lblsHistStr], sample{ts, 0, hist, nil})
				_, err = app.AppendHistogram(0, lblsHist, ts, hist, nil)
				require.NoError(t, err)

				floatHist := floatHistogram[int(ts)]
				expFloatHist[lblsFloatHistStr] = append(expFloatHist[lblsFloatHistStr], sample{ts, 0, nil, floatHist})
				_, err = app.AppendHistogram(0, lblsFloatHist, ts, nil, floatHist)
				require.NoError(t, err)

				// Add an exemplar and commit to create multiple WAL records.
				if ts%10 == 0 {
					addExemplar(app, ref, lbls, ts)
					require.NoError(t, app.Commit())
					app = head.Appender(context.Background())
				}
			}
		}
		require.NoError(t, app.Commit())

		// Add some tombstones.
		var enc record.Encoder
		for i := 1; i <= numSeries; i++ {
			ref := storage.SeriesRef(i)
			itvs := tombstones.Intervals{
				{Mint: 1234, Maxt: 2345},
				{Mint: 3456, Maxt: 4567},
			}
			for _, itv := range itvs {
				expTombstones[ref].Add(itv)
			}
			head.tombstones.AddInterval(ref, itvs...)
			err := head.wal.Log(enc.Tombstones([]tombstones.Stone{
				{Ref: ref, Intervals: itvs},
			}, nil))
			require.NoError(t, err)
		}
	}

	// These references should be the ones used for the snapshot.
	wlast, woffset, err = head.wal.LastSegmentAndOffset()
	require.NoError(t, err)
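	// 32*1024 is the WAL page size; a partially written page is padded to a full
	// page before the snapshot is taken, hence the rounding below.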
	if woffset != 0 && woffset < 32*1024 {
		// The page is always filled before taking the snapshot.
		woffset = 32 * 1024
	}

	{
		// Creating snapshot and verifying it.
		head.opts.EnableMemorySnapshotOnShutdown = true
		closeHeadAndCheckSnapshot() // This will create a snapshot.

		// Test the replay of snapshot.
		openHeadAndCheckReplay()
	}

	{ // Additional data that goes only into the WAL and m-mapped chunks, not into the snapshot. This mimics having an old snapshot on disk.
		// Add more samples.
		app := head.Appender(context.Background())
		for i := 1; i <= numSeries; i++ {
			lbls := labels.FromStrings("foo", fmt.Sprintf("bar%d", i))
			lblStr := lbls.String()
			lblsHist := labels.FromStrings("hist", fmt.Sprintf("baz%d", i))
			lblsHistStr := lblsHist.String()
			lblsFloatHist := labels.FromStrings("floathist", fmt.Sprintf("bat%d", i))
			lblsFloatHistStr := lblsFloatHist.String()
			// 240 samples should m-map at least 1 chunk.
			for ts := int64(241); ts <= 480; ts++ {
				val := rand.Float64()
				expSeries[lblStr] = append(expSeries[lblStr], sample{ts, val, nil, nil})
				ref, err := app.Append(0, lbls, ts, val)
				require.NoError(t, err)

				hist := histograms[int(ts)]
				expHist[lblsHistStr] = append(expHist[lblsHistStr], sample{ts, 0, hist, nil})
				_, err = app.AppendHistogram(0, lblsHist, ts, hist, nil)
				require.NoError(t, err)

				floatHist := floatHistogram[int(ts)]
				expFloatHist[lblsFloatHistStr] = append(expFloatHist[lblsFloatHistStr], sample{ts, 0, nil, floatHist})
				_, err = app.AppendHistogram(0, lblsFloatHist, ts, nil, floatHist)
				require.NoError(t, err)

				// Add an exemplar and commit to create multiple WAL records.
				if ts%10 == 0 {
					addExemplar(app, ref, lbls, ts)
					require.NoError(t, app.Commit())
					app = head.Appender(context.Background())
				}
			}
		}
		require.NoError(t, app.Commit())

		// Add more tombstones.
		var enc record.Encoder
		for i := 1; i <= numSeries; i++ {
			ref := storage.SeriesRef(i)
			itvs := tombstones.Intervals{
				{Mint: 12345, Maxt: 23456},
				{Mint: 34567, Maxt: 45678},
			}
			for _, itv := range itvs {
				expTombstones[ref].Add(itv)
			}
			head.tombstones.AddInterval(ref, itvs...)
			err := head.wal.Log(enc.Tombstones([]tombstones.Stone{
				{Ref: ref, Intervals: itvs},
			}, nil))
			require.NoError(t, err)
		}
	}

	{
		// Close Head and verify that new snapshot was not created.
		head.opts.EnableMemorySnapshotOnShutdown = false
		closeHeadAndCheckSnapshot() // This should not create a snapshot.

		// Test the replay of snapshot, m-map chunks, and WAL.
		head.opts.EnableMemorySnapshotOnShutdown = true // Enabled to read from snapshot.
		openHeadAndCheckReplay()
	}

	// Creating another snapshot should delete the older snapshot and replay still works fine.
	wlast, woffset, err = head.wal.LastSegmentAndOffset()
	require.NoError(t, err)
	if woffset != 0 && woffset < 32*1024 {
		// The page is always filled before taking the snapshot.
		woffset = 32 * 1024
	}
2021-09-08 07:23:44 -07:00
{
// Close Head and verify that new snapshot was created.
closeHeadAndCheckSnapshot ( )
2021-08-11 03:13:17 -07:00
2021-09-08 07:23:44 -07:00
// Verify that there is only 1 snapshot.
2022-04-27 02:24:36 -07:00
files , err := os . ReadDir ( head . opts . ChunkDirRoot )
2021-08-11 03:13:17 -07:00
require . NoError ( t , err )
2021-09-08 07:23:44 -07:00
snapshots := 0
for i := len ( files ) - 1 ; i >= 0 ; i -- {
fi := files [ i ]
if strings . HasPrefix ( fi . Name ( ) , chunkSnapshotPrefix ) {
snapshots ++
require . Equal ( t , chunkSnapshotDir ( wlast , woffset ) , fi . Name ( ) )
2021-08-11 03:13:17 -07:00
}
2021-09-08 07:23:44 -07:00
}
require . Equal ( t , 1 , snapshots )
2021-08-06 09:51:01 -07:00
2021-09-08 07:23:44 -07:00
// Test the replay of snapshot.
head . opts . EnableMemorySnapshotOnShutdown = true // Enabled to read from snapshot.
2021-10-04 22:21:25 -07:00
// Disabling exemplars to check that it does not hard fail replay
// https://github.com/prometheus/prometheus/issues/9437#issuecomment-933285870.
head . opts . EnableExemplarStorage = false
head . opts . MaxExemplars . Store ( 0 )
expExemplars = expExemplars [ : 0 ]
2021-09-08 07:23:44 -07:00
openHeadAndCheckReplay ( )
2021-10-04 22:21:25 -07:00
require . Equal ( t , 0.0 , prom_testutil . ToFloat64 ( head . metrics . snapshotReplayErrorTotal ) )
2021-08-06 09:51:01 -07:00
}
}
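
// TestSnapshotError verifies that a corrupted chunk snapshot does not hard-fail
// replay: the error is counted in snapshotReplayErrorTotal and the Head comes up empty.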
func TestSnapshotError(t *testing.T) {
	head, _ := newTestHead(t, 120*4, wlog.CompressionNone, false)
	defer func() {
		head.opts.EnableMemorySnapshotOnShutdown = false
		require.NoError(t, head.Close())
	}()

	// Add a sample.
	app := head.Appender(context.Background())
	lbls := labels.FromStrings("foo", "bar")
	_, err := app.Append(0, lbls, 99, 99)
	require.NoError(t, err)
	// Add histograms.
	hist := tsdbutil.GenerateTestGaugeHistograms(1)[0]
	floatHist := tsdbutil.GenerateTestGaugeFloatHistograms(1)[0]
	lblsHist := labels.FromStrings("hist", "bar")
	lblsFloatHist := labels.FromStrings("floathist", "bar")
	_, err = app.AppendHistogram(0, lblsHist, 99, hist, nil)
	require.NoError(t, err)
	_, err = app.AppendHistogram(0, lblsFloatHist, 99, nil, floatHist)
	require.NoError(t, err)
	require.NoError(t, app.Commit())

	// Add some tombstones.
	itvs := tombstones.Intervals{
		{Mint: 1234, Maxt: 2345},
		{Mint: 3456, Maxt: 4567},
	}
	head.tombstones.AddInterval(1, itvs...)

	// Check existence of data.
	require.NotNil(t, head.series.getByHash(lbls.Hash(), lbls))
	tm, err := head.tombstones.Get(1)
	require.NoError(t, err)
	require.NotEmpty(t, tm)

	head.opts.EnableMemorySnapshotOnShutdown = true
	require.NoError(t, head.Close()) // This will create a snapshot.

	// Remove the WAL so that we don't load from it.
	require.NoError(t, os.RemoveAll(head.wal.Dir()))

	// Corrupt the snapshot.
	snapDir, _, _, err := LastChunkSnapshot(head.opts.ChunkDirRoot)
	require.NoError(t, err)
	files, err := os.ReadDir(snapDir)
	require.NoError(t, err)
	f, err := os.OpenFile(path.Join(snapDir, files[0].Name()), os.O_RDWR, 0)
	require.NoError(t, err)
	// Keep a backup of the snapshot contents so it can be restored for the second corruption case below.
	snapshotBackup, err := io.ReadAll(f)
	require.NoError(t, err)
	_, err = f.WriteAt([]byte{0b11111111}, 18)
	require.NoError(t, err)
	require.NoError(t, f.Close())

	// Create a new Head, which should replay this snapshot.
	w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, wlog.CompressionNone)
	require.NoError(t, err)
	// Testing https://github.com/prometheus/prometheus/issues/9437 with the registry.
	head, err = NewHead(prometheus.NewRegistry(), nil, w, nil, head.opts, nil)
	require.NoError(t, err)
	require.NoError(t, head.Init(math.MinInt64))

	// There should be no series in memory after the snapshot error, since the WAL was removed.
	require.Equal(t, 1.0, prom_testutil.ToFloat64(head.metrics.snapshotReplayErrorTotal))
	require.Equal(t, uint64(0), head.NumSeries())
	require.Nil(t, head.series.getByHash(lbls.Hash(), lbls))
	tm, err = head.tombstones.Get(1)
	require.NoError(t, err)
	require.Empty(t, tm)

	require.NoError(t, head.Close())

	// Test corruption in the middle of the snapshot.
	f, err = os.OpenFile(path.Join(snapDir, files[0].Name()), os.O_RDWR, 0)
	require.NoError(t, err)
	_, err = f.WriteAt(snapshotBackup, 0)
	require.NoError(t, err)
	_, err = f.WriteAt([]byte{0b11111111}, 300)
	require.NoError(t, err)
	require.NoError(t, f.Close())

	c := &countSeriesLifecycleCallback{}
	opts := head.opts
	opts.SeriesCallback = c

	w, err = wlog.NewSize(nil, nil, head.wal.Dir(), 32768, wlog.CompressionNone)
	require.NoError(t, err)
	head, err = NewHead(prometheus.NewRegistry(), nil, w, nil, head.opts, nil)
	require.NoError(t, err)
	require.NoError(t, head.Init(math.MinInt64))

	// There should be no series in memory after the snapshot error, since the WAL was removed.
	require.Equal(t, 1.0, prom_testutil.ToFloat64(head.metrics.snapshotReplayErrorTotal))
	require.Nil(t, head.series.getByHash(lbls.Hash(), lbls))
	require.Equal(t, uint64(0), head.NumSeries())

	// Since the snapshot could replay certain series, we continue invoking the create hooks.
	// In such instances, we need to ensure that we also trigger the delete hooks when resetting the memory.
	require.Equal(t, int64(2), c.created.Load())
	require.Equal(t, int64(2), c.deleted.Load())
	require.Equal(t, 2.0, prom_testutil.ToFloat64(head.metrics.seriesRemoved))
	require.Equal(t, 2.0, prom_testutil.ToFloat64(head.metrics.seriesCreated))
}
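
// TestHistogramMetrics checks the samplesAppended metric for histogram samples and
// that it resets to 0 after the Head is restarted (it is not carried over by WAL replay).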
func TestHistogramMetrics(t *testing.T) {
	numHistograms := 10
	head, _ := newTestHead(t, 1000, wlog.CompressionNone, false)
	t.Cleanup(func() {
		require.NoError(t, head.Close())
	})
	require.NoError(t, head.Init(0))

	expHSeries, expHSamples := 0, 0
	for x := 0; x < 5; x++ {
		expHSeries++
		l := labels.FromStrings("a", fmt.Sprintf("b%d", x))
		for i, h := range tsdbutil.GenerateTestHistograms(numHistograms) {
			app := head.Appender(context.Background())
			_, err := app.AppendHistogram(0, l, int64(i), h, nil)
			require.NoError(t, err)
			require.NoError(t, app.Commit())
			expHSamples++
		}
		for i, fh := range tsdbutil.GenerateTestFloatHistograms(numHistograms) {
			app := head.Appender(context.Background())
			_, err := app.AppendHistogram(0, l, int64(numHistograms+i), nil, fh)
			require.NoError(t, err)
			require.NoError(t, app.Commit())
			expHSamples++
		}
	}

	require.Equal(t, float64(expHSamples), prom_testutil.ToFloat64(head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram)))

	require.NoError(t, head.Close())
	w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, wlog.CompressionNone)
	require.NoError(t, err)
	head, err = NewHead(nil, nil, w, nil, head.opts, nil)
	require.NoError(t, err)
	require.NoError(t, head.Init(0))

	require.Equal(t, float64(0), prom_testutil.ToFloat64(head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram))) // Counter reset.
}

func TestHistogramStaleSample(t *testing.T) {
	t.Run("integer histogram", func(t *testing.T) {
		testHistogramStaleSampleHelper(t, false)
	})
	t.Run("float histogram", func(t *testing.T) {
		testHistogramStaleSampleHelper(t, true)
	})
}

func testHistogramStaleSampleHelper(t *testing.T, floatHistogram bool) {
	t.Helper()
	l := labels.FromStrings("a", "b")
	numHistograms := 20
	head, _ := newTestHead(t, 100000, wlog.CompressionNone, false)
	t.Cleanup(func() {
		require.NoError(t, head.Close())
	})
	require.NoError(t, head.Init(0))

	type timedHistogram struct {
		t  int64
		h  *histogram.Histogram
		fh *histogram.FloatHistogram
	}

	expHistograms := make([]timedHistogram, 0, numHistograms)

	testQuery := func(numStale int) {
		q, err := NewBlockQuerier(head, head.MinTime(), head.MaxTime())
		require.NoError(t, err)
		t.Cleanup(func() {
			require.NoError(t, q.Close())
		})

		ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
		require.True(t, ss.Next())
		s := ss.At()
		require.False(t, ss.Next())

		it := s.Iterator(nil)
		actHistograms := make([]timedHistogram, 0, len(expHistograms))
		for typ := it.Next(); typ != chunkenc.ValNone; typ = it.Next() {
			switch typ {
			case chunkenc.ValHistogram:
				t, h := it.AtHistogram(nil)
				actHistograms = append(actHistograms, timedHistogram{t: t, h: h})
			case chunkenc.ValFloatHistogram:
				t, h := it.AtFloatHistogram(nil)
				actHistograms = append(actHistograms, timedHistogram{t: t, fh: h})
			}
		}

		// We cannot compare StaleNaN with require.Equal, hence checking each histogram manually.
		require.Equal(t, len(expHistograms), len(actHistograms))
		actNumStale := 0
		for i, eh := range expHistograms {
			ah := actHistograms[i]
			if floatHistogram {
				switch {
				case value.IsStaleNaN(eh.fh.Sum):
					actNumStale++
					require.True(t, value.IsStaleNaN(ah.fh.Sum))
					// To make require.Equal work.
					ah.fh.Sum = 0
					eh.fh = eh.fh.Copy()
					eh.fh.Sum = 0
				case i > 0:
					prev := expHistograms[i-1]
					if prev.fh == nil || value.IsStaleNaN(prev.fh.Sum) {
						eh.fh.CounterResetHint = histogram.UnknownCounterReset
					}
				}
				require.Equal(t, eh, ah)
			} else {
				switch {
				case value.IsStaleNaN(eh.h.Sum):
					actNumStale++
					require.True(t, value.IsStaleNaN(ah.h.Sum))
					// To make require.Equal work.
					ah.h.Sum = 0
					eh.h = eh.h.Copy()
					eh.h.Sum = 0
				case i > 0:
					prev := expHistograms[i-1]
					if prev.h == nil || value.IsStaleNaN(prev.h.Sum) {
						eh.h.CounterResetHint = histogram.UnknownCounterReset
					}
				}
				require.Equal(t, eh, ah)
			}
		}
		require.Equal(t, numStale, actNumStale)
	}

	// Adding stale in the same appender.
	app := head.Appender(context.Background())
	for _, h := range tsdbutil.GenerateTestHistograms(numHistograms) {
		var err error
		if floatHistogram {
			_, err = app.AppendHistogram(0, l, 100*int64(len(expHistograms)), nil, h.ToFloat(nil))
			expHistograms = append(expHistograms, timedHistogram{t: 100 * int64(len(expHistograms)), fh: h.ToFloat(nil)})
		} else {
			_, err = app.AppendHistogram(0, l, 100*int64(len(expHistograms)), h, nil)
			expHistograms = append(expHistograms, timedHistogram{t: 100 * int64(len(expHistograms)), h: h})
		}
		require.NoError(t, err)
	}

	// +1 so that delta-of-delta is not 0.
	_, err := app.Append(0, l, 100*int64(len(expHistograms))+1, math.Float64frombits(value.StaleNaN))
	require.NoError(t, err)
	if floatHistogram {
		expHistograms = append(expHistograms, timedHistogram{t: 100*int64(len(expHistograms)) + 1, fh: &histogram.FloatHistogram{Sum: math.Float64frombits(value.StaleNaN)}})
	} else {
		expHistograms = append(expHistograms, timedHistogram{t: 100*int64(len(expHistograms)) + 1, h: &histogram.Histogram{Sum: math.Float64frombits(value.StaleNaN)}})
	}
	require.NoError(t, app.Commit())

	// Only 1 chunk in memory, no m-mapped chunks.
	s := head.series.getByHash(l.Hash(), l)
	require.NotNil(t, s)
	require.NotNil(t, s.headChunks)
	require.Equal(t, 1, s.headChunks.len())
	require.Empty(t, s.mmappedChunks)

	testQuery(1)

	// Adding stale in different appender and continuing series after a stale sample.
	app = head.Appender(context.Background())
	for _, h := range tsdbutil.GenerateTestHistograms(2*numHistograms)[numHistograms:] {
		var err error
		if floatHistogram {
			_, err = app.AppendHistogram(0, l, 100*int64(len(expHistograms)), nil, h.ToFloat(nil))
			expHistograms = append(expHistograms, timedHistogram{t: 100 * int64(len(expHistograms)), fh: h.ToFloat(nil)})
		} else {
			_, err = app.AppendHistogram(0, l, 100*int64(len(expHistograms)), h, nil)
			expHistograms = append(expHistograms, timedHistogram{t: 100 * int64(len(expHistograms)), h: h})
		}
		require.NoError(t, err)
	}
	require.NoError(t, app.Commit())

	app = head.Appender(context.Background())
	// +1 so that delta-of-delta is not 0.
	_, err = app.Append(0, l, 100*int64(len(expHistograms))+1, math.Float64frombits(value.StaleNaN))
	require.NoError(t, err)
	if floatHistogram {
		expHistograms = append(expHistograms, timedHistogram{t: 100*int64(len(expHistograms)) + 1, fh: &histogram.FloatHistogram{Sum: math.Float64frombits(value.StaleNaN)}})
	} else {
		expHistograms = append(expHistograms, timedHistogram{t: 100*int64(len(expHistograms)) + 1, h: &histogram.Histogram{Sum: math.Float64frombits(value.StaleNaN)}})
	}
	require.NoError(t, app.Commit())
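	// m-mapping of completed head chunks is not done on the append path; trigger it explicitly here so
	// the older head chunk is m-mapped before the chunk counts are checked below.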
	head.mmapHeadChunks()

	// Total 2 chunks, 1 m-mapped.
	s = head.series.getByHash(l.Hash(), l)
	require.NotNil(t, s)
	require.NotNil(t, s.headChunks)
	require.Equal(t, 1, s.headChunks.len())
	require.Len(t, s.mmappedChunks, 1)

	testQuery(2)
}
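
// TestHistogramCounterResetHeader appends histograms while mutating their count, schema, zero threshold,
// and buckets, and verifies that each chunk that gets cut carries the expected counter reset header.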
func TestHistogramCounterResetHeader(t *testing.T) {
	for _, floatHisto := range []bool{true} { // FIXME
		t.Run(fmt.Sprintf("floatHistogram=%t", floatHisto), func(t *testing.T) {
			l := labels.FromStrings("a", "b")
			head, _ := newTestHead(t, 1000, wlog.CompressionNone, false)
			t.Cleanup(func() {
				require.NoError(t, head.Close())
			})
			require.NoError(t, head.Init(0))

			ts := int64(0)
			appendHistogram := func(h *histogram.Histogram) {
				ts++
				app := head.Appender(context.Background())
				var err error
				if floatHisto {
					_, err = app.AppendHistogram(0, l, ts, nil, h.ToFloat(nil))
				} else {
					_, err = app.AppendHistogram(0, l, ts, h.Copy(), nil)
				}
				require.NoError(t, err)
				require.NoError(t, app.Commit())
			}

			var expHeaders []chunkenc.CounterResetHeader
			checkExpCounterResetHeader := func(newHeaders ...chunkenc.CounterResetHeader) {
				expHeaders = append(expHeaders, newHeaders...)

				ms, _, err := head.getOrCreate(l.Hash(), l)
				require.NoError(t, err)
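				// m-map any completed head chunks first so that their headers can be read back from the
				// chunk disk mapper below.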
				ms.mmapChunks(head.chunkDiskMapper)
				require.Len(t, ms.mmappedChunks, len(expHeaders)-1) // One is the head chunk.

				for i, mmapChunk := range ms.mmappedChunks {
					chk, err := head.chunkDiskMapper.Chunk(mmapChunk.ref)
					require.NoError(t, err)
					if floatHisto {
						require.Equal(t, expHeaders[i], chk.(*chunkenc.FloatHistogramChunk).GetCounterResetHeader())
					} else {
						require.Equal(t, expHeaders[i], chk.(*chunkenc.HistogramChunk).GetCounterResetHeader())
					}
				}
				if floatHisto {
					require.Equal(t, expHeaders[len(expHeaders)-1], ms.headChunks.chunk.(*chunkenc.FloatHistogramChunk).GetCounterResetHeader())
				} else {
					require.Equal(t, expHeaders[len(expHeaders)-1], ms.headChunks.chunk.(*chunkenc.HistogramChunk).GetCounterResetHeader())
				}
			}

			h := tsdbutil.GenerateTestHistograms(1)[0]
			h.PositiveBuckets = []int64{100, 1, 1, 1}
			h.NegativeBuckets = []int64{100, 1, 1, 1}
			h.Count = 1000

			// First histogram is UnknownCounterReset.
			appendHistogram(h)
			checkExpCounterResetHeader(chunkenc.UnknownCounterReset)

			// Another normal histogram.
			h.Count++
			appendHistogram(h)
			checkExpCounterResetHeader()

			// Counter reset via Count.
			h.Count--
			appendHistogram(h)
			checkExpCounterResetHeader(chunkenc.CounterReset)

			// Add 2 non-counter reset histogram chunks (each chunk targets 1024 bytes which contains ~500 int histogram
			// samples or ~1000 float histogram samples).
			numAppend := 2000
			if floatHisto {
				numAppend = 1000
			}
			for i := 0; i < numAppend; i++ {
				appendHistogram(h)
			}
			checkExpCounterResetHeader(chunkenc.NotCounterReset, chunkenc.NotCounterReset)

			// Changing the schema will cut a new chunk with unknown counter reset.
			h.Schema++
			appendHistogram(h)
			checkExpCounterResetHeader(chunkenc.UnknownCounterReset)

			// Changing the zero threshold will cut a new chunk with unknown counter reset.
			h.ZeroThreshold += 0.01
			appendHistogram(h)
			checkExpCounterResetHeader(chunkenc.UnknownCounterReset)

			// Counter reset by removing a positive bucket.
			h.PositiveSpans[1].Length--
			h.PositiveBuckets = h.PositiveBuckets[1:]
			appendHistogram(h)
			checkExpCounterResetHeader(chunkenc.CounterReset)

			// Counter reset by removing a negative bucket.
			h.NegativeSpans[1].Length--
			h.NegativeBuckets = h.NegativeBuckets[1:]
			appendHistogram(h)
			checkExpCounterResetHeader(chunkenc.CounterReset)

			// Add 2 non-counter reset histogram chunks, just to have some non-counter-reset chunks in between.
			for i := 0; i < 2000; i++ {
				appendHistogram(h)
			}
			checkExpCounterResetHeader(chunkenc.NotCounterReset, chunkenc.NotCounterReset)

			// Counter reset caused by a drop in a positive bucket.
			h.PositiveBuckets[len(h.PositiveBuckets)-1]--
			appendHistogram(h)
			checkExpCounterResetHeader(chunkenc.CounterReset)

			// Counter reset caused by a drop in a negative bucket.
			h.NegativeBuckets[len(h.NegativeBuckets)-1]--
			appendHistogram(h)
			checkExpCounterResetHeader(chunkenc.CounterReset)
		})
	}
}
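
// TestAppendingDifferentEncodingToSameSeries appends float samples, integer histograms, and float histograms
// to the same series and verifies that every change of sample encoding cuts a new head chunk while the
// committed samples can still be queried back in order.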
func TestAppendingDifferentEncodingToSameSeries(t *testing.T) {
	dir := t.TempDir()
	opts := DefaultOptions()
	opts.EnableNativeHistograms = true
	db, err := Open(dir, nil, nil, opts, nil)
	require.NoError(t, err)
	t.Cleanup(func() {
		require.NoError(t, db.Close())
	})
	db.DisableCompactions()

	hists := tsdbutil.GenerateTestHistograms(10)
	floatHists := tsdbutil.GenerateTestFloatHistograms(10)
	lbls := labels.FromStrings("a", "b")

	var expResult []chunks.Sample
	checkExpChunks := func(count int) {
		ms, created, err := db.Head().getOrCreate(lbls.Hash(), lbls)
		require.NoError(t, err)
		require.False(t, created)
		require.NotNil(t, ms)
		require.Equal(t, count, ms.headChunks.len())
	}
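
	// Each case appends one batch of samples in a single appender and states how many head chunks are
	// expected afterwards, or which error the append should return.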
	appends := []struct {
		samples   []chunks.Sample
		expChunks int
		err       error
		// If this is empty, the samples above are used as the expected result instead.
		addToExp []chunks.Sample
	}{
		// Histograms that end up in the expected samples are copied here so that we
		// can independently set the CounterResetHint later.
		{
			samples:   []chunks.Sample{sample{t: 100, h: hists[0].Copy()}},
			expChunks: 1,
		},
		{
			samples:   []chunks.Sample{sample{t: 200, f: 2}},
			expChunks: 2,
		},
		{
			samples:   []chunks.Sample{sample{t: 210, fh: floatHists[0].Copy()}},
			expChunks: 3,
		},
		{
			samples:   []chunks.Sample{sample{t: 220, h: hists[1].Copy()}},
			expChunks: 4,
		},
		{
			samples:   []chunks.Sample{sample{t: 230, fh: floatHists[3].Copy()}},
			expChunks: 5,
		},
		{
			samples: []chunks.Sample{sample{t: 100, h: hists[2].Copy()}},
			err:     storage.ErrOutOfOrderSample,
		},
		{
			samples:   []chunks.Sample{sample{t: 300, h: hists[3].Copy()}},
			expChunks: 6,
		},
		{
			samples: []chunks.Sample{sample{t: 100, f: 2}},
			err:     storage.ErrOutOfOrderSample,
		},
		{
			samples: []chunks.Sample{sample{t: 100, fh: floatHists[4].Copy()}},
			err:     storage.ErrOutOfOrderSample,
		},
		{
			// Combination of histograms and float64 in the same commit. The behaviour is undefined, but we still
			// want to verify how TSDB behaves. Here the histogram is appended at the end, hence it is considered out of order.
			samples: []chunks.Sample{
				sample{t: 400, f: 4},
				sample{t: 500, h: hists[5]}, // This won't be committed.
				sample{t: 600, f: 6},
			},
			addToExp: []chunks.Sample{
				sample{t: 400, f: 4},
				sample{t: 600, f: 6},
			},
			expChunks: 7, // Only 1 new chunk for float64.
		},
		{
			// Here the histogram is appended at the end, hence the first histogram is out of order.
			samples: []chunks.Sample{
				sample{t: 700, h: hists[7]}, // Out of order w.r.t. the next float64 sample that is appended first.
				sample{t: 800, f: 8},
				sample{t: 900, h: hists[9]},
			},
			addToExp: []chunks.Sample{
				sample{t: 800, f: 8},
				sample{t: 900, h: hists[9].Copy()},
			},
			expChunks: 8, // float64 added to old chunk, only 1 new for histograms.
		},
		{
			// Float histogram is appended at the end.
			samples: []chunks.Sample{
				sample{t: 1000, fh: floatHists[7]}, // Out of order w.r.t. the next histogram.
				sample{t: 1100, h: hists[9]},
			},
			addToExp: []chunks.Sample{
				sample{t: 1100, h: hists[9].Copy()},
			},
			expChunks: 8,
		},
	}
	for _, a := range appends {
		app := db.Appender(context.Background())
		for _, s := range a.samples {
			var err error
			if s.H() != nil || s.FH() != nil {
				_, err = app.AppendHistogram(0, lbls, s.T(), s.H(), s.FH())
			} else {
				_, err = app.Append(0, lbls, s.T(), s.F())
			}
			require.Equal(t, a.err, err)
		}

		if a.err == nil {
			require.NoError(t, app.Commit())
			if len(a.addToExp) > 0 {
				expResult = append(expResult, a.addToExp...)
			} else {
				expResult = append(expResult, a.samples...)
			}
			checkExpChunks(a.expChunks)
		} else {
			require.NoError(t, app.Rollback())
		}
	}

	// A histogram sample following a sample of a different type is expected to carry an unknown counter reset hint.
	for i, s := range expResult[1:] {
		switch {
		case s.H() != nil && expResult[i].H() == nil:
			s.(sample).h.CounterResetHint = histogram.UnknownCounterReset
		case s.FH() != nil && expResult[i].FH() == nil:
			s.(sample).fh.CounterResetHint = histogram.UnknownCounterReset
		}
	}

	// Query back and expect same order of samples.
	q, err := db.Querier(math.MinInt64, math.MaxInt64)
	require.NoError(t, err)

	series := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
	require.Equal(t, map[string][]chunks.Sample{lbls.String(): expResult}, series)
}

// Tests https://github.com/prometheus/prometheus/issues/9725.
func TestChunkSnapshotReplayBug(t *testing.T) {
	dir := t.TempDir()
	wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionSnappy)
	require.NoError(t, err)

	// Write a few series records and samples such that the series references are not in order in the WAL
	// for status_code="200".
	var buf []byte
	for i := 1; i <= 1000; i++ {
		var ref chunks.HeadSeriesRef
		if i <= 500 {
			ref = chunks.HeadSeriesRef(i * 100)
		} else {
			ref = chunks.HeadSeriesRef((i - 500) * 50)
		}
		seriesRec := record.RefSeries{
			Ref: ref,
			Labels: labels.FromStrings(
				"__name__", "request_duration",
				"status_code", "200",
				"foo", fmt.Sprintf("baz%d", rand.Int()),
			),
		}
		// Add a sample so that the series is not garbage collected.
		samplesRec := record.RefSample{Ref: ref, T: 1000, V: 1000}
		var enc record.Encoder

		rec := enc.Series([]record.RefSeries{seriesRec}, buf)
		buf = rec[:0]
		require.NoError(t, wal.Log(rec))

		rec = enc.Samples([]record.RefSample{samplesRec}, buf)
		buf = rec[:0]
		require.NoError(t, wal.Log(rec))
	}

	// Write a corrupt snapshot to fail the replay on startup.
	snapshotName := chunkSnapshotDir(0, 100)
	cpdir := filepath.Join(dir, snapshotName)
	require.NoError(t, os.MkdirAll(cpdir, 0o777))
	err = os.WriteFile(filepath.Join(cpdir, "00000000"), []byte{1, 5, 3, 5, 6, 7, 4, 2, 2}, 0o777)
	require.NoError(t, err)

	opts := DefaultHeadOptions()
	opts.ChunkDirRoot = dir
	opts.EnableMemorySnapshotOnShutdown = true

	head, err := NewHead(nil, nil, wal, nil, opts, nil)
	require.NoError(t, err)
	require.NoError(t, head.Init(math.MinInt64))
	defer func() {
		require.NoError(t, head.Close())
	}()

	// Snapshot replay should error out.
	require.Equal(t, 1.0, prom_testutil.ToFloat64(head.metrics.snapshotReplayErrorTotal))

	// Querying `request_duration{status_code!="200"}` should return no series since all of
	// them have status_code="200".
	q, err := NewBlockQuerier(head, math.MinInt64, math.MaxInt64)
	require.NoError(t, err)
	series := query(t, q,
		labels.MustNewMatcher(labels.MatchEqual, "__name__", "request_duration"),
		labels.MustNewMatcher(labels.MatchNotEqual, "status_code", "200"),
	)
	require.Empty(t, series, "there should be no series found")
}
func TestChunkSnapshotTakenAfterIncompleteSnapshot(t *testing.T) {
	dir := t.TempDir()
	wlTemp, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionSnappy)
	require.NoError(t, err)

	// Write a snapshot with a .tmp suffix. Such a leftover directory used to prevent taking any further
	// snapshots or replaying snapshots.
	snapshotName := chunkSnapshotDir(0, 100) + ".tmp"
	cpdir := filepath.Join(dir, snapshotName)
	require.NoError(t, os.MkdirAll(cpdir, 0o777))

	opts := DefaultHeadOptions()
	opts.ChunkDirRoot = dir
	opts.EnableMemorySnapshotOnShutdown = true

	head, err := NewHead(nil, nil, wlTemp, nil, opts, nil)
	require.NoError(t, err)
	require.NoError(t, head.Init(math.MinInt64))
	require.Equal(t, 0.0, prom_testutil.ToFloat64(head.metrics.snapshotReplayErrorTotal))

	// Add some samples for the snapshot.
	app := head.Appender(context.Background())
	_, err = app.Append(0, labels.FromStrings("foo", "bar"), 10, 10)
	require.NoError(t, err)
	require.NoError(t, app.Commit())

	// Should not return any error for a successful snapshot.
	require.NoError(t, head.Close())

	// Verify the snapshot.
	name, idx, offset, err := LastChunkSnapshot(dir)
	require.NoError(t, err)
	require.NotEqual(t, "", name)
	require.Equal(t, 0, idx)
	require.Positive(t, offset)
}

// TestWBLReplay checks the replay at a low level.
func TestWBLReplay(t *testing.T) {
	for name, scenario := range sampleTypeScenarios {
		t.Run(name, func(t *testing.T) {
			testWBLReplay(t, scenario)
		})
	}
}

func testWBLReplay(t *testing.T, scenario sampleTypeScenario) {
	dir := t.TempDir()
	wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionSnappy)
	require.NoError(t, err)
	oooWlog, err := wlog.NewSize(nil, nil, filepath.Join(dir, wlog.WblDirName), 32768, wlog.CompressionSnappy)
	require.NoError(t, err)

	opts := DefaultHeadOptions()
	opts.ChunkRange = 1000
	opts.ChunkDirRoot = dir
	opts.OutOfOrderTimeWindow.Store(30 * time.Minute.Milliseconds())

	h, err := NewHead(nil, nil, wal, oooWlog, opts, nil)
	require.NoError(t, err)
	require.NoError(t, h.Init(0))

	var expOOOSamples []chunks.Sample
	l := labels.FromStrings("foo", "bar")
	appendSample := func(mins int64, val float64, isOOO bool) {
		app := h.Appender(context.Background())
		ts, v := mins*time.Minute.Milliseconds(), val
		_, err := app.Append(0, l, ts, v)
		require.NoError(t, err)
		require.NoError(t, app.Commit())

		if isOOO {
			expOOOSamples = append(expOOOSamples, sample{t: ts, f: v})
		}
	}

	// In-order sample.
	appendSample(60, 60, false)

	// Out of order samples.
	appendSample(40, 40, true)
	appendSample(35, 35, true)
	appendSample(50, 50, true)
	appendSample(55, 55, true)
	appendSample(59, 59, true)
	appendSample(31, 31, true)

	// Check that Head's time ranges are set properly.
	require.Equal(t, 60*time.Minute.Milliseconds(), h.MinTime())
	require.Equal(t, 60*time.Minute.Milliseconds(), h.MaxTime())
	require.Equal(t, 31*time.Minute.Milliseconds(), h.MinOOOTime())
	require.Equal(t, 59*time.Minute.Milliseconds(), h.MaxOOOTime())
	// Restart head.
	require.NoError(t, h.Close())

	wal, err = wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionSnappy)
	require.NoError(t, err)
	oooWlog, err = wlog.NewSize(nil, nil, filepath.Join(dir, wlog.WblDirName), 32768, wlog.CompressionSnappy)
	require.NoError(t, err)
	h, err = NewHead(nil, nil, wal, oooWlog, opts, nil)
	require.NoError(t, err)
	require.NoError(t, h.Init(0)) // Replay happens here.

	// Get the ooo samples from the Head.
	ms, ok, err := h.getOrCreate(l.Hash(), l)
	require.NoError(t, err)
	require.False(t, ok)
	require.NotNil(t, ms)

	chks, err := ms.ooo.oooHeadChunk.chunk.ToEncodedChunks(math.MinInt64, math.MaxInt64)
	require.NoError(t, err)
	require.Len(t, chks, 1)

	it := chks[0].chunk.Iterator(nil)
	actOOOSamples, err := storage.ExpandSamples(it, nil)
	require.NoError(t, err)

	// The OOO chunk is sorted, hence sort the expected samples as well.
	sort.Slice(expOOOSamples, func(i, j int) bool {
		return expOOOSamples[i].T() < expOOOSamples[j].T()
	})

	// Passing true for the 'ignoreCounterResets' parameter prevents differences in counter reset headers
	// from being factored into the sample comparison.
	// TODO(fionaliao): understand counter reset behaviour, might want to modify this later.
	requireEqualSamples(t, l.String(), expOOOSamples, actOOOSamples, true)

	require.NoError(t, h.Close())
}

// TestOOOMmapReplay checks the replay at a low level.
func TestOOOMmapReplay(t *testing.T) {
	for name, scenario := range sampleTypeScenarios {
		t.Run(name, func(t *testing.T) {
			testOOOMmapReplay(t, scenario)
		})
	}
}

func testOOOMmapReplay(t *testing.T, scenario sampleTypeScenario) {
	dir := t.TempDir()
	wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionSnappy)
	require.NoError(t, err)
	oooWlog, err := wlog.NewSize(nil, nil, filepath.Join(dir, wlog.WblDirName), 32768, wlog.CompressionSnappy)
	require.NoError(t, err)

	opts := DefaultHeadOptions()
	opts.ChunkRange = 1000
	opts.ChunkDirRoot = dir
	opts.OutOfOrderCapMax.Store(30)
	opts.OutOfOrderTimeWindow.Store(1000 * time.Minute.Milliseconds())

	h, err := NewHead(nil, nil, wal, oooWlog, opts, nil)
	require.NoError(t, err)
	require.NoError(t, h.Init(0))

	l := labels.FromStrings("foo", "bar")
	appendSample := func(mins int64) {
		app := h.Appender(context.Background())
		_, _, err := scenario.appendFunc(app, l, mins*time.Minute.Milliseconds(), mins)
		require.NoError(t, err)
		require.NoError(t, app.Commit())
	}

	// In-order sample.
	appendSample(200)

	// Out of order samples. 92 samples to create 3 m-map chunks.
	for mins := int64(100); mins <= 191; mins++ {
		appendSample(mins)
	}

	ms, ok, err := h.getOrCreate(l.Hash(), l)
	require.NoError(t, err)
	require.False(t, ok)
	require.NotNil(t, ms)

	require.Len(t, ms.ooo.oooMmappedChunks, 3)
	// Verify that we can access the chunks without error.
	for _, m := range ms.ooo.oooMmappedChunks {
		chk, err := h.chunkDiskMapper.Chunk(m.ref)
		require.NoError(t, err)
		require.Equal(t, int(m.numSamples), chk.NumSamples())
	}

	expMmapChunks := make([]*mmappedChunk, 3)
	copy(expMmapChunks, ms.ooo.oooMmappedChunks)
	// Restart head.
	require.NoError(t, h.Close())

	wal, err = wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionSnappy)
	require.NoError(t, err)
	oooWlog, err = wlog.NewSize(nil, nil, filepath.Join(dir, wlog.WblDirName), 32768, wlog.CompressionSnappy)
	require.NoError(t, err)
	h, err = NewHead(nil, nil, wal, oooWlog, opts, nil)
	require.NoError(t, err)
	require.NoError(t, h.Init(0)) // Replay happens here.

	// Get the mmap chunks from the Head.
	ms, ok, err = h.getOrCreate(l.Hash(), l)
	require.NoError(t, err)
	require.False(t, ok)
	require.NotNil(t, ms)

	require.Len(t, ms.ooo.oooMmappedChunks, len(expMmapChunks))
	// Verify that we can access the chunks without error.
	for _, m := range ms.ooo.oooMmappedChunks {
		chk, err := h.chunkDiskMapper.Chunk(m.ref)
		require.NoError(t, err)
		require.Equal(t, int(m.numSamples), chk.NumSamples())
	}

	actMmapChunks := make([]*mmappedChunk, len(expMmapChunks))
	copy(actMmapChunks, ms.ooo.oooMmappedChunks)
	require.Equal(t, expMmapChunks, actMmapChunks)

	require.NoError(t, h.Close())
}
func TestHeadInit_DiscardChunksWithUnsupportedEncoding(t *testing.T) {
	h, _ := newTestHead(t, 1000, wlog.CompressionNone, false)
	defer func() {
		require.NoError(t, h.Close())
	}()

	require.NoError(t, h.Init(0))

	ctx := context.Background()
	app := h.Appender(ctx)
	seriesLabels := labels.FromStrings("a", "1")
	var seriesRef storage.SeriesRef
	var err error
	for i := 0; i < 400; i++ {
		seriesRef, err = app.Append(0, seriesLabels, int64(i), float64(i))
		require.NoError(t, err)
	}
	require.NoError(t, app.Commit())
	require.Greater(t, prom_testutil.ToFloat64(h.metrics.chunksCreated), 1.0)

	uc := newUnsupportedChunk()
	// Make this chunk not overlap with the previous and the next one.
	h.chunkDiskMapper.WriteChunk(chunks.HeadSeriesRef(seriesRef), 500, 600, uc, false, func(err error) { require.NoError(t, err) })

	app = h.Appender(ctx)
	for i := 700; i < 1200; i++ {
		_, err := app.Append(0, seriesLabels, int64(i), float64(i))
		require.NoError(t, err)
	}
	require.NoError(t, app.Commit())
	require.Greater(t, prom_testutil.ToFloat64(h.metrics.chunksCreated), 4.0)

	series, created, err := h.getOrCreate(seriesLabels.Hash(), seriesLabels)
	require.NoError(t, err)
	require.False(t, created, "should already exist")
	require.NotNil(t, series, "should return the series we created above")

	series.mmapChunks(h.chunkDiskMapper)
	expChunks := make([]*mmappedChunk, len(series.mmappedChunks))
	copy(expChunks, series.mmappedChunks)
	require.NoError(t, h.Close())

	wal, err := wlog.NewSize(nil, nil, filepath.Join(h.opts.ChunkDirRoot, "wal"), 32768, wlog.CompressionNone)
	require.NoError(t, err)
	h, err = NewHead(nil, nil, wal, nil, h.opts, nil)
	require.NoError(t, err)
	require.NoError(t, h.Init(0))

	series, created, err = h.getOrCreate(seriesLabels.Hash(), seriesLabels)
	require.NoError(t, err)
	require.False(t, created, "should already exist")
	require.NotNil(t, series, "should return the series we created above")
	require.Equal(t, expChunks, series.mmappedChunks)
}
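
// UnsupportedMask turns a valid encoding into one the Head does not recognize; the test above relies on
// such chunks being discarded during replay.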
const (
	UnsupportedMask   = 0b10000000
	EncUnsupportedXOR = chunkenc.EncXOR | UnsupportedMask
)

// unsupportedChunk holds an XORChunk and overrides the Encoding() method.
type unsupportedChunk struct {
	*chunkenc.XORChunk
}

func newUnsupportedChunk() *unsupportedChunk {
	return &unsupportedChunk{chunkenc.NewXORChunk()}
}

func (c *unsupportedChunk) Encoding() chunkenc.Encoding {
	return EncUnsupportedXOR
}

// Tests https://github.com/prometheus/prometheus/issues/10277.
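// It overwrites part of an m-mapped chunks file on disk and expects the Head to come up again and accept
// new appends without panicking.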
func TestMmapPanicAfterMmapReplayCorruption(t *testing.T) {
	dir := t.TempDir()
	wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionNone)
	require.NoError(t, err)

	opts := DefaultHeadOptions()
	opts.ChunkRange = DefaultBlockDuration
	opts.ChunkDirRoot = dir
	opts.EnableExemplarStorage = true
	opts.MaxExemplars.Store(config.DefaultExemplarsConfig.MaxExemplars)

	h, err := NewHead(nil, nil, wal, nil, opts, nil)
	require.NoError(t, err)
	require.NoError(t, h.Init(0))

	lastTs := int64(0)
	var ref storage.SeriesRef
	lbls := labels.FromStrings("__name__", "testing", "foo", "bar")
	addChunks := func() {
		interval := DefaultBlockDuration / (4 * 120)
		app := h.Appender(context.Background())
		for i := 0; i < 250; i++ {
			ref, err = app.Append(ref, lbls, lastTs, float64(lastTs))
			lastTs += interval
			if i%10 == 0 {
				require.NoError(t, app.Commit())
				app = h.Appender(context.Background())
			}
		}
		require.NoError(t, app.Commit())
	}

	addChunks()
	require.NoError(t, h.Close())

	wal, err = wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionNone)
	require.NoError(t, err)

	// Corrupt the m-mapped chunks file.
	mmapFilePath := filepath.Join(dir, "chunks_head", "000001")
	f, err := os.OpenFile(mmapFilePath, os.O_WRONLY, 0o666)
	require.NoError(t, err)
	_, err = f.WriteAt([]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 17)
	require.NoError(t, err)
	require.NoError(t, f.Close())

	h, err = NewHead(nil, nil, wal, nil, opts, nil)
	require.NoError(t, err)
	require.NoError(t, h.Init(0))
	addChunks()

	require.NoError(t, h.Close())
}
2022-02-25 07:53:40 -08:00
// Tests https://github.com/prometheus/prometheus/issues/10277.
func TestReplayAfterMmapReplayError(t *testing.T) {
	dir := t.TempDir()
	var h *Head
	var err error

	openHead := func() {
2023-07-11 05:57:57 -07:00
		wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionNone)
2022-02-25 07:53:40 -08:00
		require.NoError(t, err)

		opts := DefaultHeadOptions()
		opts.ChunkRange = DefaultBlockDuration
		opts.ChunkDirRoot = dir
		opts.EnableMemorySnapshotOnShutdown = true
		opts.MaxExemplars.Store(config.DefaultExemplarsConfig.MaxExemplars)

2022-10-10 08:08:46 -07:00
		h, err = NewHead(nil, nil, wal, nil, opts, nil)
2022-02-25 07:53:40 -08:00
		require.NoError(t, err)
		require.NoError(t, h.Init(0))
	}

	openHead()

	itvl := int64(15 * time.Second / time.Millisecond)
	lastTs := int64(0)
	lbls := labels.FromStrings("__name__", "testing", "foo", "bar")
2023-08-24 06:21:17 -07:00
	var expSamples []chunks.Sample
2022-02-25 07:53:40 -08:00
	addSamples := func(numSamples int) {
		app := h.Appender(context.Background())
		var ref storage.SeriesRef
		for i := 0; i < numSamples; i++ {
			ref, err = app.Append(ref, lbls, lastTs, float64(lastTs))
2023-03-30 10:50:13 -07:00
			expSamples = append(expSamples, sample{t: lastTs, f: float64(lastTs)})
2022-02-25 07:53:40 -08:00
			require.NoError(t, err)
			lastTs += itvl
			if i%10 == 0 {
				require.NoError(t, app.Commit())
				app = h.Appender(context.Background())
			}
		}
		require.NoError(t, app.Commit())
	}

	// Creating multiple m-map files.
	for i := 0; i < 5; i++ {
		addSamples(250)
		require.NoError(t, h.Close())
		if i != 4 {
			// Don't open head for the last iteration.
			openHead()
		}
	}

2022-04-27 02:24:36 -07:00
	files, err := os.ReadDir(filepath.Join(dir, "chunks_head"))
	require.NoError(t, err)
2023-12-07 03:35:01 -08:00
	require.Len(t, files, 5)
2022-02-25 07:53:40 -08:00
	// Corrupt an m-map file.
	mmapFilePath := filepath.Join(dir, "chunks_head", "000002")
	f, err := os.OpenFile(mmapFilePath, os.O_WRONLY, 0o666)
	require.NoError(t, err)
	_, err = f.WriteAt([]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 17)
	require.NoError(t, err)
	require.NoError(t, f.Close())

	openHead()
2023-07-31 02:10:24 -07:00
	h.mmapHeadChunks()
2022-02-25 07:53:40 -08:00
	// There should be fewer m-map files due to the corruption.
2022-04-27 02:24:36 -07:00
	files, err = os.ReadDir(filepath.Join(dir, "chunks_head"))
	require.NoError(t, err)
2023-12-07 03:35:01 -08:00
	require.Len(t, files, 2)
2022-02-25 07:53:40 -08:00
	// Querying should not panic.
	q, err := NewBlockQuerier(h, 0, lastTs)
	require.NoError(t, err)
	res := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "__name__", "testing"))
2023-08-24 06:21:17 -07:00
	require.Equal(t, map[string][]chunks.Sample{lbls.String(): expSamples}, res)
2022-02-25 07:53:40 -08:00
	require.NoError(t, h.Close())
}
2022-07-29 07:52:49 -07:00
2022-09-20 10:05:50 -07:00
func TestOOOAppendWithNoSeries(t *testing.T) {
2024-03-03 11:44:12 -08:00
	for name, scenario := range sampleTypeScenarios {
		t.Run(name, func(t *testing.T) {
			testOOOAppendWithNoSeries(t, scenario.appendFunc)
		})
	}
}

func testOOOAppendWithNoSeries(t *testing.T, appendFunc func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error)) {
2022-09-20 10:05:50 -07:00
	dir := t.TempDir()

2023-07-11 05:57:57 -07:00
	wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionSnappy)
2022-09-20 10:05:50 -07:00
	require.NoError(t, err)
2023-07-11 05:57:57 -07:00
	oooWlog, err := wlog.NewSize(nil, nil, filepath.Join(dir, wlog.WblDirName), 32768, wlog.CompressionSnappy)
2022-09-20 10:05:50 -07:00
	require.NoError(t, err)

	opts := DefaultHeadOptions()
	opts.ChunkDirRoot = dir
	opts.OutOfOrderCapMax.Store(30)
	opts.OutOfOrderTimeWindow.Store(120 * time.Minute.Milliseconds())

2022-10-10 08:08:46 -07:00
	h, err := NewHead(nil, nil, wal, oooWlog, opts, nil)
2022-09-20 10:05:50 -07:00
	require.NoError(t, err)
	t.Cleanup(func() {
		require.NoError(t, h.Close())
	})
	require.NoError(t, h.Init(0))

	appendSample := func(lbls labels.Labels, ts int64) {
		app := h.Appender(context.Background())
2024-03-03 11:44:12 -08:00
		_, _, err := appendFunc(app, lbls, ts*time.Minute.Milliseconds(), ts)
2022-09-20 10:05:50 -07:00
		require.NoError(t, err)
		require.NoError(t, app.Commit())
	}

	verifyOOOSamples := func(lbls labels.Labels, expSamples int) {
		ms, created, err := h.getOrCreate(lbls.Hash(), lbls)
		require.NoError(t, err)
		require.False(t, created)
		require.NotNil(t, ms)
2023-07-31 02:10:24 -07:00
		require.Nil(t, ms.headChunks)
2022-12-28 02:19:41 -08:00
		require.NotNil(t, ms.ooo.oooHeadChunk)
		require.Equal(t, expSamples, ms.ooo.oooHeadChunk.chunk.NumSamples())
2022-09-20 10:05:50 -07:00
	}

	verifyInOrderSamples := func(lbls labels.Labels, expSamples int) {
		ms, created, err := h.getOrCreate(lbls.Hash(), lbls)
		require.NoError(t, err)
		require.False(t, created)
		require.NotNil(t, ms)
2022-12-28 02:19:41 -08:00
		require.Nil(t, ms.ooo)
2023-07-31 02:10:24 -07:00
		require.NotNil(t, ms.headChunks)
		require.Equal(t, expSamples, ms.headChunks.chunk.NumSamples())
2022-09-20 10:05:50 -07:00
	}

2024-05-13 08:36:19 -07:00
	newLabels := func(idx int) labels.Labels { return labels.FromStrings("foo", strconv.Itoa(idx)) }

2022-09-20 10:05:50 -07:00
	s1 := newLabels(1)
	appendSample(s1, 300) // At 300m.
	verifyInOrderSamples(s1, 1)

	// At 239m, the sample cannot be appended to the in-order chunk since it is
	// older than the minValidTime, so it should go into the OOO chunk.
	// The series does not exist for s2 yet.
	s2 := newLabels(2)
	appendSample(s2, 239) // OOO sample.
	verifyOOOSamples(s2, 1)

	// Similar for 180m.
	s3 := newLabels(3)
	appendSample(s3, 180) // OOO sample.
	verifyOOOSamples(s3, 1)

	// Now 179m is too old.
	s4 := newLabels(4)
	app := h.Appender(context.Background())
2024-03-03 11:44:12 -08:00
	_, _, err = appendFunc(app, s4, 179*time.Minute.Milliseconds(), 179)
2022-09-20 10:05:50 -07:00
	require.Equal(t, storage.ErrTooOldSample, err)
	require.NoError(t, app.Rollback())
	verifyOOOSamples(s3, 1)

	// Samples that fall within the appendable minValidTime still go into the in-order chunk.
	s5 := newLabels(5)
	appendSample(s5, 240)
	verifyInOrderSamples(s5, 1)
}
func TestHeadMinOOOTimeUpdate(t *testing.T) {
2024-03-03 11:44:12 -08:00
	for name, scenario := range sampleTypeScenarios {
		t.Run(name, func(t *testing.T) {
			testHeadMinOOOTimeUpdate(t, scenario)
		})
	}
}

func testHeadMinOOOTimeUpdate(t *testing.T, scenario sampleTypeScenario) {
2022-09-20 10:05:50 -07:00
	dir := t.TempDir()

2023-07-11 05:57:57 -07:00
	wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionSnappy)
2022-09-20 10:05:50 -07:00
	require.NoError(t, err)
2023-07-11 05:57:57 -07:00
	oooWlog, err := wlog.NewSize(nil, nil, filepath.Join(dir, wlog.WblDirName), 32768, wlog.CompressionSnappy)
2022-09-20 10:05:50 -07:00
	require.NoError(t, err)

	opts := DefaultHeadOptions()
	opts.ChunkDirRoot = dir
	opts.OutOfOrderTimeWindow.Store(10 * time.Minute.Milliseconds())

2022-10-10 08:08:46 -07:00
	h, err := NewHead(nil, nil, wal, oooWlog, opts, nil)
2022-09-20 10:05:50 -07:00
	require.NoError(t, err)
	t.Cleanup(func() {
		require.NoError(t, h.Close())
	})
	require.NoError(t, h.Init(0))

	appendSample := func(ts int64) {
		app := h.Appender(context.Background())
2024-03-07 09:41:03 -08:00
		_, _, err = scenario.appendFunc(app, labels.FromStrings("a", "b"), ts*time.Minute.Milliseconds(), ts)
2022-09-20 10:05:50 -07:00
		require.NoError(t, err)
		require.NoError(t, app.Commit())
	}

	appendSample(300) // In-order sample.
	require.Equal(t, int64(math.MaxInt64), h.MinOOOTime())

	appendSample(295) // OOO sample.
	require.Equal(t, 295*time.Minute.Milliseconds(), h.MinOOOTime())

	// The allowed window for OOO is >=290, which is before the earliest OOO sample at 295,
	// so minOOOTime is set to the lower value.
	require.NoError(t, h.truncateOOO(0, 1))
	require.Equal(t, 290*time.Minute.Milliseconds(), h.MinOOOTime())

	appendSample(310) // In-order sample.
	appendSample(305) // OOO sample.
	require.Equal(t, 290*time.Minute.Milliseconds(), h.MinOOOTime())

	// The OOO sample at 295 has not been gc'ed yet, and the allowed window for OOO is now >=300.
	// So the lowest of the two, 295, is set as minOOOTime.
	require.NoError(t, h.truncateOOO(0, 2))
	require.Equal(t, 295*time.Minute.Milliseconds(), h.MinOOOTime())
2022-07-29 07:52:49 -07:00
}
2023-01-04 02:24:15 -08:00
func TestGaugeHistogramWALAndChunkHeader ( t * testing . T ) {
l := labels . FromStrings ( "a" , "b" )
2023-07-11 05:57:57 -07:00
head , _ := newTestHead ( t , 1000 , wlog . CompressionNone , false )
2023-01-04 02:24:15 -08:00
t . Cleanup ( func ( ) {
require . NoError ( t , head . Close ( ) )
} )
require . NoError ( t , head . Init ( 0 ) )
2023-01-10 15:49:13 -08:00
ts := int64 ( 0 )
appendHistogram := func ( h * histogram . Histogram ) {
ts ++
app := head . Appender ( context . Background ( ) )
_ , err := app . AppendHistogram ( 0 , l , ts , h . Copy ( ) , nil )
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
}
2023-02-10 03:39:33 -08:00
hists := tsdbutil . GenerateTestGaugeHistograms ( 5 )
2023-01-10 15:49:13 -08:00
hists [ 0 ] . CounterResetHint = histogram . UnknownCounterReset
appendHistogram ( hists [ 0 ] )
appendHistogram ( hists [ 1 ] )
appendHistogram ( hists [ 2 ] )
hists [ 3 ] . CounterResetHint = histogram . UnknownCounterReset
appendHistogram ( hists [ 3 ] )
appendHistogram ( hists [ 3 ] )
appendHistogram ( hists [ 4 ] )
checkHeaders := func ( ) {
2023-07-31 02:10:24 -07:00
head . mmapHeadChunks ( )
2023-01-10 15:49:13 -08:00
ms , _ , err := head . getOrCreate ( l . Hash ( ) , l )
require . NoError ( t , err )
require . Len ( t , ms . mmappedChunks , 3 )
expHeaders := [ ] chunkenc . CounterResetHeader {
chunkenc . UnknownCounterReset ,
chunkenc . GaugeType ,
chunkenc . UnknownCounterReset ,
chunkenc . GaugeType ,
}
for i , mmapChunk := range ms . mmappedChunks {
chk , err := head . chunkDiskMapper . Chunk ( mmapChunk . ref )
require . NoError ( t , err )
require . Equal ( t , expHeaders [ i ] , chk . ( * chunkenc . HistogramChunk ) . GetCounterResetHeader ( ) )
}
2023-07-31 02:10:24 -07:00
require . Equal ( t , expHeaders [ len ( expHeaders ) - 1 ] , ms . headChunks . chunk . ( * chunkenc . HistogramChunk ) . GetCounterResetHeader ( ) )
2023-01-10 15:49:13 -08:00
}
checkHeaders ( )
recs := readTestWAL ( t , head . wal . Dir ( ) )
require . Equal ( t , [ ] interface { } {
[ ] record . RefSeries {
{
Ref : 1 ,
Labels : labels . FromStrings ( "a" , "b" ) ,
} ,
} ,
[ ] record . RefHistogramSample { { Ref : 1 , T : 1 , H : hists [ 0 ] } } ,
[ ] record . RefHistogramSample { { Ref : 1 , T : 2 , H : hists [ 1 ] } } ,
[ ] record . RefHistogramSample { { Ref : 1 , T : 3 , H : hists [ 2 ] } } ,
[ ] record . RefHistogramSample { { Ref : 1 , T : 4 , H : hists [ 3 ] } } ,
[ ] record . RefHistogramSample { { Ref : 1 , T : 5 , H : hists [ 3 ] } } ,
[ ] record . RefHistogramSample { { Ref : 1 , T : 6 , H : hists [ 4 ] } } ,
} , recs )
// Restart Head without mmap chunks to expect the WAL replay to recognize gauge histograms.
require . NoError ( t , head . Close ( ) )
require . NoError ( t , os . RemoveAll ( mmappedChunksDir ( head . opts . ChunkDirRoot ) ) )
2023-07-11 05:57:57 -07:00
w , err := wlog . NewSize ( nil , nil , head . wal . Dir ( ) , 32768 , wlog . CompressionNone )
2023-01-10 15:49:13 -08:00
require . NoError ( t , err )
head , err = NewHead ( nil , nil , w , nil , head . opts , nil )
require . NoError ( t , err )
require . NoError ( t , head . Init ( 0 ) )
checkHeaders ( )
}
func TestGaugeFloatHistogramWALAndChunkHeader ( t * testing . T ) {
l := labels . FromStrings ( "a" , "b" )
2023-07-11 05:57:57 -07:00
head , _ := newTestHead ( t , 1000 , wlog . CompressionNone , false )
2023-01-10 15:49:13 -08:00
t . Cleanup ( func ( ) {
require . NoError ( t , head . Close ( ) )
} )
require . NoError ( t , head . Init ( 0 ) )
2023-01-04 02:24:15 -08:00
ts := int64 ( 0 )
appendHistogram := func ( h * histogram . FloatHistogram ) {
ts ++
app := head . Appender ( context . Background ( ) )
_ , err := app . AppendHistogram ( 0 , l , ts , nil , h . Copy ( ) )
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
}
2023-02-10 03:39:33 -08:00
hists := tsdbutil . GenerateTestGaugeFloatHistograms ( 5 )
2023-01-04 02:24:15 -08:00
hists [ 0 ] . CounterResetHint = histogram . UnknownCounterReset
appendHistogram ( hists [ 0 ] )
appendHistogram ( hists [ 1 ] )
appendHistogram ( hists [ 2 ] )
hists [ 3 ] . CounterResetHint = histogram . UnknownCounterReset
appendHistogram ( hists [ 3 ] )
appendHistogram ( hists [ 3 ] )
appendHistogram ( hists [ 4 ] )
checkHeaders := func ( ) {
ms , _ , err := head . getOrCreate ( l . Hash ( ) , l )
require . NoError ( t , err )
2023-07-31 02:10:24 -07:00
head . mmapHeadChunks ( )
2023-01-04 02:24:15 -08:00
require . Len ( t , ms . mmappedChunks , 3 )
expHeaders := [ ] chunkenc . CounterResetHeader {
chunkenc . UnknownCounterReset ,
chunkenc . GaugeType ,
chunkenc . UnknownCounterReset ,
chunkenc . GaugeType ,
}
for i , mmapChunk := range ms . mmappedChunks {
chk , err := head . chunkDiskMapper . Chunk ( mmapChunk . ref )
require . NoError ( t , err )
require . Equal ( t , expHeaders [ i ] , chk . ( * chunkenc . FloatHistogramChunk ) . GetCounterResetHeader ( ) )
}
2023-07-31 02:10:24 -07:00
require . Equal ( t , expHeaders [ len ( expHeaders ) - 1 ] , ms . headChunks . chunk . ( * chunkenc . FloatHistogramChunk ) . GetCounterResetHeader ( ) )
2023-01-04 02:24:15 -08:00
}
checkHeaders ( )
recs := readTestWAL ( t , head . wal . Dir ( ) )
require . Equal ( t , [ ] interface { } {
[ ] record . RefSeries {
{
Ref : 1 ,
Labels : labels . FromStrings ( "a" , "b" ) ,
} ,
} ,
[ ] record . RefFloatHistogramSample { { Ref : 1 , T : 1 , FH : hists [ 0 ] } } ,
[ ] record . RefFloatHistogramSample { { Ref : 1 , T : 2 , FH : hists [ 1 ] } } ,
[ ] record . RefFloatHistogramSample { { Ref : 1 , T : 3 , FH : hists [ 2 ] } } ,
[ ] record . RefFloatHistogramSample { { Ref : 1 , T : 4 , FH : hists [ 3 ] } } ,
[ ] record . RefFloatHistogramSample { { Ref : 1 , T : 5 , FH : hists [ 3 ] } } ,
[ ] record . RefFloatHistogramSample { { Ref : 1 , T : 6 , FH : hists [ 4 ] } } ,
} , recs )
// Restart Head without mmap chunks to expect the WAL replay to recognize gauge histograms.
require . NoError ( t , head . Close ( ) )
require . NoError ( t , os . RemoveAll ( mmappedChunksDir ( head . opts . ChunkDirRoot ) ) )
2023-07-11 05:57:57 -07:00
w , err := wlog . NewSize ( nil , nil , head . wal . Dir ( ) , 32768 , wlog . CompressionNone )
2023-01-04 02:24:15 -08:00
require . NoError ( t , err )
head , err = NewHead ( nil , nil , w , nil , head . opts , nil )
require . NoError ( t , err )
require . NoError ( t , head . Init ( 0 ) )
checkHeaders ( )
}
2023-03-01 04:21:02 -08:00
func TestSnapshotAheadOfWALError ( t * testing . T ) {
2023-07-11 05:57:57 -07:00
head , _ := newTestHead ( t , 120 * 4 , wlog . CompressionNone , false )
2023-03-01 04:21:02 -08:00
head . opts . EnableMemorySnapshotOnShutdown = true
// Add a sample to fill WAL.
app := head . Appender ( context . Background ( ) )
_ , err := app . Append ( 0 , labels . FromStrings ( "foo" , "bar" ) , 10 , 10 )
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
// Increment snapshot index to create sufficiently large difference.
for i := 0 ; i < 2 ; i ++ {
_ , err = head . wal . NextSegment ( )
require . NoError ( t , err )
}
require . NoError ( t , head . Close ( ) ) // This will create a snapshot.
_ , idx , _ , err := LastChunkSnapshot ( head . opts . ChunkDirRoot )
require . NoError ( t , err )
require . Equal ( t , 2 , idx )
// Restart the WAL while keeping the old snapshot. The new head is created manually in this case in order
// to keep using the same snapshot directory instead of a random one.
require . NoError ( t , os . RemoveAll ( head . wal . Dir ( ) ) )
head . opts . EnableMemorySnapshotOnShutdown = false
2023-07-11 05:57:57 -07:00
w , _ := wlog . NewSize ( nil , nil , head . wal . Dir ( ) , 32768 , wlog . CompressionNone )
2023-03-01 04:21:02 -08:00
head , err = NewHead ( nil , nil , w , nil , head . opts , nil )
require . NoError ( t , err )
// Add a sample to fill WAL.
app = head . Appender ( context . Background ( ) )
_ , err = app . Append ( 0 , labels . FromStrings ( "foo" , "bar" ) , 10 , 10 )
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
lastSegment , _ , _ := w . LastSegmentAndOffset ( )
require . Equal ( t , 0 , lastSegment )
require . NoError ( t , head . Close ( ) )
// New WAL is saved, but old snapshot still exists.
_ , idx , _ , err = LastChunkSnapshot ( head . opts . ChunkDirRoot )
require . NoError ( t , err )
require . Equal ( t , 2 , idx )
// Create new Head which should detect the incorrect index and delete the snapshot.
head . opts . EnableMemorySnapshotOnShutdown = true
2023-07-11 05:57:57 -07:00
w , _ = wlog . NewSize ( nil , nil , head . wal . Dir ( ) , 32768 , wlog . CompressionNone )
2023-03-01 04:21:02 -08:00
head , err = NewHead ( nil , nil , w , nil , head . opts , nil )
require . NoError ( t , err )
require . NoError ( t , head . Init ( math . MinInt64 ) )
// Verify that snapshot directory does not exist anymore.
_ , _ , _ , err = LastChunkSnapshot ( head . opts . ChunkDirRoot )
require . Equal ( t , record . ErrNotFound , err )
require . NoError ( t , head . Close ( ) )
}
2023-08-24 06:21:17 -07:00
func BenchmarkCuttingHeadHistogramChunks ( b * testing . B ) {
const (
numSamples = 50000
numBuckets = 100
)
2023-10-13 00:58:48 -07:00
samples := histogram . GenerateBigTestHistograms ( numSamples , numBuckets )
2023-08-24 06:21:17 -07:00
h , _ := newTestHead ( b , DefaultBlockDuration , wlog . CompressionNone , false )
defer func ( ) {
require . NoError ( b , h . Close ( ) )
} ( )
a := h . Appender ( context . Background ( ) )
ts := time . Now ( ) . UnixMilli ( )
lbls := labels . FromStrings ( "foo" , "bar" )
b . ResetTimer ( )
for _ , s := range samples {
_ , err := a . AppendHistogram ( 0 , lbls , ts , s , nil )
require . NoError ( b , err )
}
}
func TestCuttingNewHeadChunks ( t * testing . T ) {
2023-09-13 08:45:06 -07:00
ctx := context . Background ( )
2023-08-24 06:21:17 -07:00
testCases := map [ string ] struct {
numTotalSamples int
timestampJitter bool
floatValFunc func ( i int ) float64
histValFunc func ( i int ) * histogram . Histogram
expectedChks [ ] struct {
numSamples int
numBytes int
}
} {
"float samples" : {
numTotalSamples : 180 ,
floatValFunc : func ( i int ) float64 {
return 1.
} ,
expectedChks : [ ] struct {
numSamples int
numBytes int
} {
{ numSamples : 120 , numBytes : 46 } ,
{ numSamples : 60 , numBytes : 32 } ,
} ,
} ,
"large float samples" : {
// Normally 120 samples would fit into a single chunk but these chunks violate the 1005 byte soft cap.
numTotalSamples : 120 ,
timestampJitter : true ,
floatValFunc : func ( i int ) float64 {
// Flipping between these two make each sample val take at least 64 bits.
vals := [ ] float64 { math . MaxFloat64 , 0x00 }
return vals [ i % len ( vals ) ]
} ,
expectedChks : [ ] struct {
numSamples int
numBytes int
} {
{ 99 , 1008 } ,
{ 21 , 219 } ,
} ,
} ,
"small histograms" : {
numTotalSamples : 240 ,
histValFunc : func ( ) func ( i int ) * histogram . Histogram {
2023-10-13 00:58:48 -07:00
hists := histogram . GenerateBigTestHistograms ( 240 , 10 )
2023-08-24 06:21:17 -07:00
return func ( i int ) * histogram . Histogram {
return hists [ i ]
}
} ( ) ,
expectedChks : [ ] struct {
numSamples int
numBytes int
} {
{ 120 , 1087 } ,
{ 120 , 1039 } ,
} ,
} ,
"large histograms" : {
numTotalSamples : 240 ,
histValFunc : func ( ) func ( i int ) * histogram . Histogram {
2023-10-13 00:58:48 -07:00
hists := histogram . GenerateBigTestHistograms ( 240 , 100 )
2023-08-24 06:21:17 -07:00
return func ( i int ) * histogram . Histogram {
return hists [ i ]
}
} ( ) ,
expectedChks : [ ] struct {
numSamples int
numBytes int
} {
2023-10-13 00:58:26 -07:00
{ 40 , 896 } ,
{ 40 , 899 } ,
{ 40 , 896 } ,
{ 30 , 690 } ,
2023-08-24 06:21:17 -07:00
{ 30 , 691 } ,
{ 30 , 694 } ,
2023-10-13 00:58:26 -07:00
{ 30 , 693 } ,
2023-08-24 06:21:17 -07:00
} ,
} ,
"really large histograms" : {
// Really large histograms; each chunk can only contain a single histogram but we have a 10 sample minimum
// per chunk.
numTotalSamples : 11 ,
histValFunc : func ( ) func ( i int ) * histogram . Histogram {
2023-10-13 00:58:48 -07:00
hists := histogram . GenerateBigTestHistograms ( 11 , 100000 )
2023-08-24 06:21:17 -07:00
return func ( i int ) * histogram . Histogram {
return hists [ i ]
}
} ( ) ,
expectedChks : [ ] struct {
numSamples int
numBytes int
} {
{ 10 , 200103 } ,
{ 1 , 87540 } ,
} ,
} ,
}
for testName , tc := range testCases {
t . Run ( testName , func ( t * testing . T ) {
h , _ := newTestHead ( t , DefaultBlockDuration , wlog . CompressionNone , false )
defer func ( ) {
require . NoError ( t , h . Close ( ) )
} ( )
a := h . Appender ( context . Background ( ) )
ts := int64 ( 10000 )
lbls := labels . FromStrings ( "foo" , "bar" )
jitter := [ ] int64 { 0 , 1 } // A bit of jitter to prevent dod=0.
for i := 0 ; i < tc . numTotalSamples ; i ++ {
if tc . floatValFunc != nil {
_ , err := a . Append ( 0 , lbls , ts , tc . floatValFunc ( i ) )
require . NoError ( t , err )
} else if tc . histValFunc != nil {
_ , err := a . AppendHistogram ( 0 , lbls , ts , tc . histValFunc ( i ) , nil )
require . NoError ( t , err )
}
ts += int64 ( 60 * time . Second / time . Millisecond )
if tc . timestampJitter {
ts += jitter [ i % len ( jitter ) ]
}
}
require . NoError ( t , a . Commit ( ) )
idxReader , err := h . Index ( )
require . NoError ( t , err )
chkReader , err := h . Chunks ( )
require . NoError ( t , err )
2023-09-13 08:45:06 -07:00
p , err := idxReader . Postings ( ctx , "foo" , "bar" )
2023-08-24 06:21:17 -07:00
require . NoError ( t , err )
var lblBuilder labels . ScratchBuilder
for p . Next ( ) {
sRef := p . At ( )
chkMetas := make ( [ ] chunks . Meta , len ( tc . expectedChks ) )
require . NoError ( t , idxReader . Series ( sRef , & lblBuilder , & chkMetas ) )
require . Len ( t , chkMetas , len ( tc . expectedChks ) )
for i , expected := range tc . expectedChks {
2023-11-28 02:14:29 -08:00
chk , iterable , err := chkReader . ChunkOrIterable ( chkMetas [ i ] )
2023-08-24 06:21:17 -07:00
require . NoError ( t , err )
2023-11-28 02:14:29 -08:00
require . Nil ( t , iterable )
2023-08-24 06:21:17 -07:00
require . Equal ( t , expected . numSamples , chk . NumSamples ( ) )
require . Len ( t , chk . Bytes ( ) , expected . numBytes )
}
}
} )
}
}
2023-09-20 05:32:20 -07:00
2024-08-08 00:08:10 -07:00
// TestHeadDetectsDuplicateSampleAtSizeLimit tests a regression where a duplicate sample
2023-09-20 05:32:20 -07:00
// is appended to the head, right when the head chunk is at the size limit.
// The test appends every sample twice, so it expects the stored result to contain
// exactly half of the appended samples.
func TestHeadDetectsDuplicateSampleAtSizeLimit(t *testing.T) {
	numSamples := 1000
	baseTS := int64(1695209650)

	h, _ := newTestHead(t, DefaultBlockDuration, wlog.CompressionNone, false)
	defer func() {
		require.NoError(t, h.Close())
	}()

	a := h.Appender(context.Background())
	var err error
	vals := []float64{math.MaxFloat64, 0x00} // Use the worst case scenario for the XOR encoding. Otherwise we hit the sample limit before the size limit.
	for i := 0; i < numSamples; i++ {
		ts := baseTS + int64(i/2)*10000
		a.Append(0, labels.FromStrings("foo", "bar"), ts, vals[(i/2)%len(vals)])
		err = a.Commit()
		require.NoError(t, err)
		a = h.Appender(context.Background())
	}

	indexReader, err := h.Index()
	require.NoError(t, err)

	var (
		chunks  []chunks.Meta
		builder labels.ScratchBuilder
	)
	require.NoError(t, indexReader.Series(1, &builder, &chunks))

	chunkReader, err := h.Chunks()
	require.NoError(t, err)

	storedSampleCount := 0
	for _, chunkMeta := range chunks {
2023-11-28 02:14:29 -08:00
		chunk, iterable, err := chunkReader.ChunkOrIterable(chunkMeta)
2023-09-20 05:32:20 -07:00
		require.NoError(t, err)
2023-11-28 02:14:29 -08:00
		require.Nil(t, iterable)
2023-09-20 05:32:20 -07:00
		storedSampleCount += chunk.NumSamples()
	}

	require.Equal(t, numSamples/2, storedSampleCount)
}
2023-11-11 08:30:16 -08:00
func TestWALSampleAndExemplarOrder ( t * testing . T ) {
lbls := labels . FromStrings ( "foo" , "bar" )
testcases := map [ string ] struct {
appendF func ( app storage . Appender , ts int64 ) ( storage . SeriesRef , error )
expectedType reflect . Type
} {
"float sample" : {
appendF : func ( app storage . Appender , ts int64 ) ( storage . SeriesRef , error ) {
return app . Append ( 0 , lbls , ts , 1.0 )
} ,
expectedType : reflect . TypeOf ( [ ] record . RefSample { } ) ,
} ,
"histogram sample" : {
appendF : func ( app storage . Appender , ts int64 ) ( storage . SeriesRef , error ) {
return app . AppendHistogram ( 0 , lbls , ts , tsdbutil . GenerateTestHistogram ( 1 ) , nil )
} ,
expectedType : reflect . TypeOf ( [ ] record . RefHistogramSample { } ) ,
} ,
"float histogram sample" : {
appendF : func ( app storage . Appender , ts int64 ) ( storage . SeriesRef , error ) {
return app . AppendHistogram ( 0 , lbls , ts , nil , tsdbutil . GenerateTestFloatHistogram ( 1 ) )
} ,
expectedType : reflect . TypeOf ( [ ] record . RefFloatHistogramSample { } ) ,
} ,
}
for testName , tc := range testcases {
t . Run ( testName , func ( t * testing . T ) {
h , w := newTestHead ( t , 1000 , wlog . CompressionNone , false )
defer func ( ) {
require . NoError ( t , h . Close ( ) )
} ( )
app := h . Appender ( context . Background ( ) )
ref , err := tc . appendF ( app , 10 )
require . NoError ( t , err )
app . AppendExemplar ( ref , lbls , exemplar . Exemplar { Value : 1.0 , Ts : 5 } )
app . Commit ( )
recs := readTestWAL ( t , w . Dir ( ) )
require . Len ( t , recs , 3 )
_ , ok := recs [ 0 ] . ( [ ] record . RefSeries )
require . True ( t , ok , "expected first record to be a RefSeries" )
actualType := reflect . TypeOf ( recs [ 1 ] )
require . Equal ( t , tc . expectedType , actualType , "expected second record to be a %s" , tc . expectedType )
_ , ok = recs [ 2 ] . ( [ ] record . RefExemplar )
require . True ( t , ok , "expected third record to be a RefExemplar" )
} )
}
}
2023-11-12 06:51:37 -08:00
// TestHeadCompactionWhileAppendAndCommitExemplar simulates a use case where
// a series is removed from the head while an exemplar is being appended to it.
// In theory this can happen when the head is compacted at just the right time
// because the series has gone idle.
// The test cheats a little by not appending a sample together with the exemplar.
// If you also add a sample, run Truncate in a concurrent goroutine, and repeat
// the test around a million(!) times, you can get an
// `unknown HeadSeriesRef when trying to add exemplar: 1` error on push.
// Running the test for much longer and with more timing variations would likely
// trigger the
// `signal SIGSEGV: segmentation violation code=0x1 addr=0x20 pc=0xbb03d1`
// panic that we have seen in the wild once.
func TestHeadCompactionWhileAppendAndCommitExemplar(t *testing.T) {
	h, _ := newTestHead(t, DefaultBlockDuration, wlog.CompressionNone, false)
	app := h.Appender(context.Background())
	lbls := labels.FromStrings("foo", "bar")
	ref, err := app.Append(0, lbls, 1, 1)
	require.NoError(t, err)
	app.Commit()
	// Not adding a sample here to trigger the fault.
	app = h.Appender(context.Background())
	_, err = app.AppendExemplar(ref, lbls, exemplar.Exemplar{Value: 1, Ts: 20})
	require.NoError(t, err)
	h.Truncate(10)
	app.Commit()
	h.Close()
}

2023-11-27 07:40:30 -08:00
func labelsWithHashCollision() (labels.Labels, labels.Labels) {
	// These two series have the same XXHash; thanks to https://github.com/pstibrany/labels_hash_collisions
	ls1 := labels.FromStrings("__name__", "metric", "lbl1", "value", "lbl2", "l6CQ5y")
	ls2 := labels.FromStrings("__name__", "metric", "lbl1", "value", "lbl2", "v7uDlF")

	if ls1.Hash() != ls2.Hash() {
		// These ones are the same when using -tags stringlabels.
		ls1 = labels.FromStrings("__name__", "metric", "lbl", "HFnEaGl")
		ls2 = labels.FromStrings("__name__", "metric", "lbl", "RqcXatm")
	}

	if ls1.Hash() != ls2.Hash() {
		panic("This code needs to be updated: find new labels with colliding hash values.")
	}

	return ls1, ls2
}
2023-11-28 05:43:35 -08:00
// stripeSeriesWithCollidingSeries returns a stripeSeries with two memSeries having the same, colliding, hash.
func stripeSeriesWithCollidingSeries ( t * testing . T ) ( * stripeSeries , * memSeries , * memSeries ) {
t . Helper ( )
2023-11-27 07:40:30 -08:00
lbls1 , lbls2 := labelsWithHashCollision ( )
ms1 := memSeries {
lset : lbls1 ,
}
ms2 := memSeries {
lset : lbls2 ,
}
hash := lbls1 . Hash ( )
s := newStripeSeries ( 1 , noopSeriesLifecycleCallback { } )
got , created , err := s . getOrSet ( hash , lbls1 , func ( ) * memSeries {
return & ms1
} )
require . NoError ( t , err )
require . True ( t , created )
require . Same ( t , & ms1 , got )
// Add a conflicting series
got , created , err = s . getOrSet ( hash , lbls2 , func ( ) * memSeries {
return & ms2
} )
require . NoError ( t , err )
require . True ( t , created )
require . Same ( t , & ms2 , got )
2023-11-28 05:43:35 -08:00
return s , & ms1 , & ms2
}
func TestStripeSeries_getOrSet ( t * testing . T ) {
s , ms1 , ms2 := stripeSeriesWithCollidingSeries ( t )
hash := ms1 . lset . Hash ( )
2023-11-27 07:40:30 -08:00
// Verify that we can get both of the series despite the hash collision
2023-11-28 05:43:35 -08:00
got := s . getByHash ( hash , ms1 . lset )
require . Same ( t , ms1 , got )
got = s . getByHash ( hash , ms2 . lset )
require . Same ( t , ms2 , got )
}
func TestStripeSeries_gc ( t * testing . T ) {
s , ms1 , ms2 := stripeSeriesWithCollidingSeries ( t )
hash := ms1 . lset . Hash ( )
s . gc ( 0 , 0 )
// Verify that we can get neither ms1 nor ms2 after gc-ing corresponding series
got := s . getByHash ( hash , ms1 . lset )
require . Nil ( t , got )
got = s . getByHash ( hash , ms2 . lset )
require . Nil ( t , got )
2023-11-27 07:40:30 -08:00
}
2023-11-28 05:54:37 -08:00
func TestPostingsCardinalityStats(t *testing.T) {
	head := &Head{postings: index.NewMemPostings()}
	head.postings.Add(1, labels.FromStrings(labels.MetricName, "t", "n", "v1"))
	head.postings.Add(2, labels.FromStrings(labels.MetricName, "t", "n", "v2"))

	statsForMetricName := head.PostingsCardinalityStats(labels.MetricName, 10)
	head.postings.Add(3, labels.FromStrings(labels.MetricName, "t", "n", "v3"))
	// Using cache.
	require.Equal(t, statsForMetricName, head.PostingsCardinalityStats(labels.MetricName, 10))

	statsForSomeLabel := head.PostingsCardinalityStats("n", 10)
	// Cache should be evicted because of the change of label name.
	require.NotEqual(t, statsForMetricName, statsForSomeLabel)
	head.postings.Add(4, labels.FromStrings(labels.MetricName, "t", "n", "v4"))
	// Using cache.
	require.Equal(t, statsForSomeLabel, head.PostingsCardinalityStats("n", 10))

	// Cache should be evicted because of the change of limit parameter.
	statsForSomeLabel1 := head.PostingsCardinalityStats("n", 1)
	require.NotEqual(t, statsForSomeLabel1, statsForSomeLabel)
	// Using cache.
	require.Equal(t, statsForSomeLabel1, head.PostingsCardinalityStats("n", 1))
}
2023-12-11 00:43:42 -08:00
2024-08-07 06:02:59 -07:00
func TestHeadAppender_AppendFloatWithSameTimestampAsPreviousHistogram(t *testing.T) {
	head, _ := newTestHead(t, DefaultBlockDuration, wlog.CompressionNone, false)
	t.Cleanup(func() { head.Close() })

	ls := labels.FromStrings(labels.MetricName, "test")

	{
		// Append a float 10.0 @ 1_000.
		app := head.Appender(context.Background())
		_, err := app.Append(0, ls, 1_000, 10.0)
		require.NoError(t, err)
		require.NoError(t, app.Commit())
	}

	{
		// Append an integer histogram @ 2_000.
		app := head.Appender(context.Background())
		h := tsdbutil.GenerateTestHistogram(1)
		_, err := app.AppendHistogram(0, ls, 2_000, h, nil)
		require.NoError(t, err)
		require.NoError(t, app.Commit())
	}

	app := head.Appender(context.Background())
	_, err := app.Append(0, ls, 2_000, 10.0)
	require.Error(t, err)
	require.ErrorIs(t, err, storage.NewDuplicateHistogramToFloatErr(2_000, 10.0))
}
2023-12-11 00:43:42 -08:00
func TestHeadAppender_AppendCTZeroSample ( t * testing . T ) {
type appendableSamples struct {
ts int64
val float64
ct int64
}
for _ , tc := range [ ] struct {
name string
appendableSamples [ ] appendableSamples
2024-09-02 13:30:37 -07:00
expectedSamples [ ] chunks . Sample
2023-12-11 00:43:42 -08:00
} {
{
name : "In order ct+normal sample" ,
appendableSamples : [ ] appendableSamples {
{ ts : 100 , val : 10 , ct : 1 } ,
} ,
2024-09-02 13:30:37 -07:00
expectedSamples : [ ] chunks . Sample {
sample { t : 1 , f : 0 } ,
sample { t : 100 , f : 10 } ,
2023-12-11 00:43:42 -08:00
} ,
} ,
{
name : "Consecutive appends with same ct ignore ct" ,
appendableSamples : [ ] appendableSamples {
{ ts : 100 , val : 10 , ct : 1 } ,
{ ts : 101 , val : 10 , ct : 1 } ,
} ,
2024-09-02 13:30:37 -07:00
expectedSamples : [ ] chunks . Sample {
sample { t : 1 , f : 0 } ,
sample { t : 100 , f : 10 } ,
sample { t : 101 , f : 10 } ,
2023-12-11 00:43:42 -08:00
} ,
} ,
{
name : "Consecutive appends with newer ct do not ignore ct" ,
appendableSamples : [ ] appendableSamples {
{ ts : 100 , val : 10 , ct : 1 } ,
{ ts : 102 , val : 10 , ct : 101 } ,
} ,
2024-09-02 13:30:37 -07:00
expectedSamples : [ ] chunks . Sample {
sample { t : 1 , f : 0 } ,
sample { t : 100 , f : 10 } ,
sample { t : 101 , f : 0 } ,
sample { t : 102 , f : 10 } ,
2023-12-11 00:43:42 -08:00
} ,
} ,
{
name : "CT equals to previous sample timestamp is ignored" ,
appendableSamples : [ ] appendableSamples {
{ ts : 100 , val : 10 , ct : 1 } ,
{ ts : 101 , val : 10 , ct : 100 } ,
} ,
2024-09-02 13:30:37 -07:00
expectedSamples : [ ] chunks . Sample {
sample { t : 1 , f : 0 } ,
sample { t : 100 , f : 10 } ,
sample { t : 101 , f : 10 } ,
2023-12-11 00:43:42 -08:00
} ,
} ,
} {
2024-09-02 13:30:37 -07:00
t . Run ( tc . name , func ( t * testing . T ) {
h , _ := newTestHead ( t , DefaultBlockDuration , wlog . CompressionNone , false )
defer func ( ) {
require . NoError ( t , h . Close ( ) )
} ( )
a := h . Appender ( context . Background ( ) )
lbls := labels . FromStrings ( "foo" , "bar" )
for _ , sample := range tc . appendableSamples {
_ , err := a . AppendCTZeroSample ( 0 , lbls , sample . ts , sample . ct )
require . NoError ( t , err )
_ , err = a . Append ( 0 , lbls , sample . ts , sample . val )
require . NoError ( t , err )
}
require . NoError ( t , a . Commit ( ) )
2023-12-11 00:43:42 -08:00
2024-09-02 13:30:37 -07:00
q , err := NewBlockQuerier ( h , math . MinInt64 , math . MaxInt64 )
require . NoError ( t , err )
result := query ( t , q , labels . MustNewMatcher ( labels . MatchEqual , "foo" , "bar" ) )
require . Equal ( t , tc . expectedSamples , result [ ` { foo="bar"} ` ] )
} )
2023-12-11 00:43:42 -08:00
}
}
2024-03-26 04:17:38 -07:00
func TestHeadCompactableDoesNotCompactEmptyHead(t *testing.T) {
	// Use a chunk range of 1 here so that if we attempted to determine if the head
	// was compactable using default values for min and max times, `Head.compactable()`
	// would return true, which is incorrect. This test verifies that we short-circuit
	// the check when the head has not yet had any samples added.
	head, _ := newTestHead(t, 1, wlog.CompressionNone, false)
	defer func() {
		require.NoError(t, head.Close())
	}()

	require.False(t, head.compactable())
}

2024-05-24 19:43:21 -07:00
type countSeriesLifecycleCallback struct {
	created atomic.Int64
	deleted atomic.Int64
}

func (c *countSeriesLifecycleCallback) PreCreation(labels.Labels) error { return nil }
func (c *countSeriesLifecycleCallback) PostCreation(labels.Labels)     { c.created.Inc() }
func (c *countSeriesLifecycleCallback) PostDeletion(s map[chunks.HeadSeriesRef]labels.Labels) {
	c.deleted.Add(int64(len(s)))
}
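For context, a hedged sketch of how such a callback could be wired into a head in a test. This is a hypothetical test, not one that exists in this file; the HeadOptions.SeriesCallback field name is an assumption here, and the wiring pattern mirrors the NewHead calls used in the tests above.

// Hypothetical wiring sketch, not an existing test in this file.
func testCountSeriesLifecycleCallbackWiring(t *testing.T) {
	dir := t.TempDir()
	w, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionNone)
	require.NoError(t, err)

	cb := &countSeriesLifecycleCallback{}
	opts := DefaultHeadOptions()
	opts.ChunkRange = DefaultBlockDuration
	opts.ChunkDirRoot = dir
	opts.SeriesCallback = cb // Assumed name of the lifecycle-callback option.

	h, err := NewHead(nil, nil, w, nil, opts, nil)
	require.NoError(t, err)
	defer func() { require.NoError(t, h.Close()) }()
	require.NoError(t, h.Init(0))

	app := h.Appender(context.Background())
	_, err = app.Append(0, labels.FromStrings("foo", "bar"), 100, 1)
	require.NoError(t, err)
	require.NoError(t, app.Commit())

	// One new series should have been observed by the callback.
	require.Equal(t, int64(1), cb.created.Load())
}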