From 2c1be4df7bb50eec068a7432c041bab900fdede1 Mon Sep 17 00:00:00 2001
From: Bryan Boreham
Date: Wed, 30 Mar 2022 10:40:19 +0100
Subject: [PATCH] tsdb: more efficient sorting of postings read from WAL at
 startup (#10500)

* tsdb: avoid slice-to-interface allocation in EnsureOrder

This pulls the `seriesRefSlice` conversion out of the loop, so the
compiler doesn't allocate a new interface value on the heap on every
iteration.

Signed-off-by: Bryan Boreham

* tsdb: use pointer type in Pool for EnsureOrder

As noted by staticcheck, Pool prefers the objects in the pool to have
pointer type. This is a little more fiddly to code, but avoids
allocation of a wrapper object every time a slice is put into the pool.

Removed a comment that said fixing this has a performance penalty;
that is not borne out by benchmarks.

Signed-off-by: Bryan Boreham
---
 tsdb/index/postings.go | 25 ++++++++++++++-----------
 1 file changed, 14 insertions(+), 11 deletions(-)

diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go
index 22aaf7f20..8df2bccf6 100644
--- a/tsdb/index/postings.go
+++ b/tsdb/index/postings.go
@@ -39,7 +39,8 @@ const ensureOrderBatchSize = 1024
 // ensureOrderBatchPool is a pool used to recycle batches passed to workers in MemPostings.EnsureOrder().
 var ensureOrderBatchPool = sync.Pool{
 	New: func() interface{} {
-		return make([][]storage.SeriesRef, 0, ensureOrderBatchSize)
+		x := make([][]storage.SeriesRef, 0, ensureOrderBatchSize)
+		return &x // Return pointer type as preferred by Pool.
 	},
 }
 
@@ -231,39 +232,41 @@ func (p *MemPostings) EnsureOrder() {
 	}
 
 	n := runtime.GOMAXPROCS(0)
-	workc := make(chan [][]storage.SeriesRef)
+	workc := make(chan *[][]storage.SeriesRef)
 
 	var wg sync.WaitGroup
 	wg.Add(n)
 
 	for i := 0; i < n; i++ {
 		go func() {
+			var sortable seriesRefSlice
 			for job := range workc {
-				for _, l := range job {
-					sort.Sort(seriesRefSlice(l))
+				for _, l := range *job {
+					sortable = l
+					sort.Sort(&sortable)
 				}
 
-				job = job[:0]
-				ensureOrderBatchPool.Put(job) //nolint:staticcheck // Ignore SA6002 safe to ignore and actually fixing it has some performance penalty.
+				*job = (*job)[:0]
+				ensureOrderBatchPool.Put(job)
 			}
 			wg.Done()
 		}()
 	}
 
-	nextJob := ensureOrderBatchPool.Get().([][]storage.SeriesRef)
+	nextJob := ensureOrderBatchPool.Get().(*[][]storage.SeriesRef)
 	for _, e := range p.m {
 		for _, l := range e {
-			nextJob = append(nextJob, l)
+			*nextJob = append(*nextJob, l)
 
-			if len(nextJob) >= ensureOrderBatchSize {
+			if len(*nextJob) >= ensureOrderBatchSize {
 				workc <- nextJob
-				nextJob = ensureOrderBatchPool.Get().(*[][]storage.SeriesRef)
 			}
 		}
 	}
 
 	// If the last job was partially filled, we need to push it to workers too.
-	if len(nextJob) > 0 {
+	if len(*nextJob) > 0 {
 		workc <- nextJob
 	}
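
Editor's note on the first change: sort.Sort takes a sort.Interface, so calling
sort.Sort(seriesRefSlice(l)) inside the loop converts each slice to a fresh
interface value, and that conversion can escape to the heap on every iteration.
Declaring one sortable variable per worker and passing &sortable lets the escape
happen at most once per goroutine. The following standalone sketch illustrates
the same pattern; the uint64Slice type and sortAll helper are illustrative names,
not part of the patch.

package main

import (
	"fmt"
	"sort"
)

// uint64Slice is a stand-in for seriesRefSlice: a named slice type
// implementing sort.Interface with value receivers.
type uint64Slice []uint64

func (s uint64Slice) Len() int           { return len(s) }
func (s uint64Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s uint64Slice) Less(i, j int) bool { return s[i] < s[j] }

func sortAll(lists [][]uint64) {
	// Declaring sortable once outside the loop means the conversion to
	// sort.Interface (via &sortable) escapes at most once, instead of
	// allocating a new interface value for every list, which is what
	// sort.Sort(uint64Slice(l)) inside the loop would do.
	var sortable uint64Slice
	for _, l := range lists {
		sortable = l
		sort.Sort(&sortable)
	}
}

func main() {
	lists := [][]uint64{{3, 1, 2}, {9, 7, 8}}
	sortAll(lists)
	fmt.Println(lists) // [[1 2 3] [7 8 9]]
}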
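
Editor's note on the second change: the sync.Pool adjustment follows the pattern
behind staticcheck's SA6002 warning. Putting a bare slice into a pool copies its
three-word header into an interface{}, allocating a small wrapper each time,
whereas storing a pointer to the slice avoids that extra allocation. A minimal
sketch of the pattern follows; batchPool, getBatch, and putBatch are hypothetical
names used only for illustration.

package main

import (
	"fmt"
	"sync"
)

// batchPool stores *[]int rather than []int: putting a plain slice into the
// pool would copy its header into an interface{} and allocate each time,
// while a pointer fits without an extra wrapper allocation.
var batchPool = sync.Pool{
	New: func() interface{} {
		b := make([]int, 0, 1024)
		return &b // return a pointer, as preferred by Pool
	},
}

func getBatch() *[]int { return batchPool.Get().(*[]int) }

func putBatch(b *[]int) {
	*b = (*b)[:0] // reset length, keep capacity for reuse
	batchPool.Put(b)
}

func main() {
	b := getBatch()
	*b = append(*b, 1, 2, 3)
	fmt.Println(len(*b), cap(*b)) // 3 1024
	putBatch(b)
}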