Mirror of https://github.com/prometheus/prometheus.git
tsdb: more efficient sorting of postings read from WAL at startup (#10500)
* tsdb: avoid slice-to-interface allocation in EnsureOrder

  This pulls the `seriesRefSlice` out of the loop, so the compiler doesn't allocate a new one on the heap every time.

  Signed-off-by: Bryan Boreham <bjboreham@gmail.com>

* tsdb: use pointer type in Pool for EnsureOrder

  As noted by staticcheck, Pool prefers the objects in the pool to have pointer type. This is a little more fiddly to code, but it avoids allocating a wrapper object every time a slice is put into the pool.

  Removed a comment that said fixing this has a performance penalty: not borne out by benchmarks.

  Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
Commit 2c1be4df7b (parent 3e4bd4d913)
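For context on the first message item: passing a slice to `sort.Sort` converts it to the `sort.Interface` type, and because a slice header is larger than the interface's single data word, every such conversion heap-allocates a copy. Doing this inside a hot loop therefore allocates once per batch. Below is a minimal standalone sketch of the problem and the hoisting fix; the `refSlice` type stands in for Prometheus's `seriesRefSlice` and is an illustrative assumption, not the repository's code.

```go
package main

import "sort"

// refSlice stands in for seriesRefSlice: a slice type implementing
// sort.Interface with value receivers.
type refSlice []uint64

func (s refSlice) Len() int           { return len(s) }
func (s refSlice) Less(i, j int) bool { return s[i] < s[j] }
func (s refSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

// sortNaive allocates: each refSlice(l) conversion to sort.Interface
// copies the slice header into a new heap object, once per batch.
func sortNaive(batches [][]uint64) {
	for _, l := range batches {
		sort.Sort(refSlice(l))
	}
}

// sortHoisted declares one variable outside the loop; &sortable also
// satisfies sort.Interface, so the escape happens only once.
func sortHoisted(batches [][]uint64) {
	var sortable refSlice
	for _, l := range batches {
		sortable = l
		sort.Sort(&sortable)
	}
}

func main() {
	batches := [][]uint64{{3, 1, 2}, {9, 7, 8}}
	sortNaive(batches)
	sortHoisted(batches)
}
```

Since value-receiver methods are also in the method set of `*refSlice`, `&sortable` still satisfies `sort.Interface`, and `sortable` escapes to the heap once per goroutine rather than once per batch. The commit's two hunks follow.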
```diff
--- a/tsdb/index/postings.go
+++ b/tsdb/index/postings.go
@@ -39,7 +39,8 @@ const ensureOrderBatchSize = 1024
 // ensureOrderBatchPool is a pool used to recycle batches passed to workers in MemPostings.EnsureOrder().
 var ensureOrderBatchPool = sync.Pool{
 	New: func() interface{} {
-		return make([][]storage.SeriesRef, 0, ensureOrderBatchSize)
+		x := make([][]storage.SeriesRef, 0, ensureOrderBatchSize)
+		return &x // Return pointer type as preferred by Pool.
 	},
 }

```
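The hunk above is the staticcheck SA6002 fix: `sync.Pool` traffics in `interface{}` values, so putting a bare slice into the pool boxes its three-word header into a fresh heap allocation on every `Put`, while a pointer fits directly into the interface's data word and allocates nothing. A small self-contained sketch of the same pattern, with an illustrative `batchPool` over `[]int` rather than the real `[][]storage.SeriesRef`:

```go
package main

import (
	"fmt"
	"sync"
)

// batchPool recycles slices. Storing *[]int instead of []int means each
// Put stores a one-word pointer in the interface{}, not a heap-boxed
// three-word slice header (staticcheck SA6002).
var batchPool = sync.Pool{
	New: func() interface{} {
		x := make([]int, 0, 1024)
		return &x // pointer type, as Pool prefers
	},
}

func main() {
	batch := batchPool.Get().(*[]int)
	*batch = append(*batch, 1, 2, 3)
	fmt.Println(*batch)

	// Truncate (keeping capacity) before recycling, mirroring
	// *job = (*job)[:0] in the second hunk.
	*batch = (*batch)[:0]
	batchPool.Put(batch)
}
```

The cost is one level of indirection at every use: append through `*batch`, truncate with `(*batch)[:0]` before recycling. The second hunk, below, threads that indirection through `EnsureOrder`'s worker loop.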
```diff
@@ -231,39 +232,41 @@ func (p *MemPostings) EnsureOrder() {
 	}

 	n := runtime.GOMAXPROCS(0)
-	workc := make(chan [][]storage.SeriesRef)
+	workc := make(chan *[][]storage.SeriesRef)

 	var wg sync.WaitGroup
 	wg.Add(n)

 	for i := 0; i < n; i++ {
 		go func() {
+			var sortable seriesRefSlice
 			for job := range workc {
-				for _, l := range job {
-					sort.Sort(seriesRefSlice(l))
+				for _, l := range *job {
+					sortable = l
+					sort.Sort(&sortable)
 				}

-				job = job[:0]
-				ensureOrderBatchPool.Put(job) //nolint:staticcheck // Ignore SA6002 safe to ignore and actually fixing it has some performance penalty.
+				*job = (*job)[:0]
+				ensureOrderBatchPool.Put(job)
 			}
 			wg.Done()
 		}()
 	}

-	nextJob := ensureOrderBatchPool.Get().([][]storage.SeriesRef)
+	nextJob := ensureOrderBatchPool.Get().(*[][]storage.SeriesRef)
 	for _, e := range p.m {
 		for _, l := range e {
-			nextJob = append(nextJob, l)
+			*nextJob = append(*nextJob, l)

-			if len(nextJob) >= ensureOrderBatchSize {
+			if len(*nextJob) >= ensureOrderBatchSize {
 				workc <- nextJob
-				nextJob = ensureOrderBatchPool.Get().([][]storage.SeriesRef)
+				nextJob = ensureOrderBatchPool.Get().(*[][]storage.SeriesRef)
 			}
 		}
 	}

 	// If the last job was partially filled, we need to push it to workers too.
-	if len(nextJob) > 0 {
+	if len(*nextJob) > 0 {
 		workc <- nextJob
 	}

```
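The deleted nolint comment claimed that fixing SA6002 carried a performance penalty; per the commit message, benchmarks did not bear that out. A hypothetical micro-benchmark (not one from the Prometheus repository) sketching how such a claim can be checked, with `ReportAllocs` exposing per-operation allocation counts:

```go
package main

import (
	"sort"
	"testing"
)

// refSlice stands in for seriesRefSlice; value receivers, like the original.
type refSlice []uint64

func (s refSlice) Len() int           { return len(s) }
func (s refSlice) Less(i, j int) bool { return s[i] < s[j] }
func (s refSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

// BenchmarkSortNaive converts the slice to sort.Interface on every
// iteration. We measure allocations, not sort speed, so sorting
// already-sorted data after the first pass is fine.
func BenchmarkSortNaive(b *testing.B) {
	b.ReportAllocs()
	data := refSlice{5, 3, 8, 1}
	for i := 0; i < b.N; i++ {
		sort.Sort(data) // slice-to-interface: one heap allocation per call
	}
}

// BenchmarkSortHoisted reuses one variable, so the interface
// conversion's escape happens once, not once per iteration.
func BenchmarkSortHoisted(b *testing.B) {
	b.ReportAllocs()
	data := refSlice{5, 3, 8, 1}
	var sortable refSlice
	for i := 0; i < b.N; i++ {
		sortable = data
		sort.Sort(&sortable)
	}
}
```

Run with `go test -bench=Sort -benchmem`; the naive version should report one allocation per operation for the interface conversion, the hoisted version approximately zero.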