Precalculate memSeries.head
This is read far more than it changes. This cuts ~14% off walltime and ~27% off CPU for WAL reading.

Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
commit a64b0d51c4
parent d8c8e4e6e4
head.go | 12
head.go
@@ -1338,6 +1338,7 @@ type memSeries struct {
 	ref          uint64
 	lset         labels.Labels
 	chunks       []*memChunk
+	headChunk    *memChunk
 	chunkRange   int64
 	firstChunkID int
 
@@ -1371,6 +1372,7 @@ func (s *memSeries) cut(mint int64) *memChunk {
 		maxTime: math.MinInt64,
 	}
 	s.chunks = append(s.chunks, c)
+	s.headChunk = c
 
 	// Set upper bound on when the next chunk must be started. An earlier timestamp
 	// may be chosen dynamically at a later point.
@@ -1439,6 +1441,11 @@ func (s *memSeries) truncateChunksBefore(mint int64) (removed int) {
 	}
 	s.chunks = append(s.chunks[:0], s.chunks[k:]...)
 	s.firstChunkID += k
+	if len(s.chunks) == 0 {
+		s.headChunk = nil
+	} else {
+		s.headChunk = s.chunks[len(s.chunks)-1]
+	}
 
 	return k
 }
@@ -1521,10 +1528,7 @@ func (s *memSeries) iterator(id int) chunkenc.Iterator {
 }
 
 func (s *memSeries) head() *memChunk {
-	if len(s.chunks) == 0 {
-		return nil
-	}
-	return s.chunks[len(s.chunks)-1]
+	return s.headChunk
 }
 
 type memChunk struct {
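For reference, a minimal standalone Go sketch of the pattern this commit applies: the last-appended chunk is mirrored in a headChunk field so head() becomes a plain field read instead of a length check plus slice index on every call. The types, field set, and truncateChunksBefore signature below are simplified stand-ins for illustration, not the actual tsdb code.

package main

import "fmt"

// memChunk is a simplified stand-in for the tsdb chunk wrapper.
type memChunk struct {
	minTime, maxTime int64
}

// memSeries sketches the pattern from the diff: the last element of
// chunks is cached in headChunk, so head() avoids recomputing it.
type memSeries struct {
	chunks    []*memChunk
	headChunk *memChunk
}

// cut appends a new head chunk and updates the cached pointer,
// as the commit does in (*memSeries).cut.
func (s *memSeries) cut(mint int64) *memChunk {
	c := &memChunk{minTime: mint}
	s.chunks = append(s.chunks, c)
	s.headChunk = c
	return c
}

// truncateChunksBefore drops the first k chunks and re-derives the
// cached head, mirroring the branch added in truncateChunksBefore.
func (s *memSeries) truncateChunksBefore(k int) int {
	s.chunks = append(s.chunks[:0], s.chunks[k:]...)
	if len(s.chunks) == 0 {
		s.headChunk = nil
	} else {
		s.headChunk = s.chunks[len(s.chunks)-1]
	}
	return k
}

// head returns the cached pointer; it is nil when no chunks exist,
// matching the old slice-based behaviour.
func (s *memSeries) head() *memChunk {
	return s.headChunk
}

func main() {
	s := &memSeries{}
	s.cut(0)
	s.cut(100)
	fmt.Println(s.head().minTime) // 100
	s.truncateChunksBefore(2)
	fmt.Println(s.head() == nil) // true
}

In a hot path such as WAL replay, head() is called for every appended sample, while the cached pointer only changes on cut and truncation, which is why trading a little bookkeeping for a cheaper read pays off here.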