Mirror of https://github.com/prometheus/prometheus.git
Merge pull request #6687 from pracucci/reduce-tsdb-head-inuse-memory
Trim TSDB head chunks after being cut, to reduce inuse memory
Commit 641676b397
tsdb/chunkenc/chunk.go

@@ -41,14 +41,29 @@ const (
 // Chunk holds a sequence of sample pairs that can be iterated over and appended to.
 type Chunk interface {
+	// Bytes returns the underlying byte slice of the chunk.
 	Bytes() []byte
+
+	// Encoding returns the encoding type of the chunk.
 	Encoding() Encoding
+
+	// Appender returns an appender to append samples to the chunk.
 	Appender() (Appender, error)
+
 	// The iterator passed as argument is for re-use.
 	// Depending on implementation, the iterator can
 	// be re-used or a new iterator can be allocated.
 	Iterator(Iterator) Iterator
+
+	// NumSamples returns the number of samples in the chunk.
 	NumSamples() int
+
+	// Compact is called whenever a chunk is expected to be complete (no more
+	// samples appended) and the underlying implementation can eventually
+	// optimize the chunk.
+	// There's no strong guarantee that no samples will be appended once
+	// Compact() is called. Implementing this function is optional.
+	Compact()
 }

 // Appender adds sample pairs to a chunk.
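Since Compact is now part of the Chunk interface, every chunk implementation has to carry the method, while the doc comment keeps the actual trimming optional. As a minimal sketch of that flexibility, a hypothetical chunk type whose buffer is already exactly sized could satisfy the method with a no-op. The constChunk type below is illustrative only and is not part of this change; it is shown as if it lived in the chunkenc package.

package chunkenc

// constChunk is a hypothetical chunk whose byte slice is allocated to its
// exact final size up front, so there is never spare capacity to release.
// Only the Compact method is sketched; the rest of the Chunk interface is omitted.
type constChunk struct {
	data []byte
}

// Compact is a no-op, which the interface comment explicitly allows.
func (c *constChunk) Compact() {}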
tsdb/chunkenc/xor.go

@@ -49,6 +49,10 @@ import (
 	"math/bits"
 )

+const (
+	chunkCompactCapacityThreshold = 32
+)
+
 // XORChunk holds XOR encoded sample data.
 type XORChunk struct {
 	b bstream
@@ -75,6 +79,14 @@ func (c *XORChunk) NumSamples() int {
 	return int(binary.BigEndian.Uint16(c.Bytes()))
 }

+func (c *XORChunk) Compact() {
+	if l := len(c.b.stream); cap(c.b.stream) > l+chunkCompactCapacityThreshold {
+		buf := make([]byte, l)
+		copy(buf, c.b.stream)
+		c.b.stream = buf
+	}
+}
+
 // Appender implements the Chunk interface.
 func (c *XORChunk) Appender() (Appender, error) {
 	it := c.iterator(nil)
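The threshold matters because the bstream grows its byte slice with append, which typically leaves spare capacity behind once the chunk stops receiving samples. Below is a small, standalone sketch of the same trim-if-worth-it rule applied to a plain byte slice; compactBytes and the sample sizes are made up for illustration, and only the threshold value mirrors the diff.

package main

import "fmt"

// compactCapacityThreshold mirrors chunkCompactCapacityThreshold above:
// reallocating is only worthwhile when it frees more than ~32 bytes.
const compactCapacityThreshold = 32

// compactBytes reallocates b into an exactly-sized buffer only when doing so
// releases more than the threshold, otherwise it leaves the slice alone.
func compactBytes(b []byte) []byte {
	if l := len(b); cap(b) > l+compactCapacityThreshold {
		buf := make([]byte, l)
		copy(buf, b)
		return buf
	}
	return b
}

func main() {
	var b []byte
	for i := 0; i < 130; i++ { // grow via append, as the bstream does while samples arrive
		b = append(b, byte(i))
	}
	fmt.Println("before trim: len", len(b), "cap", cap(b)) // cap is typically 256 here
	b = compactBytes(b)
	fmt.Println("after trim:  len", len(b), "cap", cap(b)) // spare capacity released
}

The guard means slices whose spare capacity is 32 bytes or less are left untouched, so the extra allocation and copy only happen when they actually pay off.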
tsdb/head.go

@@ -1691,6 +1691,11 @@ func (s *memSeries) cut(mint int64) *memChunk {
 	s.chunks = append(s.chunks, c)
 	s.headChunk = c

+	// Remove exceeding capacity from the previous chunk byte slice to save memory.
+	if l := len(s.chunks); l > 1 {
+		s.chunks[l-2].chunk.Compact()
+	}
+
 	// Set upper bound on when the next chunk must be started. An earlier timestamp
 	// may be chosen dynamically at a later point.
 	s.nextAt = rangeForTimestamp(mint, s.chunkRange)
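cut() compacts only s.chunks[l-2], the chunk that has just been closed, because the freshly cut head chunk is still being appended to. The saving compounds across the head: every in-memory series ends up holding exactly-sized slices for its full chunks instead of append-grown ones. As a rough, standalone illustration of that aggregate effect (this program is not from the PR; the series count and chunk size are arbitrary assumptions):

package main

import (
	"fmt"
	"runtime"
)

// trim mimics the effect of Compact: copy into a buffer with no spare capacity.
func trim(b []byte) []byte {
	buf := make([]byte, len(b))
	copy(buf, b)
	return buf
}

// heapInuse runs a GC so the old, untrimmed slices are collected and the
// two measurements are comparable.
func heapInuse() uint64 {
	runtime.GC()
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)
	return ms.HeapInuse
}

func main() {
	const numSeries = 100_000

	// One append-grown byte slice per series, standing in for a closed head chunk.
	chunks := make([][]byte, numSeries)
	for i := range chunks {
		for j := 0; j < 130; j++ {
			chunks[i] = append(chunks[i], byte(j))
		}
	}
	before := heapInuse()

	for i := range chunks {
		chunks[i] = trim(chunks[i])
	}
	after := heapInuse()

	fmt.Printf("heap in use: %d bytes before trimming, %d bytes after\n", before, after)
}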