Mirror of https://github.com/prometheus/prometheus.git
Fix duplicate sample detection at chunks size limit
Before cutting a new XOR chunk because the current chunk has gone over the size limit, check that the incoming timestamp is in order, i.e. not equal to or older than the latest sample in the old chunk.

Signed-off-by: György Krajcsovits <gyorgy.krajcsovits@grafana.com>
parent 56b3a015b6
commit 96d03b6f46
@@ -1283,6 +1283,9 @@ func (s *memSeries) appendPreprocessor(t int64, e chunkenc.Encoding, o chunkOpts
 		c = s.cutNewHeadChunk(t, e, o.chunkRange)
 		chunkCreated = true
 	} else if len(c.chunk.Bytes()) > maxBytesPerXORChunk {
+		if c.maxTime >= t {
+			return c, false, false
+		}
 		c = s.cutNewHeadChunk(t, e, o.chunkRange)
 		chunkCreated = true
 	}
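For context, here is a minimal standalone sketch of the decision the patch introduces. The names memSeries, maxBytesPerXORChunk, and cutNewHeadChunk appear in the diff above; the simplified headChunk struct, the constant value, and the appendDecision helper below are hypothetical illustrations, not Prometheus APIs.

package main

import "fmt"

// maxBytesPerXORChunk mirrors the size limit referenced in the diff;
// the concrete value here is only illustrative.
const maxBytesPerXORChunk = 1024

// headChunk is a hypothetical simplification of the in-memory head chunk:
// just the encoded bytes and the timestamp of its latest sample.
type headChunk struct {
	bytes   []byte
	maxTime int64
}

// appendDecision mimics the control flow added by the patch: when the open
// chunk is over the size limit, a sample whose timestamp is not strictly
// newer than the chunk's latest sample is rejected instead of triggering a
// new chunk cut.
func appendDecision(c *headChunk, t int64) (cutNewChunk, rejected bool) {
	if len(c.bytes) > maxBytesPerXORChunk {
		if c.maxTime >= t { // duplicate or out-of-order sample
			return false, true
		}
		return true, false // in-order sample: safe to cut a new chunk
	}
	return false, false // chunk still has room: append in place
}

func main() {
	c := &headChunk{bytes: make([]byte, maxBytesPerXORChunk+1), maxTime: 100}

	cut, rejected := appendDecision(c, 100) // same timestamp as the latest sample
	fmt.Println(cut, rejected)              // false true: rejected, no new chunk cut

	cut, rejected = appendDecision(c, 101) // strictly newer timestamp
	fmt.Println(cut, rejected)             // true false: cut a new chunk
}

Without the guard, a duplicate or out-of-order sample arriving while the chunk is at the size limit would cause a new head chunk to be cut before the timestamp check could reject it.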