From 96d03b6f46d98fb9b586026bd973aec603954853 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?=
Date: Wed, 20 Sep 2023 14:49:56 +0200
Subject: [PATCH] Fix duplicate sample detection at chunk size limit
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Before cutting a new XOR chunk because the current chunk has gone over
the size limit, check that the new sample's timestamp is in order, i.e.
not equal to or older than the latest sample in the old chunk.

Signed-off-by: György Krajcsovits
---
 tsdb/head_append.go | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/tsdb/head_append.go b/tsdb/head_append.go
index 901694375..e271ff6c5 100644
--- a/tsdb/head_append.go
+++ b/tsdb/head_append.go
@@ -1283,6 +1283,9 @@ func (s *memSeries) appendPreprocessor(t int64, e chunkenc.Encoding, o chunkOpts
 		c = s.cutNewHeadChunk(t, e, o.chunkRange)
 		chunkCreated = true
 	} else if len(c.chunk.Bytes()) > maxBytesPerXORChunk {
+		if c.maxTime >= t {
+			return c, false, false
+		}
 		c = s.cutNewHeadChunk(t, e, o.chunkRange)
 		chunkCreated = true
 	}
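
Note (not part of the commit): for reviewers unfamiliar with this code path, here is a minimal, runnable Go sketch of the behavior the new guard enforces. The headChunk and series types, the constant's value, and the simplified append flow are stand-ins invented for illustration, not the real memSeries/chunkenc API; only the c.maxTime >= t check and the surrounding control flow come from the diff above (the real function is (*memSeries).appendPreprocessor in tsdb/head_append.go).

    // A self-contained sketch of the patched control flow, under the
    // assumptions stated above. It omits the other validity checks the
    // real appendPreprocessor performs before this point.
    package main

    import "fmt"

    // Constant name taken from the patch; the value here is assumed.
    const maxBytesPerXORChunk = 1024

    type headChunk struct {
    	maxTime int64  // timestamp of the newest sample in the chunk
    	bytes   []byte // encoded chunk payload
    }

    type series struct {
    	head *headChunk
    }

    // appendPreprocessor mirrors the patched logic: when the current chunk
    // is over the size limit, a new chunk is cut only if the incoming
    // timestamp t is strictly newer than the chunk's maxTime. Otherwise
    // the sample is reported as out of order (sampleInOrder == false)
    // instead of landing in a freshly cut chunk.
    func (s *series) appendPreprocessor(t int64) (c *headChunk, sampleInOrder, chunkCreated bool) {
    	c = s.head
    	if len(c.bytes) > maxBytesPerXORChunk {
    		if c.maxTime >= t {
    			// Equal or older timestamp: reject, do not cut a new chunk.
    			return c, false, false
    		}
    		// Timestamp is in order: safe to cut a new chunk.
    		s.head = &headChunk{maxTime: t}
    		return s.head, true, true
    	}
    	c.maxTime = t
    	return c, true, false
    }

    func main() {
    	s := &series{head: &headChunk{
    		maxTime: 100,
    		bytes:   make([]byte, maxBytesPerXORChunk+1), // chunk already over the limit
    	}}

    	// t=100 duplicates the chunk's newest sample; with the guard it is
    	// rejected rather than accepted into a freshly cut chunk.
    	if _, ok, _ := s.appendPreprocessor(100); !ok {
    		fmt.Println("t=100 rejected as duplicate/out-of-order")
    	}

    	// A strictly newer sample still triggers the chunk cut.
    	if _, _, created := s.appendPreprocessor(101); created {
    		fmt.Println("t=101 accepted, new chunk cut")
    	}
    }

The key design point mirrored here is the ordering of the checks: the duplicate/out-of-order test must run before the size-limit cut, because cutting first would start a fresh chunk that happily accepts a sample whose timestamp is equal to or older than the old chunk's maxTime, which is exactly the duplicate-detection gap the commit subject describes.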