From 7c7dafc32135683082f22d1589acdc92c7b0ec33 Mon Sep 17 00:00:00 2001
From: Chris Marchbanks
Date: Thu, 6 May 2021 12:56:45 -0600
Subject: [PATCH] Do not snappy encode if record is too large (#8790)

Snappy cannot encode records larger than ~3.7 GB and will panic if an
encoding is attempted. Check to make sure that the record is smaller
than this before encoding.

In the future, we could improve this behavior to still compress large
records (or break them up into smaller records), but this avoids the
panic for users with very large single scrape targets.

Signed-off-by: Chris Marchbanks
---
 tsdb/wal/wal.go | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/tsdb/wal/wal.go b/tsdb/wal/wal.go
index 7b45e0e912..ba82251a3f 100644
--- a/tsdb/wal/wal.go
+++ b/tsdb/wal/wal.go
@@ -616,7 +616,10 @@ func (w *WAL) log(rec []byte, final bool) error {
 
 	// Compress the record before calculating if a new segment is needed.
 	compressed := false
-	if w.compress && len(rec) > 0 {
+	if w.compress &&
+		len(rec) > 0 &&
+		// If MaxEncodedLen is less than 0 the record is too large to be compressed.
+		snappy.MaxEncodedLen(len(rec)) >= 0 {
 		// The snappy library uses `len` to calculate if we need a new buffer.
 		// In order to allocate as few buffers as possible make the length
 		// equal to the capacity.
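
For context on the boundary the new guard checks, here is a minimal,
self-contained sketch (not part of the patch; compressIfPossible is a
hypothetical helper) showing how snappy.MaxEncodedLen signals an oversized
input and how the same fall-back-to-uncompressed pattern can be applied:

    package main

    import (
    	"fmt"

    	"github.com/golang/snappy"
    )

    // compressIfPossible mirrors the guard added above: it only
    // snappy-encodes a record when MaxEncodedLen reports a valid
    // (non-negative) worst-case size, and otherwise falls back to
    // returning the raw bytes uncompressed.
    func compressIfPossible(rec []byte) ([]byte, bool) {
    	if len(rec) == 0 || snappy.MaxEncodedLen(len(rec)) < 0 {
    		// Empty, or too large for snappy to encode: skipping
    		// snappy.Encode avoids its panic on oversized input.
    		return rec, false
    	}
    	return snappy.Encode(nil, rec), true
    }

    func main() {
    	rec := make([]byte, 1024)
    	_, ok := compressIfPossible(rec)
    	fmt.Println("1 KiB record compressed:", ok) // true

    	// MaxEncodedLen is cheap to probe directly. It goes negative once
    	// the worst-case encoded size would overflow a 32-bit length,
    	// which is the ~3.7 GB limit the commit message refers to.
    	// (4 << 30 assumes a 64-bit platform where int can hold 4 GiB.)
    	fmt.Println(snappy.MaxEncodedLen(1024))    // small positive bound
    	fmt.Println(snappy.MaxEncodedLen(4 << 30)) // negative: too large
    }

Probing MaxEncodedLen before encoding is the same trick the patch uses: it
is an O(1) arithmetic check, so adding it to the hot write path of the WAL
costs nothing compared to attempting (and recovering from) a failed encode.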