Mirror of https://github.com/prometheus/prometheus.git (synced 2024-12-23 12:44:05 -08:00)
Do not snappy encode if record is too large (#8790)
Snappy cannot encode records larger than ~3.7 GB and will panic if an encoding is attempted. Check that the record is smaller than this limit before encoding. In the future, we could improve this behavior to still compress large records (or break them up into smaller records), but this avoids the panic for users with very large single scrape targets.

Signed-off-by: Chris Marchbanks <csmarchbanks@gmail.com>
This commit is contained in:
parent
b50f9c1c84
commit
7c7dafc321
@@ -616,7 +616,10 @@ func (w *WAL) log(rec []byte, final bool) error {
 	// Compress the record before calculating if a new segment is needed.
 	compressed := false
-	if w.compress && len(rec) > 0 {
+	if w.compress &&
+		len(rec) > 0 &&
+		// If MaxEncodedLen is less than 0 the record is too large to be compressed.
+		snappy.MaxEncodedLen(len(rec)) >= 0 {
 		// The snappy library uses `len` to calculate if we need a new buffer.
 		// In order to allocate as few buffers as possible make the length
 		// equal to the capacity.
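For illustration, below is a minimal, self-contained sketch of the same guard, assuming the github.com/golang/snappy package that the WAL uses; encodeIfPossible is a hypothetical helper for this example, not part of the Prometheus API. In golang/snappy, MaxEncodedLen computes the worst-case output size for an n-byte input (32 + n + n/6) and returns a negative value when that would overflow a 32-bit length, which is where the ~3.7 GB input ceiling in the commit message comes from; calling Encode on such an input panics rather than returning an error.

package main

import (
	"fmt"

	"github.com/golang/snappy"
)

// encodeIfPossible compresses rec only when snappy can represent it.
// MaxEncodedLen returns a negative value when rec is too large to
// encode, in which case Encode would panic, so the record is returned
// uncompressed instead.
func encodeIfPossible(buf, rec []byte) ([]byte, bool) {
	if len(rec) > 0 && snappy.MaxEncodedLen(len(rec)) >= 0 {
		return snappy.Encode(buf, rec), true
	}
	return rec, false
}

func main() {
	out, compressed := encodeIfPossible(nil, []byte("a WAL record payload"))
	fmt.Printf("compressed=%t len=%d\n", compressed, len(out))
}

As the commit message notes, this trades compression for safety on oversized records; splitting such records into smaller ones would let them still be compressed.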