From a727653618ec505b3d042629dfdef658251e5b8e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Nicol=C3=A1s=20Pazos?=
Date: Thu, 2 Nov 2023 17:51:37 -0300
Subject: [PATCH] fix snappy allocations and improve benchmarks

---
 storage/remote/compression.go      |   4 +-
 storage/remote/compression_test.go | 106 ++++++++++++++++++++++++++---
 2 files changed, 97 insertions(+), 13 deletions(-)

diff --git a/storage/remote/compression.go b/storage/remote/compression.go
index d810bc1238..e22ef50d17 100644
--- a/storage/remote/compression.go
+++ b/storage/remote/compression.go
@@ -104,7 +104,7 @@ type snappyCompression struct {
 func (s *snappyCompression) Compress(data []byte) ([]byte, error) {
 	s.buf = s.buf[0:cap(s.buf)]
 	compressed := snappy.Encode(s.buf, data)
-	if n := snappy.MaxEncodedLen(len(data)); n > len(s.buf) {
+	if n := snappy.MaxEncodedLen(len(data)); n > cap(s.buf) {
 		s.buf = make([]byte, n)
 	}
 	return compressed, nil
@@ -125,7 +125,7 @@ type snappyAltCompression struct {
 func (s *snappyAltCompression) Compress(data []byte) ([]byte, error) {
 	s.buf = s.buf[:0]
 	res := reSnappy.Encode(s.buf, data)
-	if n := reSnappy.MaxEncodedLen(len(data)); n > len(s.buf) {
+	if n := reSnappy.MaxEncodedLen(len(data)); n > cap(s.buf) {
 		s.buf = make([]byte, n)
 	}
 	return res, nil
diff --git a/storage/remote/compression_test.go b/storage/remote/compression_test.go
index 8aacf76074..6d5ba70c92 100644
--- a/storage/remote/compression_test.go
+++ b/storage/remote/compression_test.go
@@ -1,6 +1,9 @@
 package remote
 
-import "testing"
+import (
+	"testing"
+	"time"
+)
 
 func TestCompressions(t *testing.T) {
 	data := makeUncompressedReducedWriteRequestBenchData(t)
@@ -59,7 +62,7 @@ func BenchmarkCompressions(b *testing.B) {
 		{"FlateFast", FlateFast},
 		{"FlateComp", FlateComp},
 		{"BrotliFast", BrotliFast},
-		{"BrotliComp", BrotliComp},
+		// {"BrotliComp", BrotliComp},
 		{"BrotliDefault", BrotliDefault},
 	}
 	comps := make(map[CompAlgorithm]Compression)
@@ -71,7 +74,7 @@ func BenchmarkCompressions(b *testing.B) {
 		comps[c.algo] = comp
 		decomps[c.algo] = decomp
 		// warmup
-		for i := 0; i < 10; i++ {
+		for i := 0; i < 2; i++ {
 			compressed, err := comp.Compress(data)
 			if err != nil {
 				b.Fatal(err)
@@ -84,30 +87,111 @@
 	}
 
 	for _, c := range bc {
-		b.Run("compress-"+c.name, func(b *testing.B) {
+		b.Run(c.name, func(b *testing.B) {
 			comp := comps[c.algo]
-			b.ResetTimer()
+			decomp := decomps[c.algo]
+
+			totalSize := 0
+			compTime := 0
+			decompTime := 0
+			rate := 0.0
+			var start time.Time
 			for i := 0; i < b.N; i++ {
-				_, err := comp.Compress(data)
+				start = time.Now()
+				res, err := comp.Compress(data)
 				if err != nil {
 					b.Fatal(err)
 				}
+				compTime += int(time.Since(start))
+				totalSize += len(res)
+				rate += float64(len(data)) / float64(len(res))
+
+				start = time.Now()
+				_, err = decomp.Decompress(res)
+				if err != nil {
+					b.Fatal(err)
+				}
+				decompTime += int(time.Since(start))
 			}
+			b.ReportMetric(float64(totalSize)/float64(b.N), "compressedSize/op")
+			b.ReportMetric(float64(compTime)/float64(b.N), "nsCompress/op")
+			b.ReportMetric(float64(decompTime)/float64(b.N), "nsDecompress/op")
+			b.ReportMetric(rate/float64(b.N), "compressionX/op")
 		})
-		b.Run("decompress-"+c.name, func(b *testing.B) {
-			comp := comps[c.algo]
-			decomp := decomps[c.algo]
+	}
+}
+
+func BenchmarkCompressionsV1(b *testing.B) {
+	data := makeUncompressedWriteRequestBenchData(b)
+	bc := []struct {
+		name string
+		algo CompAlgorithm
+	}{
+		{"Snappy", Snappy},
+		{"SnappyAlt", SnappyAlt},
+		{"S2", S2},
+		{"ZstdFast", ZstdFast},
+		{"ZstdDefault", ZstdDefault},
+		{"ZstdBestComp", ZstdBestComp},
+		{"Lzw", Lzw},
+		{"FlateFast", FlateFast},
+		{"FlateComp", FlateComp},
+		{"BrotliFast", BrotliFast},
+		// {"BrotliComp", BrotliComp},
+		{"BrotliDefault", BrotliDefault},
+	}
+	comps := make(map[CompAlgorithm]Compression)
+	decomps := make(map[CompAlgorithm]Compression)
+	for _, c := range bc {
+		UseAlgorithm = c.algo
+		comp := createComp()
+		decomp := createComp()
+		comps[c.algo] = comp
+		decomps[c.algo] = decomp
+		// warmup
+		for i := 0; i < 2; i++ {
 			compressed, err := comp.Compress(data)
 			if err != nil {
 				b.Fatal(err)
 			}
-			b.ResetTimer()
+			_, err = decomp.Decompress(compressed)
+			if err != nil {
+				b.Fatal(err)
+			}
+		}
+	}
+
+	for _, c := range bc {
+		b.Run(c.name, func(b *testing.B) {
+			comp := comps[c.algo]
+			decomp := decomps[c.algo]
+
+			totalSize := 0
+			compTime := 0
+			decompTime := 0
+			rate := 0.0
+			var start time.Time
 			for i := 0; i < b.N; i++ {
-				_, err = decomp.Decompress(compressed)
+				start = time.Now()
+				res, err := comp.Compress(data)
 				if err != nil {
 					b.Fatal(err)
 				}
+				compTime += int(time.Since(start))
+				totalSize += len(res)
+				rate += float64(len(data)) / float64(len(res))
+
+				start = time.Now()
+				_, err = decomp.Decompress(res)
+				if err != nil {
+					b.Fatal(err)
+				}
+				decompTime += int(time.Since(start))
 			}
+			b.ReportMetric(float64(totalSize)/float64(b.N), "compressedSize/op")
+			b.ReportMetric(float64(compTime)/float64(b.N), "nsCompress/op")
+			b.ReportMetric(float64(decompTime)/float64(b.N), "nsDecompress/op")
+			b.ReportMetric(rate/float64(b.N), "compressionX/op")
 		})
 	}
 }