mirror of
https://github.com/prometheus/prometheus.git
synced 2024-12-30 07:59:40 -08:00
50cd453c8f
Some checks failed
buf.build / lint and publish (push) Waiting to run
CI / Go tests (push) Waiting to run
CI / More Go tests (push) Waiting to run
CI / Go tests with previous Go version (push) Waiting to run
CI / UI tests (push) Waiting to run
CI / Go tests on Windows (push) Waiting to run
CI / Mixins tests (push) Waiting to run
CI / Build Prometheus for common architectures (0) (push) Waiting to run
CI / Build Prometheus for common architectures (1) (push) Waiting to run
CI / Build Prometheus for common architectures (2) (push) Waiting to run
CI / Build Prometheus for all architectures (0) (push) Waiting to run
CI / Build Prometheus for all architectures (1) (push) Waiting to run
CI / Build Prometheus for all architectures (10) (push) Waiting to run
CI / Build Prometheus for all architectures (11) (push) Waiting to run
CI / Build Prometheus for all architectures (2) (push) Waiting to run
CI / Build Prometheus for all architectures (3) (push) Waiting to run
CI / Build Prometheus for all architectures (4) (push) Waiting to run
CI / Build Prometheus for all architectures (5) (push) Waiting to run
CI / Build Prometheus for all architectures (6) (push) Waiting to run
CI / Build Prometheus for all architectures (7) (push) Waiting to run
CI / Build Prometheus for all architectures (8) (push) Waiting to run
CI / Build Prometheus for all architectures (9) (push) Waiting to run
CI / Report status of build Prometheus for all architectures (push) Blocked by required conditions
CI / Check generated parser (push) Waiting to run
CI / golangci-lint (push) Waiting to run
CI / fuzzing (push) Waiting to run
CI / codeql (push) Waiting to run
CI / Publish main branch artifacts (push) Blocked by required conditions
CI / Publish release artefacts (push) Blocked by required conditions
CI / Publish UI on npm Registry (push) Blocked by required conditions
Scorecards supply-chain security / Scorecards analysis (push) Waiting to run
Push README to Docker Hub / Push README to Docker Hub (push) Has been cancelled
Push README to Docker Hub / Push README to quay.io (push) Has been cancelled
* Fix typos --------- Signed-off-by: Nathan Baulch <nathan.baulch@gmail.com>
179 lines
4.4 KiB
Go
179 lines
4.4 KiB
Go
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package zeropool_test

import (
	"math"
	"sync"
	"testing"

	"github.com/stretchr/testify/require"
	"go.uber.org/atomic"

	"github.com/prometheus/prometheus/util/zeropool"
)

func TestPool(t *testing.T) {
|
|
t.Run("provides correct values", func(t *testing.T) {
|
|
pool := zeropool.New(func() []byte { return make([]byte, 1024) })
|
|
item1 := pool.Get()
|
|
require.Len(t, item1, 1024)
|
|
|
|
item2 := pool.Get()
|
|
require.Len(t, item2, 1024)
|
|
|
|
pool.Put(item1)
|
|
pool.Put(item2)
|
|
|
|
item1 = pool.Get()
|
|
require.Len(t, item1, 1024)
|
|
|
|
item2 = pool.Get()
|
|
require.Len(t, item2, 1024)
|
|
})
|
|
|
|
t.Run("is not racy", func(t *testing.T) {
|
|
pool := zeropool.New(func() []byte { return make([]byte, 1024) })
|
|
|
|
const iterations = 1e6
|
|
const concurrency = math.MaxUint8
|
|
var counter atomic.Int64
|
|
|
|
do := make(chan struct{}, 1e6)
|
|
for i := 0; i < iterations; i++ {
|
|
do <- struct{}{}
|
|
}
|
|
close(do)
|
|
|
|
run := make(chan struct{})
|
|
done := sync.WaitGroup{}
|
|
done.Add(concurrency)
|
|
for i := 0; i < concurrency; i++ {
|
|
go func(worker int) {
|
|
<-run
|
|
for range do {
|
|
item := pool.Get()
|
|
item[0] = byte(worker)
|
|
counter.Add(1) // Counts and also adds some delay to add raciness.
|
|
if item[0] != byte(worker) {
|
|
panic("wrong value")
|
|
}
|
|
pool.Put(item)
|
|
}
|
|
done.Done()
|
|
}(i)
|
|
}
|
|
close(run)
|
|
done.Wait()
|
|
t.Logf("Done %d iterations", counter.Load())
|
|
})
|
|
|
|
t.Run("does not allocate", func(t *testing.T) {
|
|
pool := zeropool.New(func() []byte { return make([]byte, 1024) })
|
|
// Warm up, this will allocate one slice.
|
|
slice := pool.Get()
|
|
pool.Put(slice)
|
|
|
|
allocs := testing.AllocsPerRun(1000, func() {
|
|
slice := pool.Get()
|
|
pool.Put(slice)
|
|
})
|
|
// Don't compare to 0, as when passing all the tests the GC could flush the pools during this test and we would allocate.
|
|
// Just check that it's less than 1 on average, which is mostly the same thing.
|
|
require.Less(t, allocs, 1., "Should not allocate.")
|
|
})
|
|
|
|
t.Run("zero value is valid", func(t *testing.T) {
|
|
var pool zeropool.Pool[[]byte]
|
|
slice := pool.Get()
|
|
pool.Put(slice)
|
|
|
|
allocs := testing.AllocsPerRun(1000, func() {
|
|
slice := pool.Get()
|
|
pool.Put(slice)
|
|
})
|
|
// Don't compare to 0, as when passing all the tests the GC could flush the pools during this test and we would allocate.
|
|
// Just check that it's less than 1 on average, which is mostly the same thing.
|
|
require.Less(t, allocs, 1., "Should not allocate.")
|
|
})
|
|
}
|
|
|
|
func BenchmarkZeropoolPool(b *testing.B) {
|
|
pool := zeropool.New(func() []byte { return make([]byte, 1024) })
|
|
|
|
// Warmup
|
|
item := pool.Get()
|
|
pool.Put(item)
|
|
|
|
b.ResetTimer()
|
|
for i := 0; i < b.N; i++ {
|
|
item := pool.Get()
|
|
pool.Put(item)
|
|
}
|
|
}
|
|
|
|
// BenchmarkSyncPoolValue uses sync.Pool to store values, which makes an allocation on each Put call.
func BenchmarkSyncPoolValue(b *testing.B) {
	newSlice := func() any { return make([]byte, 1024) }
	pool := sync.Pool{New: newSlice}

	// Warmup
	warm := pool.Get().([]byte)
	pool.Put(warm) //nolint:staticcheck // This allocates.

	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		buf := pool.Get().([]byte)
		pool.Put(buf) //nolint:staticcheck // This allocates.
	}
}

// BenchmarkSyncPoolNewPointer uses sync.Pool to store pointers, but it calls Put with a new pointer every time.
func BenchmarkSyncPoolNewPointer(b *testing.B) {
	pool := sync.Pool{
		New: func() any {
			s := make([]byte, 1024)
			return &s
		},
	}

	// Warmup
	warm := pool.Get().(*[]byte)
	pool.Put(warm)

	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		p := pool.Get().(*[]byte)
		s := *p
		pool.Put(&s) // A fresh pointer on every Put: this is the cost being measured.
	}
}

// BenchmarkSyncPoolPointer illustrates the optimal usage of sync.Pool, not always possible.
func BenchmarkSyncPoolPointer(b *testing.B) {
	pool := sync.Pool{
		New: func() any {
			s := make([]byte, 1024)
			return &s
		},
	}

	// Warmup
	warm := pool.Get().(*[]byte)
	pool.Put(warm)

	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		p := pool.Get().(*[]byte)
		pool.Put(p) // Same pointer goes back in: no per-iteration allocation.
	}
}