Addressed review comments

Signed-off-by: Marco Pracucci <marco@pracucci.com>
Marco Pracucci 2023-09-28 10:32:19 +02:00
parent 2986a7a4ba
commit 9bcca44ac4
3 changed files with 4 additions and 29 deletions

@@ -853,16 +853,6 @@ type PostingsCloner struct
 // and it shouldn't be used once provided to the PostingsCloner.
 func NewPostingsCloner(p Postings) *PostingsCloner {
 	ids, err := ExpandPostings(p)
-
-	// The ExpandedPostings() doesn't know the total number of postings beforehand,
-	// so the returned slice capacity may be well above the actual number of items.
-	// In such case, we shrink it.
-	if float64(len(ids)) < float64(cap(ids))*0.70 {
-		shrunk := make([]storage.SeriesRef, len(ids))
-		copy(shrunk, ids)
-		ids = shrunk
-	}
-
 	return &PostingsCloner{ids: ids, err: err}
 }
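Note on the removal above: as the dropped comment explains, ExpandPostings doesn't know the total number of postings beforehand, so the returned slice's capacity can be well above its length; the dropped block copied the IDs into a right-sized slice whenever less than 70% of the capacity was used. A standalone sketch of that pattern, for reference only (the helper name is illustrative and assumes storage.SeriesRef from github.com/prometheus/prometheus/storage):

// shrinkIfOversized is an illustrative helper, not part of this change: it
// re-allocates ids with capacity equal to its length whenever the slice uses
// less than 70% of its current capacity.
func shrinkIfOversized(ids []storage.SeriesRef) []storage.SeriesRef {
	if float64(len(ids)) < float64(cap(ids))*0.70 {
		shrunk := make([]storage.SeriesRef, len(ids))
		copy(shrunk, ids)
		return shrunk
	}
	return ids
}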

@@ -24,7 +24,6 @@ import (
 	"testing"
 	"github.com/pkg/errors"
-	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"github.com/prometheus/prometheus/model/labels"
@@ -1104,20 +1103,6 @@ func TestPostingsCloner(t *testing.T) {
 	})
 }
 
-func TestNewPostingsCloner_ShrinkExpandedPostingsSlice(t *testing.T) {
-	t.Run("should not shrink expanded postings if length is >= 70% capacity", func(t *testing.T) {
-		cloner := NewPostingsCloner(NewListPostings(make([]storage.SeriesRef, 60)))
-		assert.Equal(t, 60, len(cloner.ids))
-		assert.Equal(t, 64, cap(cloner.ids)) // Not shrunk.
-	})
-
-	t.Run("should shrink expanded postings if length is < 70% capacity", func(t *testing.T) {
-		cloner := NewPostingsCloner(NewListPostings(make([]storage.SeriesRef, 33)))
-		assert.Equal(t, 33, len(cloner.ids))
-		assert.Equal(t, 33, cap(cloner.ids)) // Shrunk.
-	})
-}
-
 func TestFindIntersectingPostings(t *testing.T) {
 	t.Run("multiple intersections", func(t *testing.T) {
 		p := NewListPostings([]storage.SeriesRef{10, 15, 20, 25, 30, 35, 40, 45, 50})

@@ -36,7 +36,7 @@ type IndexPostingsReader interface {
 // NewPostingsForMatchersCache creates a new PostingsForMatchersCache.
 // If `ttl` is 0, then it only deduplicates in-flight requests.
-// If `force` is true, then all requests go through cache, regardless of the `concurrent` param provided.
+// If `force` is true, then all requests go through cache, regardless of the `concurrent` param provided to the PostingsForMatchers method.
 func NewPostingsForMatchersCache(ttl time.Duration, maxItems int, maxBytes int64, force bool) *PostingsForMatchersCache {
 	b := &PostingsForMatchersCache{
 		calls: &sync.Map{},
@@ -112,9 +112,9 @@ func (c *PostingsForMatchersCache) postingsForMatchersPromise(ctx context.Contex
 	// Estimate the size of the cache entry, in bytes. We use max() because
 	// size.Of() returns -1 if the value is nil.
-	estimatedSizeBytes := int64(len(key)) + max(0, int64(size.Of(outerErr))) + max(0, int64(size.Of(cloner)))
+	sizeBytes := int64(len(key)) + max(0, int64(size.Of(outerErr))) + max(0, int64(size.Of(cloner)))
-	c.created(key, c.timeNow(), estimatedSizeBytes)
+	c.created(key, c.timeNow(), sizeBytes)
 	return promise
 }
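Note on the rename above: the entry size is estimated as the key length plus the deep sizes of the error and the cloner, with each term clamped at zero because size.Of() returns -1 for nil values. A hedged sketch of that computation in isolation (the helper name is illustrative; it assumes the size.Of() function imported by this file and Go 1.21's built-in max):

// estimateEntrySize is an illustrative sketch, not the exact code in this file.
// size.Of() returns -1 for values it cannot measure (such as nil), so each
// term is clamped at zero before being added.
func estimateEntrySize(key string, outerErr error, cloner any) int64 {
	return int64(len(key)) +
		max(0, int64(size.Of(outerErr))) +
		max(0, int64(size.Of(cloner)))
}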
@@ -122,7 +122,7 @@ type postingsForMatchersCachedCall struct {
 	key string
 	ts time.Time
-	// Size of he cached entry, in bytes.
+	// Size of the cached entry, in bytes.
 	sizeBytes int64
 }
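Usage sketch for the constructor documented above (argument values are illustrative only): per the updated doc comment, a ttl of 0 merely deduplicates in-flight requests, and force set to true sends every request through the cache regardless of the `concurrent` parameter passed to the PostingsForMatchers method.

// Illustrative only, not part of this change.
cache := NewPostingsForMatchersCache(
	10*time.Second, // ttl
	100,            // maxItems
	10*1024*1024,   // maxBytes
	false,          // force
)
_ = cache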