Mirror of https://github.com/prometheus/prometheus.git (synced 2024-11-09 23:24:05 -08:00)

commit 07f5c209e6 (parent d3d5e2af0e)

alternative implementation

Signed-off-by: Callum Styan <callumstyan@gmail.com>
storage/remote/intern.go

@@ -36,51 +36,47 @@ var noReferenceReleases = promauto.NewCounter(prometheus.CounterOpts{

 type pool struct {
 	mtx  sync.RWMutex
-	pool map[unique.Handle[string]]*entry
+	pool map[string]*entry
 }

 type entry struct {
-	refs atomic.Int64
+	refs   atomic.Int64
+	handle unique.Handle[string]
 }

-func newEntry() *entry {
-	return &entry{}
+func newEntry(s string) *entry {
+	return &entry{handle: unique.Make(s)}
 }

 func newPool() *pool {
 	return &pool{
-		pool: map[unique.Handle[string]]*entry{},
+		pool: map[string]*entry{},
 	}
 }

-func (p *pool) intern(s string) string {
-	if s == "" {
-		return ""
-	}
-
+func (p *pool) intern(s string) unique.Handle[string] {
 	p.mtx.RLock()
-	h := unique.Make(s)
-	interned, ok := p.pool[h]
+	interned, ok := p.pool[s]
 	p.mtx.RUnlock()
 	if ok {
 		interned.refs.Inc()
-		return s
+		return interned.handle
 	}
 	p.mtx.Lock()
 	defer p.mtx.Unlock()
-	if interned, ok := p.pool[h]; ok {
+	if interned, ok := p.pool[s]; ok {
 		interned.refs.Inc()
-		return s
+		return interned.handle
 	}
-	p.pool[h] = newEntry()
-	p.pool[h].refs.Store(1)
-	return s
+
+	p.pool[s] = newEntry(s)
+	p.pool[s].refs.Store(1)
+	return p.pool[s].handle
 }

 func (p *pool) release(s string) {
 	p.mtx.RLock()
-	h := unique.Make(s)
-	interned, ok := p.pool[h]
+	interned, ok := p.pool[s]
 	p.mtx.RUnlock()

 	if !ok {
@@ -98,5 +94,5 @@ func (p *pool) release(s string) {
 	if interned.refs.Load() != 0 {
 		return
 	}
-	delete(p.pool, h)
+	delete(p.pool, s)
 }
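The rework inverts the pool's layout: rather than keying entries by unique.Handle[string], which forced every lookup and release through unique.Make, the map is keyed by the raw string and each entry carries its canonical handle, which intern now returns. The double-checked locking shape is unchanged: an optimistic lookup under RLock, then a recheck under the write lock before inserting. For reference, a minimal standalone sketch of the unique-package semantics the new code leans on (standard library, Go 1.23+; identifiers below are illustrative, not from the patch):

package main

import (
	"fmt"
	"unique"
)

func main() {
	// unique.Make canonicalizes equal values to a single handle.
	h1 := unique.Make("http_requests_total")
	h2 := unique.Make("http_requests_total")

	// Handles compare as a pointer-sized value, not byte by byte.
	fmt.Println(h1 == h2) // true

	// Value() recovers the canonical string; both handles share one copy.
	fmt.Println(h1.Value())
}

Because the runtime holds interned values weakly, a canonical string becomes collectable once no handle to it remains; the pool's refcounting only decides how long the pool itself keeps a handle alive.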
storage/remote/intern_test.go

@@ -20,18 +20,16 @@ package remote

 import (
 	"fmt"
+	"github.com/stretchr/testify/require"
 	"testing"
 	"time"
-	"unique"
-
-	"github.com/stretchr/testify/require"
 )

 func TestIntern(t *testing.T) {
 	interner := newPool()
 	testString := "TestIntern"
 	interner.intern(testString)
-	interned, ok := interner.pool[unique.Make(testString)]
+	interned, ok := interner.pool[testString]

 	require.True(t, ok)
 	require.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
@@ -42,13 +40,13 @@ func TestIntern_MultiRef(t *testing.T) {
 	testString := "TestIntern_MultiRef"

 	interner.intern(testString)
-	interned, ok := interner.pool[unique.Make(testString)]
+	interned, ok := interner.pool[testString]

 	require.True(t, ok)
 	require.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))

 	interner.intern(testString)
-	interned, ok = interner.pool[unique.Make(testString)]
+	interned, ok = interner.pool[testString]

 	require.True(t, ok)
 	require.Equal(t, int64(2), interned.refs.Load(), fmt.Sprintf("expected refs to be 2 but it was %d", interned.refs.Load()))
@@ -59,13 +57,13 @@ func TestIntern_DeleteRef(t *testing.T) {
 	testString := "TestIntern_DeleteRef"

 	interner.intern(testString)
-	interned, ok := interner.pool[unique.Make(testString)]
+	interned, ok := interner.pool[testString]

 	require.True(t, ok)
 	require.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))

 	interner.release(testString)
-	_, ok = interner.pool[unique.Make(testString)]
+	_, ok = interner.pool[testString]
 	require.False(t, ok)
 }

@@ -74,7 +72,7 @@ func TestIntern_MultiRef_Concurrent(t *testing.T) {
 	testString := "TestIntern_MultiRef_Concurrent"

 	interner.intern(testString)
-	interned, ok := interner.pool[unique.Make(testString)]
+	interned, ok := interner.pool[testString]
 	require.True(t, ok)
 	require.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))

@@ -85,7 +83,7 @@ func TestIntern_MultiRef_Concurrent(t *testing.T) {
 	time.Sleep(time.Millisecond)

 	interner.mtx.RLock()
-	interned, ok = interner.pool[unique.Make(testString)]
+	interned, ok = interner.pool[testString]
 	interner.mtx.RUnlock()
 	require.True(t, ok)
 	require.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
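With the pool keyed by the plain string, the tests index interner.pool[testString] directly and the unique import drops out of the test file. Since intern now returns a unique.Handle[string], one property worth asserting is handle identity across calls; a hypothetical extra test in the same file's style (not part of the patch, assumes the file's existing imports):

func TestIntern_HandleIdentity(t *testing.T) {
	interner := newPool()
	testString := "TestIntern_HandleIdentity"

	// Two interns of the same string must yield the same canonical handle.
	h1 := interner.intern(testString)
	h2 := interner.intern(testString)
	require.Equal(t, h1, h2)

	// And the entry should now carry two references.
	interned, ok := interner.pool[testString]
	require.True(t, ok)
	require.Equal(t, int64(2), interned.refs.Load())
}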
storage/remote/queue_manager.go

@@ -21,6 +21,7 @@ import (
 	"strconv"
 	"sync"
 	"time"
+	"unique"

 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
@@ -61,6 +62,13 @@ const (
 	reasonUnintentionalDroppedSeries = "unintentionally_dropped_series"
 )

+var handlesPool = sync.Pool{
+	New: func() interface{} {
+		//t.Log("Created")
+		return make([]unique.Handle[string], 0)
+	},
+}
+
 type queueManagerMetrics struct {
 	reg prometheus.Registerer
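handlesPool is meant to amortize allocation of the scratch slices that would hold a series' handles while it is tracked. None of the hunks here shows a call site (the commented-out //t.Log("Created") is leftover debug scaffolding), so the following is only an assumed usage pattern for a sync.Pool of this shape, with hypothetical helper names:

// Hypothetical call-site pattern for handlesPool; not present in the patch.
func pinLabelStrings(p *pool, strs []string) []unique.Handle[string] {
	handles := handlesPool.Get().([]unique.Handle[string])
	for _, s := range strs {
		handles = append(handles, p.intern(s))
	}
	return handles
}

func recycleHandles(handles []unique.Handle[string]) {
	// Reset length but keep capacity before returning the slice to the pool.
	handlesPool.Put(handles[:0])
}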
@@ -398,6 +406,11 @@ type WriteClient interface {
 	Endpoint() string
 }

+type internRef struct {
+	//handles []unique.Handle[string]
+	lset labels.Labels
+}
+
 // QueueManager manages a queue of samples to be sent to the Storage
 // indicated by the provided WriteClient. Implements writeTo interface
 // used by WAL Watcher.
@@ -424,10 +437,11 @@ type QueueManager struct {
 	enc Compression

 	seriesMtx      sync.Mutex // Covers seriesLabels, seriesMetadata, droppedSeries and builder.
-	seriesLabels   map[chunks.HeadSeriesRef]labels.Labels
+	seriesLabels   map[chunks.HeadSeriesRef]internRef
 	seriesMetadata map[chunks.HeadSeriesRef]*metadata.Metadata
 	droppedSeries  map[chunks.HeadSeriesRef]struct{}
 	builder        *labels.Builder
+	handles        []unique.Handle[string]

 	seriesSegmentMtx     sync.Mutex // Covers seriesSegmentIndexes - if you also lock seriesMtx, take seriesMtx first.
 	seriesSegmentIndexes map[chunks.HeadSeriesRef]int
@@ -492,7 +506,7 @@ func NewQueueManager(
 		sendExemplars:        enableExemplarRemoteWrite,
 		sendNativeHistograms: enableNativeHistogramRemoteWrite,

-		seriesLabels:         make(map[chunks.HeadSeriesRef]labels.Labels),
+		seriesLabels:         make(map[chunks.HeadSeriesRef]internRef),
 		seriesMetadata:       make(map[chunks.HeadSeriesRef]*metadata.Metadata),
 		seriesSegmentIndexes: make(map[chunks.HeadSeriesRef]int),
 		droppedSeries:        make(map[chunks.HeadSeriesRef]struct{}),
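internRef is a thin wrapper so the seriesLabels map can later carry pinned handles next to the label set without another change to every signature; the handles field (like QueueManager.handles) is still commented out, so for now the wrapper holds only lset. A sketch of where the design appears to be headed, speculative since the field is disabled, written in this file's context and assuming the slice-backed labels build:

// Speculative shape once handle pinning is wired up; in the patch the
// handles field is commented out and only lset is populated.
type internRefPinned struct {
	handles []unique.Handle[string] // one handle per label name and value
	lset    labels.Labels
}

// Hypothetical constructor that interns every name/value and keeps the handles.
func newInternRef(p *pool, lbls labels.Labels) internRefPinned {
	ir := internRefPinned{lset: lbls}
	for i := range lbls {
		ir.handles = append(ir.handles, p.intern(lbls[i].Name), p.intern(lbls[i].Value))
	}
	return ir
}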
@@ -730,7 +744,7 @@ outer:
 		default:
 		}
 		if t.shards.enqueue(s.Ref, timeSeries{
-			seriesLabels: lbls,
+			seriesLabels: lbls.lset,
 			metadata:     meta,
 			timestamp:    s.T,
 			value:        s.V,
@@ -788,7 +802,7 @@ outer:
 		default:
 		}
 		if t.shards.enqueue(e.Ref, timeSeries{
-			seriesLabels: lbls,
+			seriesLabels: lbls.lset,
 			metadata:     meta,
 			timestamp:    e.T,
 			value:        e.V,
@@ -834,6 +848,7 @@ outer:
 			continue
 		}
 		meta := t.seriesMetadata[h.Ref]
+
 		t.seriesMtx.Unlock()

 		backoff := model.Duration(5 * time.Millisecond)
@@ -844,7 +859,7 @@ outer:
 		default:
 		}
 		if t.shards.enqueue(h.Ref, timeSeries{
-			seriesLabels: lbls,
+			seriesLabels: lbls.lset,
 			metadata:     meta,
 			timestamp:    h.T,
 			histogram:    h.H,
@@ -889,6 +904,7 @@ outer:
 			continue
 		}
 		meta := t.seriesMetadata[h.Ref]
+
 		t.seriesMtx.Unlock()

 		backoff := model.Duration(5 * time.Millisecond)
@@ -899,7 +915,7 @@ outer:
 		default:
 		}
 		if t.shards.enqueue(h.Ref, timeSeries{
-			seriesLabels:   lbls,
+			seriesLabels:   lbls.lset,
 			metadata:       meta,
 			timestamp:      h.T,
 			floatHistogram: h.FH,
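All four enqueue call sites change the same way: the seriesLabels map now yields an internRef, so the label set sits one field deeper (lbls.lset instead of lbls). A condensed, self-contained illustration of that access change using stub types (labelSet stands in for labels.Labels):

package main

import "fmt"

// Stubs standing in for labels.Labels and the patch's internRef wrapper.
type labelSet []string

type internRef struct{ lset labelSet }

func main() {
	seriesLabels := map[int]internRef{
		7: {lset: labelSet{"__name__=up", "job=node"}},
	}

	// Before the patch the map value was the label set itself; now the
	// enqueue sites read it through the wrapper.
	if lbls, ok := seriesLabels[7]; ok {
		fmt.Println(lbls.lset)
	}
}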
@@ -960,8 +976,8 @@ func (t *QueueManager) Stop() {

 	// On shutdown, release the strings in the labels from the intern pool.
 	t.seriesMtx.Lock()
-	for _, labels := range t.seriesLabels {
-		t.releaseLabels(labels)
+	for k := range t.seriesLabels {
+		delete(t.seriesLabels, k)
 	}
 	t.seriesMtx.Unlock()
 	t.metrics.unregister()
@@ -990,10 +1006,11 @@ func (t *QueueManager) StoreSeries(series []record.RefSeries, index int) {
 		// We should not ever be replacing a series labels in the map, but just
 		// in case we do we need to ensure we do not leak the replaced interned
 		// strings.
-		if orig, ok := t.seriesLabels[s.Ref]; ok {
-			t.releaseLabels(orig)
+		if _, ok := t.seriesLabels[s.Ref]; ok {
+			delete(t.seriesLabels, s.Ref)
 		}
-		t.seriesLabels[s.Ref] = lbls
+
+		t.seriesLabels[s.Ref] = internRef{lset: lbls}
 	}
 }

@@ -1037,7 +1054,8 @@ func (t *QueueManager) SeriesReset(index int) {
 	for k, v := range t.seriesSegmentIndexes {
 		if v < index {
 			delete(t.seriesSegmentIndexes, k)
-			t.releaseLabels(t.seriesLabels[k])
+			//t.releaseLabels(t.seriesLabels[k])
+			//delete(t.seriesLabels, k)
 			delete(t.seriesLabels, k)
 			delete(t.seriesMetadata, k)
 			delete(t.droppedSeries, k)
@@ -1060,11 +1078,10 @@ func (t *QueueManager) client() WriteClient {
 }

 func (t *QueueManager) internLabels(lbls labels.Labels) {
-	lbls.InternStrings(t.interner.intern)
-}
-
-func (t *QueueManager) releaseLabels(ls labels.Labels) {
-	ls.ReleaseStrings(t.interner.release)
+	for i := range lbls {
+		t.interner.intern(lbls[i].Name)
+		t.interner.intern(lbls[i].Value)
+	}
 }

 // processExternalLabels merges externalLabels into b. If b contains
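internLabels no longer goes through labels.InternStrings; it walks each name/value pair and interns it directly (indexing that assumes the slice-backed labels build). releaseLabels is deleted outright, and its call sites in Stop, StoreSeries, and SeriesReset are commented out or replaced with plain map deletes, apparently on the theory that unique's weakly held canonical strings make manual release bookkeeping unnecessary; the pool's own release method remains and is still exercised by the tests. A sketch of the same walk returning the handles it creates, with a hypothetical helper name (e.g. to stash them in internRef):

// Hypothetical variant of internLabels that keeps the handles it creates;
// assumes the slice-backed labels.Labels type ([]Label).
func internAll(p *pool, lbls labels.Labels) []unique.Handle[string] {
	handles := make([]unique.Handle[string], 0, 2*len(lbls))
	for i := range lbls {
		handles = append(handles, p.intern(lbls[i].Name))
		handles = append(handles, p.intern(lbls[i].Value))
	}
	return handles
}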