mirror of
https://github.com/prometheus/prometheus.git
synced 2025-03-05 20:59:13 -08:00
Pass ref to SeriesLifecycleCallback.PostDeletion (#12626)
When a particular SeriesLifecycleCallback tries to optimize and run closer to the Head, keeping track of the HeadSeriesRef instead of the labelsets, it's impossible to handle the PostDeletion callback properly as there's no way to know which series refs were deleted from the head. This changes the callback to provide the series refs alongside the labelsets, so the implementation can choose what to do.

Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
This commit is contained in:
parent
95cad0b070
commit
61daa30bb1
13
tsdb/head.go
13
tsdb/head.go
|
@ -206,7 +206,7 @@ type SeriesLifecycleCallback interface {
|
|||
// PostCreation is called after creating a series to indicate a creation of series.
|
||||
PostCreation(labels.Labels)
|
||||
// PostDeletion is called after deletion of series.
|
||||
PostDeletion(...labels.Labels)
|
||||
PostDeletion(map[chunks.HeadSeriesRef]labels.Labels)
|
||||
}
|
||||
|
||||
// NewHead opens the head block in dir.
|
||||
|
@ -1763,15 +1763,16 @@ func newStripeSeries(stripeSize int, seriesCallback SeriesLifecycleCallback) *st
|
|||
func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) (_ map[storage.SeriesRef]struct{}, _ int, _, _ int64, minMmapFile int) {
|
||||
var (
|
||||
deleted = map[storage.SeriesRef]struct{}{}
|
||||
deletedForCallback = []labels.Labels{}
|
||||
rmChunks = 0
|
||||
actualMint int64 = math.MaxInt64
|
||||
minOOOTime int64 = math.MaxInt64
|
||||
deletedFromPrevStripe = 0
|
||||
)
|
||||
minMmapFile = math.MaxInt32
|
||||
// Run through all series and truncate old chunks. Mark those with no
|
||||
// chunks left as deleted and store their ID.
|
||||
for i := 0; i < s.size; i++ {
|
||||
deletedForCallback := make(map[chunks.HeadSeriesRef]labels.Labels, deletedFromPrevStripe)
|
||||
s.locks[i].Lock()
|
||||
|
||||
for hash, all := range s.hashes[i] {
|
||||
|
@ -1825,7 +1826,7 @@ func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) (
|
|||
deleted[storage.SeriesRef(series.ref)] = struct{}{}
|
||||
s.hashes[i].del(hash, series.lset)
|
||||
delete(s.series[j], series.ref)
|
||||
deletedForCallback = append(deletedForCallback, series.lset)
|
||||
deletedForCallback[series.ref] = series.lset
|
||||
|
||||
if i != j {
|
||||
s.locks[j].Unlock()
|
||||
|
@ -1837,8 +1838,8 @@ func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) (
|
|||
|
||||
s.locks[i].Unlock()
|
||||
|
||||
s.seriesLifecycleCallback.PostDeletion(deletedForCallback...)
|
||||
deletedForCallback = deletedForCallback[:0]
|
||||
s.seriesLifecycleCallback.PostDeletion(deletedForCallback)
|
||||
deletedFromPrevStripe = len(deletedForCallback)
|
||||
}
|
||||
|
||||
if actualMint == math.MaxInt64 {
|
||||
|
@ -2177,7 +2178,7 @@ type noopSeriesLifecycleCallback struct{}
|
|||
|
||||
func (noopSeriesLifecycleCallback) PreCreation(labels.Labels) error { return nil }
|
||||
func (noopSeriesLifecycleCallback) PostCreation(labels.Labels) {}
|
||||
func (noopSeriesLifecycleCallback) PostDeletion(...labels.Labels) {}
|
||||
func (noopSeriesLifecycleCallback) PostDeletion(map[chunks.HeadSeriesRef]labels.Labels) {}
|
||||
|
||||
func (h *Head) Size() int64 {
|
||||
var walSize, wblSize int64
|
||||
|
|
|
@ -22,6 +22,7 @@ import (
|
|||
"go.uber.org/atomic"
|
||||
|
||||
"github.com/prometheus/prometheus/model/labels"
|
||||
"github.com/prometheus/prometheus/tsdb/chunks"
|
||||
)
|
||||
|
||||
func BenchmarkHeadStripeSeriesCreate(b *testing.B) {
|
||||
|
@ -82,4 +83,4 @@ type failingSeriesLifecycleCallback struct{}
|
|||
|
||||
func (failingSeriesLifecycleCallback) PreCreation(labels.Labels) error { return errors.New("failed") }
|
||||
func (failingSeriesLifecycleCallback) PostCreation(labels.Labels) {}
|
||||
func (failingSeriesLifecycleCallback) PostDeletion(...labels.Labels) {}
|
||||
func (failingSeriesLifecycleCallback) PostDeletion(map[chunks.HeadSeriesRef]labels.Labels) {}
|
||||
|
|
Loading…
Reference in a new issue