Mirror of https://github.com/prometheus/prometheus.git (synced 2025-03-05 20:59:13 -08:00)

Merge pull request #212 from prometheus/fabxc

Cleanup, small refactoring, expose a few things

Commit ad3c4849a9
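
Taken together, the diff below renames the map-backed tombstoneReader type to memTombstones, widens the TombstoneReader interface (Get now returns an error, and the new Iter walks all intervals), exposes PostingsForMatchers, LookupChunkSeries, ChunkSeriesSet, and EmptyTombstoneReader as package API, and changes Compactor.Write to return the ULID of the block it writes.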
block.go (27 changes)

@@ -142,10 +142,9 @@ type Block struct {
 	dir  string
 	meta BlockMeta

 	chunkr ChunkReader
 	indexr IndexReader
-
-	tombstones tombstoneReader
+	tombstones TombstoneReader
 }

 // OpenBlock opens the block in the directory. It can be passed a chunk pool, which is used
@@ -284,8 +283,7 @@ func (pb *Block) Delete(mint, maxt int64, ms ...labels.Matcher) error {
 		return ErrClosing
 	}

-	pr := newPostingsReader(pb.indexr)
-	p, absent, err := pr.Select(ms...)
+	p, absent, err := PostingsForMatchers(pb.indexr, ms...)
 	if err != nil {
 		return errors.Wrap(err, "select series")
 	}
@@ -293,7 +291,7 @@ func (pb *Block) Delete(mint, maxt int64, ms ...labels.Matcher) error {
 	ir := pb.indexr

 	// Choose only valid postings which have chunks in the time-range.
-	stones := map[uint64]Intervals{}
+	stones := memTombstones{}

 	var lset labels.Labels
 	var chks []ChunkMeta
@@ -325,16 +323,21 @@ Outer:
 		return p.Err()
 	}

-	// Merge the current and new tombstones.
-	for k, v := range stones {
-		pb.tombstones.add(k, v[0])
+	err = pb.tombstones.Iter(func(id uint64, ivs Intervals) error {
+		for _, iv := range ivs {
+			stones.add(id, iv)
+			pb.meta.Stats.NumTombstones++
+		}
+		return nil
+	})
+	if err != nil {
+		return err
 	}
+	pb.tombstones = stones

 	if err := writeTombstoneFile(pb.dir, pb.tombstones); err != nil {
 		return err
 	}

-	pb.meta.Stats.NumTombstones = uint64(len(pb.tombstones))
 	return writeMetaFile(pb.dir, &pb.meta)
 }
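
The new Delete path above inverts the old merge: instead of mutating the block's existing tombstones in place, it folds them into the freshly collected set via the Iter callback and then swaps the set in. A minimal standalone sketch of that pattern, with simplified stand-ins for the Interval, Intervals, and memTombstones types in this repository:

package main

import "fmt"

// Simplified stand-ins for the repository's types, for illustration only.
type Interval struct{ Mint, Maxt int64 }
type Intervals []Interval
type memTombstones map[uint64]Intervals

// add appends an interval; the real implementation also coalesces overlaps.
func (t memTombstones) add(ref uint64, itv Interval) { t[ref] = append(t[ref], itv) }

func (t memTombstones) Iter(f func(uint64, Intervals) error) error {
	for ref, ivs := range t {
		if err := f(ref, ivs); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	existing := memTombstones{1: {{0, 10}}} // tombstones already on the block
	stones := memTombstones{2: {{5, 8}}}    // deletions collected by this call

	// Fold the existing tombstones into the new set, as Block.Delete now does.
	_ = existing.Iter(func(id uint64, ivs Intervals) error {
		for _, iv := range ivs {
			stones.add(id, iv)
		}
		return nil
	})
	fmt.Println(stones) // map[1:[{0 10}] 2:[{5 8}]]
}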
compact.go (27 changes)

@@ -52,7 +52,7 @@ type Compactor interface {
 	Plan(dir string) ([]string, error)

 	// Write persists a Block into a directory.
-	Write(dest string, b BlockReader, mint, maxt int64) error
+	Write(dest string, b BlockReader, mint, maxt int64) (ulid.ULID, error)

 	// Compact runs compaction against the provided directories. Must
 	// only be called concurrently with results of Plan().
@@ -321,7 +321,7 @@ func (c *LeveledCompactor) Compact(dest string, dirs ...string) (err error) {
 	return c.write(dest, compactBlockMetas(uid, metas...), blocks...)
 }

-func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64) error {
+func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64) (ulid.ULID, error) {
 	entropy := rand.New(rand.NewSource(time.Now().UnixNano()))
 	uid := ulid.MustNew(ulid.Now(), entropy)

@@ -333,7 +333,7 @@ func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64) e
 	meta.Compaction.Level = 1
 	meta.Compaction.Sources = []ulid.ULID{uid}

-	return c.write(dest, meta, b)
+	return uid, c.write(dest, meta, b)
 }

 // instrumentedChunkWriter is used for level 1 compactions to record statistics
@@ -418,7 +418,7 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockRe
 	}

 	// Create an empty tombstones file.
-	if err := writeTombstoneFile(tmp, newEmptyTombstoneReader()); err != nil {
+	if err := writeTombstoneFile(tmp, EmptyTombstoneReader()); err != nil {
 		return errors.Wrap(err, "write new tombstones file")
 	}

@@ -453,7 +453,7 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockRe
 // of the provided blocks. It returns meta information for the new block.
 func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter) error {
 	var (
-		set        compactionSet
+		set        ChunkSeriesSet
 		allSymbols = make(map[string]struct{}, 1<<16)
 		closers    = []io.Closer{}
 	)
@@ -597,18 +597,11 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta,
 	return nil
 }

-type compactionSet interface {
-	Next() bool
-	At() (labels.Labels, []ChunkMeta, Intervals)
-	Err() error
-}
-
 type compactionSeriesSet struct {
 	p          Postings
 	index      IndexReader
 	chunks     ChunkReader
 	tombstones TombstoneReader
-	series     SeriesSet

 	l labels.Labels
 	c []ChunkMeta
@@ -631,7 +624,11 @@ func (c *compactionSeriesSet) Next() bool {
 	}
 	var err error

-	c.intervals = c.tombstones.Get(c.p.At())
+	c.intervals, err = c.tombstones.Get(c.p.At())
+	if err != nil {
+		c.err = errors.Wrap(err, "get tombstones")
+		return false
+	}

 	if err = c.index.Series(c.p.At(), &c.l, &c.c); err != nil {
 		c.err = errors.Wrapf(err, "get series %d", c.p.At())
@@ -675,7 +672,7 @@ func (c *compactionSeriesSet) At() (labels.Labels, []ChunkMeta, Intervals) {
 }

 type compactionMerger struct {
-	a, b compactionSet
+	a, b ChunkSeriesSet

 	aok, bok bool
 	l        labels.Labels
@@ -688,7 +685,7 @@ type compactionSeries struct {
 	chunks []*ChunkMeta
 }

-func newCompactionMerger(a, b compactionSet) (*compactionMerger, error) {
+func newCompactionMerger(a, b ChunkSeriesSet) (*compactionMerger, error) {
 	c := &compactionMerger{
 		a: a,
 		b: b,
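
Since Write now hands back the ULID of the block it creates, callers can derive the new block's directory without rescanning dest. A hedged sketch of a caller, assuming it lives inside this package where Compactor and BlockReader are defined (persistHead and its parameters are hypothetical; it relies on path/filepath and github.com/pkg/errors):

// persistHead is a hypothetical caller illustrating the new contract:
// the returned ULID doubles as the name of the freshly written block directory.
func persistHead(c Compactor, dir string, head BlockReader, mint, maxt int64) (string, error) {
	uid, err := c.Write(dir, head, mint, maxt)
	if err != nil {
		return "", errors.Wrap(err, "persist head block")
	}
	return filepath.Join(dir, uid.String()), nil
}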
db.go (5 changes)

@@ -351,7 +351,7 @@ func (db *DB) compact() (changes bool, err error) {
 		mint: mint,
 		maxt: maxt,
 	}
-	if err = db.compactor.Write(db.dir, head, mint, maxt); err != nil {
+	if _, err = db.compactor.Write(db.dir, head, mint, maxt); err != nil {
 		return changes, errors.Wrap(err, "persist head block")
 	}
 	changes = true
@@ -619,7 +619,8 @@ func (db *DB) Snapshot(dir string) error {
 			return errors.Wrap(err, "error snapshotting headblock")
 		}
 	}
-	return db.compactor.Write(dir, db.head, db.head.MinTime(), db.head.MaxTime())
+	_, err := db.compactor.Write(dir, db.head, db.head.MinTime(), db.head.MaxTime())
+	return errors.Wrap(err, "snapshot head block")
 }

 // Querier returns a new querier over the data partition for the given time range.
head.go (7 changes)

@@ -66,7 +66,7 @@ type Head struct {

 	postings *memPostings // postings lists for terms

-	tombstones tombstoneReader
+	tombstones memTombstones
 }

 type headMetrics struct {
@@ -186,7 +186,7 @@ func NewHead(r prometheus.Registerer, l log.Logger, wal WAL, chunkRange int64) (
 		values:   map[string]stringset{},
 		symbols:  map[string]struct{}{},
 		postings: newUnorderedMemPostings(),
-		tombstones: newEmptyTombstoneReader(),
+		tombstones: memTombstones{},
 	}
 	h.metrics = newHeadMetrics(h, r)

@@ -574,8 +574,7 @@ func (h *Head) Delete(mint, maxt int64, ms ...labels.Matcher) error {

 	ir := h.indexRange(mint, maxt)

-	pr := newPostingsReader(ir)
-	p, absent, err := pr.Select(ms...)
+	p, absent, err := PostingsForMatchers(ir, ms...)
 	if err != nil {
 		return errors.Wrap(err, "select series")
 	}
head_test.go

@@ -318,7 +318,7 @@ func TestHeadDeleteSimple(t *testing.T) {
 Outer:
 	for _, c := range cases {
 		// Reset the tombstones.
-		head.tombstones = newEmptyTombstoneReader()
+		head.tombstones = memTombstones{}

 		// Delete the ranges.
 		for _, r := range c.intervals {
querier.go (73 changes)

@@ -151,22 +151,13 @@ type blockQuerier struct {
 }

 func (q *blockQuerier) Select(ms ...labels.Matcher) (SeriesSet, error) {
-	pr := newPostingsReader(q.index)
-
-	p, absent, err := pr.Select(ms...)
+	base, err := LookupChunkSeries(q.index, q.tombstones, ms...)
 	if err != nil {
 		return nil, err
 	}

 	return &blockSeriesSet{
 		set: &populatedChunkSeries{
-			set: &baseChunkSeries{
-				p:      p,
-				index:  q.index,
-				absent: absent,
-
-				tombstones: q.tombstones,
-			},
+			set:    base,
 			chunks: q.chunks,
 			mint:   q.mint,
 			maxt:   q.maxt,
@@ -208,16 +199,10 @@ func (q *blockQuerier) Close() error {
 	return merr.Err()
 }

-// postingsReader is used to select matching postings from an IndexReader.
-type postingsReader struct {
-	index IndexReader
-}
-
-func newPostingsReader(i IndexReader) *postingsReader {
-	return &postingsReader{index: i}
-}
-
-func (r *postingsReader) Select(ms ...labels.Matcher) (Postings, []string, error) {
+// PostingsForMatchers assembles a single postings iterator against the index reader
+// based on the given matchers. It returns a list of label names that must be manually
+// checked to not exist in series the postings list points to.
+func PostingsForMatchers(index IndexReader, ms ...labels.Matcher) (Postings, []string, error) {
 	var (
 		its    []Postings
 		absent []string
@@ -229,16 +214,13 @@ func (r *postingsReader) Select(ms ...labels.Matcher) (Postings, []string, error
 			absent = append(absent, m.Name())
 			continue
 		}
-		it, err := r.selectSingle(m)
+		it, err := postingsForMatcher(index, m)
 		if err != nil {
 			return nil, nil, err
 		}
 		its = append(its, it)
 	}
-
-	p := Intersect(its...)
-
-	return r.index.SortedPostings(p), absent, nil
+	return index.SortedPostings(Intersect(its...)), absent, nil
 }

 // tuplesByPrefix uses binary search to find prefix matches within ts.
@@ -272,17 +254,17 @@ func tuplesByPrefix(m *labels.PrefixMatcher, ts StringTuples) ([]string, error)
 	return matches, nil
 }

-func (r *postingsReader) selectSingle(m labels.Matcher) (Postings, error) {
+func postingsForMatcher(index IndexReader, m labels.Matcher) (Postings, error) {
 	// Fast-path for equal matching.
 	if em, ok := m.(*labels.EqualMatcher); ok {
-		it, err := r.index.Postings(em.Name(), em.Value())
+		it, err := index.Postings(em.Name(), em.Value())
 		if err != nil {
 			return nil, err
 		}
 		return it, nil
 	}

-	tpls, err := r.index.LabelValues(m.Name())
+	tpls, err := index.LabelValues(m.Name())
 	if err != nil {
 		return nil, err
 	}
@@ -313,7 +295,7 @@ func (r *postingsReader) selectSingle(m labels.Matcher) (Postings, error) {
 	var rit []Postings

 	for _, v := range res {
-		it, err := r.index.Postings(m.Name(), v)
+		it, err := index.Postings(m.Name(), v)
 		if err != nil {
 			return nil, err
 		}
@@ -435,7 +417,7 @@ func (s *mergedSeriesSet) Next() bool {
 	return true
 }

-type chunkSeriesSet interface {
+type ChunkSeriesSet interface {
 	Next() bool
 	At() (labels.Labels, []ChunkMeta, Intervals)
 	Err() error
@@ -455,6 +437,24 @@ type baseChunkSeries struct {
 	err error
 }

+// LookupChunkSeries retrieves all series for the given matchers and returns a ChunkSeriesSet
+// over them. It drops chunks based on tombstones in the given reader.
+func LookupChunkSeries(ir IndexReader, tr TombstoneReader, ms ...labels.Matcher) (ChunkSeriesSet, error) {
+	if tr == nil {
+		tr = EmptyTombstoneReader()
+	}
+	p, absent, err := PostingsForMatchers(ir, ms...)
+	if err != nil {
+		return nil, err
+	}
+	return &baseChunkSeries{
+		p:          p,
+		index:      ir,
+		tombstones: tr,
+		absent:     absent,
+	}, nil
+}
+
 func (s *baseChunkSeries) At() (labels.Labels, []ChunkMeta, Intervals) {
 	return s.lset, s.chks, s.intervals
 }
@@ -465,6 +465,7 @@ func (s *baseChunkSeries) Next() bool {
 	var (
 		lset   labels.Labels
 		chunks []ChunkMeta
+		err    error
 	)
 Outer:
 	for s.p.Next() {
@@ -487,7 +488,11 @@ Outer:

 		s.lset = lset
 		s.chks = chunks
-		s.intervals = s.tombstones.Get(s.p.At())
+		s.intervals, err = s.tombstones.Get(s.p.At())
+		if err != nil {
+			s.err = errors.Wrap(err, "get tombstones")
+			return false
+		}

 		if len(s.intervals) > 0 {
 			// Only those chunks that are not entirely deleted.
@@ -513,7 +518,7 @@ Outer:
 // with known chunk references. It filters out chunks that do not fit the
 // given time range.
 type populatedChunkSeries struct {
-	set    chunkSeriesSet
+	set    ChunkSeriesSet
 	chunks ChunkReader
 	mint, maxt int64

@@ -570,7 +575,7 @@ func (s *populatedChunkSeries) Next() bool {

 // blockSeriesSet is a set of series from an inverted index query.
 type blockSeriesSet struct {
-	set chunkSeriesSet
+	set ChunkSeriesSet
 	err error
 	cur Series

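
Exporting PostingsForMatchers and LookupChunkSeries means code outside this file can resolve matchers against an index directly. A sketch of what a consumer might look like, assuming this package's context (collectChunks and the "job" matcher are hypothetical; labels.NewEqualMatcher is taken from this repository's labels package):

// collectChunks is a hypothetical consumer of the newly exported helpers.
func collectChunks(ir IndexReader, tr TombstoneReader) error {
	set, err := LookupChunkSeries(ir, tr, labels.NewEqualMatcher("job", "api"))
	if err != nil {
		return err
	}
	for set.Next() {
		lset, chks, dranges := set.At()
		// lset: series labels; chks: chunk metadata in the block;
		// dranges: deleted intervals that readers must skip.
		_, _, _ = lset, chks, dranges
	}
	return set.Err()
}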
querier_test.go

@@ -454,7 +454,7 @@ Outer:
 	querier := &blockQuerier{
 		index:  ir,
 		chunks: cr,
-		tombstones: newEmptyTombstoneReader(),
+		tombstones: EmptyTombstoneReader(),

 		mint: c.mint,
 		maxt: c.maxt,
@@ -506,7 +506,7 @@ func TestBlockQuerierDelete(t *testing.T) {
 			chunks [][]sample
 		}

-		tombstones tombstoneReader
+		tombstones TombstoneReader
 		queries    []query
 	}{
 		data: []struct {
@@ -554,13 +554,11 @@ func TestBlockQuerierDelete(t *testing.T) {
 				},
 			},
 		},
-		tombstones: newTombstoneReader(
-			map[uint64]Intervals{
-				1: Intervals{{1, 3}},
-				2: Intervals{{1, 3}, {6, 10}},
-				3: Intervals{{6, 10}},
-			},
-		),
+		tombstones: memTombstones{
+			1: Intervals{{1, 3}},
+			2: Intervals{{1, 3}, {6, 10}},
+			3: Intervals{{6, 10}},
+		},

 		queries: []query{
 			{
@@ -736,7 +734,7 @@ func TestBaseChunkSeries(t *testing.T) {
 		bcs := &baseChunkSeries{
 			p:     newListPostings(tc.postings),
 			index: mi,
-			tombstones: newEmptyTombstoneReader(),
+			tombstones: EmptyTombstoneReader(),
 		}

 		i := 0
tombstones.go

@@ -35,12 +35,17 @@ const (

 // TombstoneReader gives access to tombstone intervals by series reference.
 type TombstoneReader interface {
-	Get(ref uint64) Intervals
+	// Get returns deletion intervals for the series with the given reference.
+	Get(ref uint64) (Intervals, error)
+
+	// Iter calls the given function for each encountered interval.
+	Iter(func(uint64, Intervals) error) error
+
+	// Close any underlying resources
 	Close() error
 }

-func writeTombstoneFile(dir string, tr tombstoneReader) error {
+func writeTombstoneFile(dir string, tr TombstoneReader) error {
 	path := filepath.Join(dir, tombstoneFilename)
 	tmp := path + ".tmp"
 	hash := newCRC32()
@@ -67,19 +72,21 @@ func writeTombstoneFile(dir string, tr tombstoneReader) error {

 	mw := io.MultiWriter(f, hash)

-	for k, v := range tr {
-		for _, itv := range v {
+	tr.Iter(func(ref uint64, ivs Intervals) error {
+		for _, iv := range ivs {
 			buf.reset()
-			buf.putUvarint64(k)
-			buf.putVarint64(itv.Mint)
-			buf.putVarint64(itv.Maxt)
+			buf.putUvarint64(ref)
+			buf.putVarint64(iv.Mint)
+			buf.putVarint64(iv.Maxt)

 			_, err = mw.Write(buf.get())
 			if err != nil {
 				return err
 			}
 		}
-	}
+		return nil
+	})

 	_, err = f.Write(hash.Sum(nil))
 	if err != nil {
@@ -100,7 +107,7 @@ type Stone struct {
 	intervals Intervals
 }

-func readTombstones(dir string) (tombstoneReader, error) {
+func readTombstones(dir string) (memTombstones, error) {
 	b, err := ioutil.ReadFile(filepath.Join(dir, tombstoneFilename))
 	if err != nil {
 		return nil, err
@@ -131,7 +138,8 @@ func readTombstones(dir string) (tombstoneReader, error) {
 		return nil, errors.New("checksum did not match")
 	}

-	stonesMap := newEmptyTombstoneReader()
+	stonesMap := memTombstones{}
+
 	for d.len() > 0 {
 		k := d.uvarint64()
 		mint := d.varint64()
@@ -143,28 +151,36 @@ func readTombstones(dir string) (tombstoneReader, error) {
 		stonesMap.add(k, Interval{mint, maxt})
 	}

-	return newTombstoneReader(stonesMap), nil
+	return stonesMap, nil
 }

-type tombstoneReader map[uint64]Intervals
+type memTombstones map[uint64]Intervals

-func newTombstoneReader(ts map[uint64]Intervals) tombstoneReader {
-	return tombstoneReader(ts)
+var emptyTombstoneReader = memTombstones{}
+
+// EmptyTombstoneReader returns a TombstoneReader that is always empty.
+func EmptyTombstoneReader() TombstoneReader {
+	return emptyTombstoneReader
 }

-func newEmptyTombstoneReader() tombstoneReader {
-	return tombstoneReader(make(map[uint64]Intervals))
+func (t memTombstones) Get(ref uint64) (Intervals, error) {
+	return t[ref], nil
 }

-func (t tombstoneReader) Get(ref uint64) Intervals {
-	return t[ref]
+func (t memTombstones) Iter(f func(uint64, Intervals) error) error {
+	for ref, ivs := range t {
+		if err := f(ref, ivs); err != nil {
+			return err
+		}
+	}
+	return nil
 }

-func (t tombstoneReader) add(ref uint64, itv Interval) {
+func (t memTombstones) add(ref uint64, itv Interval) {
 	t[ref] = t[ref].add(itv)
 }

-func (tombstoneReader) Close() error {
+func (memTombstones) Close() error {
 	return nil
 }
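
With Get and Iter both returning errors, the interface can now be satisfied by readers that are not in-memory maps, for example one backed by a file that can fail mid-read. A trivial hedged sketch of an alternative implementation (noopTombstones is hypothetical, not part of this change):

// noopTombstones reports no deletions; it shows the full surface a
// custom TombstoneReader must implement after this change.
type noopTombstones struct{}

func (noopTombstones) Get(ref uint64) (Intervals, error)        { return nil, nil }
func (noopTombstones) Iter(func(uint64, Intervals) error) error { return nil }
func (noopTombstones) Close() error                             { return nil }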
tombstones_test.go

@@ -29,7 +29,7 @@ func TestWriteAndReadbackTombStones(t *testing.T) {

 	ref := uint64(0)

-	stones := make(map[uint64]Intervals)
+	stones := memTombstones{}
 	// Generate the tombstones.
 	for i := 0; i < 100; i++ {
 		ref += uint64(rand.Int31n(10)) + 1
@@ -43,13 +43,13 @@ func TestWriteAndReadbackTombStones(t *testing.T) {
 		stones[ref] = dranges
 	}

-	require.NoError(t, writeTombstoneFile(tmpdir, newTombstoneReader(stones)))
+	require.NoError(t, writeTombstoneFile(tmpdir, stones))

 	restr, err := readTombstones(tmpdir)
 	require.NoError(t, err)
-	exptr := newTombstoneReader(stones)
+
 	// Compare the two readers.
-	require.Equal(t, exptr, restr)
+	require.Equal(t, stones, restr)
 }

 func TestAddingNewIntervals(t *testing.T) {