tsdb: use NewRangeHead instead. (#7793)

Signed-off-by: johncming <johncming@yahoo.com>
This commit is contained in:
johncming 2020-08-13 17:55:35 +08:00 committed by GitHub
parent a55c69c4c3
commit d19fc71903
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23

View file

@@ -346,11 +346,7 @@ func (db *DBReadOnly) FlushWAL(dir string) (returnErr error) {
 	}
 	mint := head.MinTime()
 	maxt := head.MaxTime()
-	rh := &RangeHead{
-		head: head,
-		mint: mint,
-		maxt: maxt,
-	}
+	rh := NewRangeHead(head, mint, maxt)
 	compactor, err := NewLeveledCompactor(
 		context.Background(),
 		nil,
@@ -1367,11 +1363,7 @@ func (db *DB) Snapshot(dir string, withHead bool) error {
 	mint := db.head.MinTime()
 	maxt := db.head.MaxTime()
-	head := &RangeHead{
-		head: db.head,
-		mint: mint,
-		maxt: maxt,
-	}
+	head := NewRangeHead(db.head, mint, maxt)
 	// Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime).
 	// Because of this block intervals are always +1 than the total samples it includes.
 	if _, err := db.compactor.Write(dir, head, mint, maxt+1, nil); err != nil {
@@ -1393,11 +1385,7 @@ func (db *DB) Querier(_ context.Context, mint, maxt int64) (storage.Querier, err
 		}
 	}
 	if maxt >= db.head.MinTime() {
-		blocks = append(blocks, &RangeHead{
-			head: db.head,
-			mint: mint,
-			maxt: maxt,
-		})
+		blocks = append(blocks, NewRangeHead(db.head, mint, maxt))
 	}
 	blockQueriers := make([]storage.Querier, 0, len(blocks))
@@ -1430,11 +1418,7 @@ func (db *DB) ChunkQuerier(_ context.Context, mint, maxt int64) (storage.ChunkQu
 		}
 	}
 	if maxt >= db.head.MinTime() {
-		blocks = append(blocks, &RangeHead{
-			head: db.head,
-			mint: mint,
-			maxt: maxt,
-		})
+		blocks = append(blocks, NewRangeHead(db.head, mint, maxt))
 	}
 	blockQueriers := make([]storage.ChunkQuerier, 0, len(blocks))