tsdb: use NewRangeHead instead. (#7793)

Signed-off-by: johncming <johncming@yahoo.com>
This commit is contained in:
johncming 2020-08-13 17:55:35 +08:00 committed by GitHub
parent a55c69c4c3
commit d19fc71903
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23

View file

@@ -346,11 +346,7 @@ func (db *DBReadOnly) FlushWAL(dir string) (returnErr error) {
 	}
 	mint := head.MinTime()
 	maxt := head.MaxTime()
-	rh := &RangeHead{
-		head: head,
-		mint: mint,
-		maxt: maxt,
-	}
+	rh := NewRangeHead(head, mint, maxt)
 	compactor, err := NewLeveledCompactor(
 		context.Background(),
 		nil,
@@ -1367,11 +1363,7 @@ func (db *DB) Snapshot(dir string, withHead bool) error {
 	mint := db.head.MinTime()
 	maxt := db.head.MaxTime()
-	head := &RangeHead{
-		head: db.head,
-		mint: mint,
-		maxt: maxt,
-	}
+	head := NewRangeHead(db.head, mint, maxt)
 	// Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime).
 	// Because of this block intervals are always +1 than the total samples it includes.
 	if _, err := db.compactor.Write(dir, head, mint, maxt+1, nil); err != nil {
@@ -1393,11 +1385,7 @@ func (db *DB) Querier(_ context.Context, mint, maxt int64) (storage.Querier, error) {
 		}
 	}
 	if maxt >= db.head.MinTime() {
-		blocks = append(blocks, &RangeHead{
-			head: db.head,
-			mint: mint,
-			maxt: maxt,
-		})
+		blocks = append(blocks, NewRangeHead(db.head, mint, maxt))
 	}

 	blockQueriers := make([]storage.Querier, 0, len(blocks))
@@ -1430,11 +1418,7 @@ func (db *DB) ChunkQuerier(_ context.Context, mint, maxt int64) (storage.ChunkQuerier, error) {
 		}
 	}
 	if maxt >= db.head.MinTime() {
-		blocks = append(blocks, &RangeHead{
-			head: db.head,
-			mint: mint,
-			maxt: maxt,
-		})
+		blocks = append(blocks, NewRangeHead(db.head, mint, maxt))
 	}

 	blockQueriers := make([]storage.ChunkQuerier, 0, len(blocks))