tsdb: use NewRangeHead instead. (#7793)
Signed-off-by: johncming <johncming@yahoo.com>
commit d19fc71903
parent a55c69c4c3

 tsdb/db.go | 24 ++++--------------------
 1 file changed, 4 insertions(+), 20 deletions(-)
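The four hunks below are mechanical: each open-coded &RangeHead{...} struct literal is replaced by a call to the NewRangeHead constructor. Judging only by the struct literals removed in this diff, the constructor is presumably a thin wrapper along these lines (a sketch inferred from the diff, not quoted from the source; the actual definition lives alongside RangeHead, presumably in tsdb/head.go):

    // NewRangeHead returns a *RangeHead over head, bounded to the
    // given mint and maxt.
    // Sketch reconstructed from the struct literals removed below.
    func NewRangeHead(head *Head, mint, maxt int64) *RangeHead {
        return &RangeHead{
            head: head,
            mint: mint,
            maxt: maxt,
        }
    }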
diff --git a/tsdb/db.go b/tsdb/db.go
@@ -346,11 +346,7 @@ func (db *DBReadOnly) FlushWAL(dir string) (returnErr error) {
 	}
 	mint := head.MinTime()
 	maxt := head.MaxTime()
-	rh := &RangeHead{
-		head: head,
-		mint: mint,
-		maxt: maxt,
-	}
+	rh := NewRangeHead(head, mint, maxt)
 	compactor, err := NewLeveledCompactor(
 		context.Background(),
 		nil,
@@ -1367,11 +1363,7 @@ func (db *DB) Snapshot(dir string, withHead bool) error {
 
 	mint := db.head.MinTime()
 	maxt := db.head.MaxTime()
-	head := &RangeHead{
-		head: db.head,
-		mint: mint,
-		maxt: maxt,
-	}
+	head := NewRangeHead(db.head, mint, maxt)
 	// Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime).
 	// Because of this block intervals are always +1 than the total samples it includes.
 	if _, err := db.compactor.Write(dir, head, mint, maxt+1, nil); err != nil {
@@ -1393,11 +1385,7 @@ func (db *DB) Querier(_ context.Context, mint, maxt int64) (storage.Querier, err
 		}
 	}
 	if maxt >= db.head.MinTime() {
-		blocks = append(blocks, &RangeHead{
-			head: db.head,
-			mint: mint,
-			maxt: maxt,
-		})
+		blocks = append(blocks, NewRangeHead(db.head, mint, maxt))
 	}
 
 	blockQueriers := make([]storage.Querier, 0, len(blocks))
@@ -1430,11 +1418,7 @@ func (db *DB) ChunkQuerier(_ context.Context, mint, maxt int64) (storage.ChunkQu
 		}
 	}
 	if maxt >= db.head.MinTime() {
-		blocks = append(blocks, &RangeHead{
-			head: db.head,
-			mint: mint,
-			maxt: maxt,
-		})
+		blocks = append(blocks, NewRangeHead(db.head, mint, maxt))
 	}
 
 	blockQueriers := make([]storage.ChunkQuerier, 0, len(blocks))
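A note on the Snapshot hunk: db.compactor.Write(dir, head, mint, maxt+1, nil) passes maxt+1 because, per the comment preserved in the diff, block intervals are half-open, [b.MinTime, b.MaxTime). With hypothetical numbers: if the newest head sample sits at t=1000, the block must be written with MaxTime 1001 so that the sample at 1000 still falls inside [mint, 1001).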