Mirror of https://github.com/prometheus/prometheus.git (synced 2024-12-24 05:04:05 -08:00)
Remove multiple heads
This changes the structure to a single WAL backed by a single head block. Parts of the head block can be compacted. This relieves us of any head management and greatly simplifies consistency and isolation concerns, since there is only a single head.
This commit is contained in:
  parent 0fe67df9f2
  commit 3901b6e70b
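The resulting layout, as a minimal sketch (the `Head`, `WAL`, and `Block` names appear in this commit's code; the struct and its field names are illustrative, not the actual implementation):

	// Sketch only: the DB owns exactly one appendable head, which is
	// backed by exactly one write-ahead log. Everything else is an
	// immutable block that was compacted out of the head.
	type singleHeadDB struct {
		head   *Head   // the single in-memory head block
		wal    WAL     // the single WAL backing that head
		blocks []Block // persisted, read-only blocks
	}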
block.go: 48 changed lines
@@ -26,14 +26,21 @@ import (
	"github.com/prometheus/tsdb/labels"
)

// DiskBlock handles reads against a Block of time series data.
type DiskBlock interface {
	BlockReader

	// Directory where block data is stored.
	Dir() string

	// Stats returns statistics about the block.
	Meta() BlockMeta

	Delete(mint, maxt int64, m ...labels.Matcher) error

	Close() error
}

type BlockReader interface {
	// Index returns an IndexReader over the block's data.
	Index() IndexReader

@@ -42,31 +49,14 @@ type DiskBlock interface {

	// Tombstones returns a TombstoneReader over the block's deleted data.
	Tombstones() TombstoneReader

	// Delete deletes data from the block.
	Delete(mint, maxt int64, ms ...labels.Matcher) error

	// Close releases all underlying resources of the block.
	Close() error
}

// Block is an interface to a DiskBlock that can also be queried.
type Block interface {
	DiskBlock
	Queryable
	Snapshottable
}

// headBlock is a regular block that can still be appended to.
type headBlock interface {
	Block
	Appendable

	// ActiveWriters returns the number of currently active appenders.
	ActiveWriters() int
	// HighTimestamp returns the highest currently inserted timestamp.
	HighTimestamp() int64
}
// // Block is an interface to a DiskBlock that can also be queried.
// type Block interface {
// 	DiskBlock
// 	Queryable
// 	Snapshottable
// }

// Snapshottable defines an entity that can be backedup online.
type Snapshottable interface {

@@ -225,16 +215,6 @@ func (pb *persistedBlock) String() string {
	return pb.meta.ULID.String()
}

func (pb *persistedBlock) Querier(mint, maxt int64) Querier {
	return &blockQuerier{
		mint:       mint,
		maxt:       maxt,
		index:      pb.Index(),
		chunks:     pb.Chunks(),
		tombstones: pb.Tombstones(),
	}
}

func (pb *persistedBlock) Dir() string         { return pb.dir }
func (pb *persistedBlock) Index() IndexReader  { return pb.indexr }
func (pb *persistedBlock) Chunks() ChunkReader { return pb.chunkr }
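The interesting move above is carving `BlockReader` out of `DiskBlock`: compaction only needs read access, so (as compact.go below shows) an in-memory head can now be handed to the compactor exactly like a persisted block. A hypothetical consumer written against the narrower interface, using only methods visible in this diff:

	// countSeries walks the all-postings list of any readable block.
	// It needs nothing beyond BlockReader, so a head block works too.
	func countSeries(b BlockReader) (n int, err error) {
		all, err := b.Index().Postings("", "")
		if err != nil {
			return 0, err
		}
		for all.Next() {
			n++
		}
		return n, all.Err()
	}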
compact.go: 95 changed lines
@@ -14,7 +14,6 @@
package tsdb

import (
	"fmt"
	"math/rand"
	"os"
	"path/filepath"

@@ -51,7 +50,7 @@ type Compactor interface {
	Plan(dir string) ([]string, error)

	// Write persists a Block into a directory.
	Write(dest string, b Block) error
	Write(dest string, b BlockReader, mint, maxt int64) error

	// Compact runs compaction against the provided directories. Must
	// only be called concurrently with results of Plan().

@@ -124,8 +123,6 @@ type compactionInfo struct {
	mint, maxt int64
}

const compactionBlocksLen = 3

type dirMeta struct {
	dir  string
	meta *BlockMeta

@@ -258,9 +255,12 @@ func splitByRange(ds []dirMeta, tr int64) [][]dirMeta {
	return splitDirs
}

func compactBlockMetas(blocks ...BlockMeta) (res BlockMeta) {
	res.MinTime = blocks[0].MinTime
	res.MaxTime = blocks[len(blocks)-1].MaxTime
func compactBlockMetas(uid ulid.ULID, blocks ...*BlockMeta) *BlockMeta {
	res := &BlockMeta{
		ULID:    uid,
		MinTime: blocks[0].MinTime,
		MaxTime: blocks[len(blocks)-1].MaxTime,
	}

	sources := map[ulid.ULID]struct{}{}

@@ -271,10 +271,6 @@ func compactBlockMetas(blocks ...BlockMeta) (res BlockMeta) {
		for _, s := range b.Compaction.Sources {
			sources[s] = struct{}{}
		}
		// If it's an in memory block, its ULID goes into the sources.
		if b.Compaction.Level == 0 {
			sources[b.ULID] = struct{}{}
		}
	}
	res.Compaction.Level++

@@ -291,7 +287,8 @@ func compactBlockMetas(blocks ...BlockMeta) (res BlockMeta) {
// Compact creates a new block in the compactor's directory from the blocks in the
// provided directories.
func (c *LeveledCompactor) Compact(dest string, dirs ...string) (err error) {
	var blocks []Block
	var blocks []BlockReader
	var metas []*BlockMeta

	for _, d := range dirs {
		b, err := newPersistedBlock(d, c.opts.chunkPool)

@@ -300,31 +297,40 @@ func (c *LeveledCompactor) Compact(dest string, dirs ...string) (err error) {
		}
		defer b.Close()

		meta, err := readMetaFile(d)
		if err != nil {
			return err
		}

		metas = append(metas, meta)
		blocks = append(blocks, b)
	}

	entropy := rand.New(rand.NewSource(time.Now().UnixNano()))
	uid := ulid.MustNew(ulid.Now(), entropy)

	return c.write(dest, uid, blocks...)
	return c.write(dest, compactBlockMetas(uid, metas...), blocks...)
}

func (c *LeveledCompactor) Write(dest string, b Block) error {
	// Buffering blocks might have been created that often have no data.
	if b.Meta().Stats.NumSeries == 0 {
		return nil
	}

func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64) error {
	entropy := rand.New(rand.NewSource(time.Now().UnixNano()))
	uid := ulid.MustNew(ulid.Now(), entropy)

	return c.write(dest, uid, b)
	meta := &BlockMeta{
		ULID:    uid,
		MinTime: mint,
		MaxTime: maxt,
	}
	meta.Compaction.Level = 1
	meta.Compaction.Sources = []ulid.ULID{uid}

	return c.write(dest, meta, b)
}
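Note what the new `Write` does with the metadata: a block persisted straight from the head is stamped as compaction level 1 with its own ULID as its only source, so later compactions can track provenance without a head-specific code path. A hypothetical call site (the `db.compactor`, `db.head`, and `db.dir` names are assumptions; the signature is the one above):

	// Persist the head's data for [mint, maxt] as a new level-1 block.
	if err := db.compactor.Write(db.dir, db.head, mint, maxt); err != nil {
		return errors.Wrap(err, "persist head block")
	}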

// write creates a new block that is the union of the provided blocks into dir.
// It cleans up all files of the old blocks after completing successfully.
func (c *LeveledCompactor) write(dest string, uid ulid.ULID, blocks ...Block) (err error) {
	c.logger.Log("msg", "compact blocks", "blocks", fmt.Sprintf("%v", blocks))
func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockReader) (err error) {
	c.logger.Log("msg", "compact blocks", "count", len(blocks), "mint", meta.MinTime, "maxt", meta.MaxTime)

	defer func(t time.Time) {
		if err != nil {

@@ -334,7 +340,7 @@ func (c *LeveledCompactor) write(dest string, uid ulid.ULID, blocks ...Block) (e
		c.metrics.duration.Observe(time.Since(t).Seconds())
	}(time.Now())

	dir := filepath.Join(dest, uid.String())
	dir := filepath.Join(dest, meta.ULID.String())
	tmp := dir + ".tmp"

	if err = os.RemoveAll(tmp); err != nil {

@@ -356,11 +362,9 @@ func (c *LeveledCompactor) write(dest string, uid ulid.ULID, blocks ...Block) (e
		return errors.Wrap(err, "open index writer")
	}

	meta, err := c.populateBlock(blocks, indexw, chunkw)
	if err != nil {
	if err := c.populateBlock(blocks, meta, indexw, chunkw); err != nil {
		return errors.Wrap(err, "write compaction")
	}
	meta.ULID = uid

	if err = writeMetaFile(tmp, meta); err != nil {
		return errors.Wrap(err, "write merged meta")

@@ -398,18 +402,16 @@ func (c *LeveledCompactor) write(dest string, uid ulid.ULID, blocks ...Block) (e

// populateBlock fills the index and chunk writers with new data gathered as the union
// of the provided blocks. It returns meta information for the new block.
func (c *LeveledCompactor) populateBlock(blocks []Block, indexw IndexWriter, chunkw ChunkWriter) (*BlockMeta, error) {
func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter) error {
	var (
		set        compactionSet
		metas      []BlockMeta
		allSymbols = make(map[string]struct{}, 1<<16)
	)
	for i, b := range blocks {
		metas = append(metas, b.Meta())

		symbols, err := b.Index().Symbols()
		if err != nil {
			return nil, errors.Wrap(err, "read symbols")
			return errors.Wrap(err, "read symbols")
		}
		for s := range symbols {
			allSymbols[s] = struct{}{}

@@ -419,7 +421,7 @@ func (c *LeveledCompactor) populateBlock(blocks []Block, indexw IndexWriter, chu

		all, err := indexr.Postings("", "")
		if err != nil {
			return nil, err
			return err
		}
		all = indexr.SortedPostings(all)

@@ -431,7 +433,7 @@ func (c *LeveledCompactor) populateBlock(blocks []Block, indexw IndexWriter, chu
		}
		set, err = newCompactionMerger(set, s)
		if err != nil {
			return nil, err
			return err
		}
	}

@@ -440,11 +442,10 @@ func (c *LeveledCompactor) populateBlock(blocks []Block, indexw IndexWriter, chu
		postings = &memPostings{m: make(map[term][]uint32, 512)}
		values   = map[string]stringset{}
		i        = uint32(0)
		meta     = compactBlockMetas(metas...)
	)

	if err := indexw.AddSymbols(allSymbols); err != nil {
		return nil, errors.Wrap(err, "add symbols")
		return errors.Wrap(err, "add symbols")
	}

	for set.Next() {

@@ -462,7 +463,7 @@ func (c *LeveledCompactor) populateBlock(blocks []Block, indexw IndexWriter, chu
			newChunk := chunks.NewXORChunk()
			app, err := newChunk.Appender()
			if err != nil {
				return nil, err
				return err
			}

			it := &deletedIterator{it: chk.Chunk.Iterator(), intervals: dranges}

@@ -476,11 +477,11 @@ func (c *LeveledCompactor) populateBlock(blocks []Block, indexw IndexWriter, chu
			}
		}
		if err := chunkw.WriteChunks(chks...); err != nil {
			return nil, err
			return errors.Wrap(err, "write chunks")
		}

		if err := indexw.AddSeries(i, lset, chks...); err != nil {
			return nil, errors.Wrapf(err, "add series")
			return errors.Wrap(err, "add series")
		}

		meta.Stats.NumChunks += uint64(len(chks))

@@ -508,7 +509,7 @@ func (c *LeveledCompactor) populateBlock(blocks []Block, indexw IndexWriter, chu
		i++
	}
	if set.Err() != nil {
		return nil, set.Err()
		return errors.Wrap(set.Err(), "iterate compaction set")
	}

	s := make([]string, 0, 256)

@@ -519,13 +520,13 @@ func (c *LeveledCompactor) populateBlock(blocks []Block, indexw IndexWriter, chu
			s = append(s, x)
		}
		if err := indexw.WriteLabelIndex([]string{n}, s); err != nil {
			return nil, err
			return errors.Wrap(err, "write label index")
		}
	}

	for t := range postings.m {
		if err := indexw.WritePostings(t.name, t.value, postings.get(t)); err != nil {
			return nil, err
			return errors.Wrap(err, "write postings")
		}
	}
	// Write a postings list containing all series.

@@ -534,10 +535,10 @@ func (c *LeveledCompactor) populateBlock(blocks []Block, indexw IndexWriter, chu
		all[i] = uint32(i)
	}
	if err := indexw.WritePostings("", "", newListPostings(all)); err != nil {
		return nil, err
		return errors.Wrap(err, "write 'all' postings")
	}

	return &meta, nil
	return nil
}

type compactionSet interface {

@@ -572,9 +573,12 @@ func (c *compactionSeriesSet) Next() bool {
	if !c.p.Next() {
		return false
	}
	var err error

	c.intervals = c.tombstones.Get(c.p.At())

	if c.err = c.index.Series(c.p.At(), &c.l, &c.c); c.err != nil {
	if err = c.index.Series(c.p.At(), &c.l, &c.c); err != nil {
		c.err = errors.Wrapf(err, "get series %d", c.p.At())
		return false
	}

@@ -593,8 +597,9 @@ func (c *compactionSeriesSet) Next() bool {
	for i := range c.c {
		chk := &c.c[i]

		chk.Chunk, c.err = c.chunks.Chunk(chk.Ref)
		if c.err != nil {
		chk.Chunk, err = c.chunks.Chunk(chk.Ref)
		if err != nil {
			c.err = errors.Wrapf(err, "chunk %d not found", chk.Ref)
			return false
		}
	}
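Both `Compact` and `Write` above mint the new block's identity the same way, with an oklog/ulid ULID drawn from wall-clock entropy. The pattern in isolation, as a standalone sketch (the import path matches what tsdb vendored at the time):

	package main

	import (
		"fmt"
		"math/rand"
		"time"

		"github.com/oklog/ulid"
	)

	func main() {
		// A locally seeded math/rand source satisfies io.Reader, and
		// MustNew cannot fail with it, so the panic path is unreachable.
		entropy := rand.New(rand.NewSource(time.Now().UnixNano()))
		uid := ulid.MustNew(ulid.Now(), entropy)
		fmt.Println(uid)
	}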
db_test.go: 347 changed lines
@@ -15,8 +15,10 @@ package tsdb

import (
	"io/ioutil"
	"math"
	"math/rand"
	"os"
	"sort"
	"testing"

	"github.com/pkg/errors"

@@ -24,8 +26,20 @@ import (
	"github.com/stretchr/testify/require"
)

func openTestDB(t testing.TB, opts *Options) (db *DB, close func()) {
	tmpdir, _ := ioutil.TempDir("", "test")

	db, err := Open(tmpdir, nil, nil, opts)
	require.NoError(t, err)

	// Do not close the test database by default as it will deadlock on test failures.
	return db, func() {
		os.RemoveAll(tmpdir)
	}
}

// Convert a SeriesSet into a form useable with reflect.DeepEqual.
func readSeriesSet(ss SeriesSet) (map[string][]sample, error) {
func readSeriesSet(t testing.TB, ss SeriesSet) map[string][]sample {
	result := map[string][]sample{}

	for ss.Next() {

@@ -37,31 +51,28 @@ func readSeriesSet(ss SeriesSet) (map[string][]sample, error) {
			t, v := it.At()
			samples = append(samples, sample{t: t, v: v})
		}
		require.NoError(t, it.Err())

		name := series.Labels().String()
		result[name] = samples
		if err := ss.Err(); err != nil {
			return nil, err
		}
	}
	return result, nil
	require.NoError(t, ss.Err())

	return result
}
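Since `readSeriesSet` now takes the `testing.TB` and fails in place, call sites lose all of their error plumbing; the pattern used throughout the tests below reduces to:

	q := db.Querier(0, 1)
	res := readSeriesSet(t, q.Select(labels.NewEqualMatcher("foo", "bar")))
	require.Equal(t, map[string][]sample{}, res)
	require.NoError(t, q.Close())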

func TestDataAvailableOnlyAfterCommit(t *testing.T) {
	tmpdir, _ := ioutil.TempDir("", "test")
	defer os.RemoveAll(tmpdir)

	db, err := Open(tmpdir, nil, nil, nil)
	require.NoError(t, err)
	defer db.Close()
	db, close := openTestDB(t, nil)
	defer close()

	app := db.Appender()
	_, err = app.Add(labels.FromStrings("foo", "bar"), 0, 0)

	_, err := app.Add(labels.FromStrings("foo", "bar"), 0, 0)
	require.NoError(t, err)

	querier := db.Querier(0, 1)
	seriesSet, err := readSeriesSet(querier.Select(labels.NewEqualMatcher("foo", "bar")))
	require.NoError(t, err)
	seriesSet := readSeriesSet(t, querier.Select(labels.NewEqualMatcher("foo", "bar")))

	require.Equal(t, seriesSet, map[string][]sample{})
	require.NoError(t, querier.Close())

@@ -71,23 +82,17 @@ func TestDataAvailableOnlyAfterCommit(t *testing.T) {
	querier = db.Querier(0, 1)
	defer querier.Close()

	seriesSet, err = readSeriesSet(querier.Select(labels.NewEqualMatcher("foo", "bar")))
	require.NoError(t, err)
	seriesSet = readSeriesSet(t, querier.Select(labels.NewEqualMatcher("foo", "bar")))

	require.Equal(t, seriesSet, map[string][]sample{`{foo="bar"}`: []sample{{t: 0, v: 0}}})
}

func TestDataNotAvailableAfterRollback(t *testing.T) {
	tmpdir, _ := ioutil.TempDir("", "test")
	defer os.RemoveAll(tmpdir)

	db, err := Open(tmpdir, nil, nil, nil)
	if err != nil {
		t.Fatalf("Error opening database: %q", err)
	}
	defer db.Close()
	db, close := openTestDB(t, nil)
	defer close()

	app := db.Appender()
	_, err = app.Add(labels.FromStrings("foo", "bar"), 0, 0)
	_, err := app.Add(labels.FromStrings("foo", "bar"), 0, 0)
	require.NoError(t, err)

	err = app.Rollback()

@@ -96,22 +101,18 @@ func TestDataNotAvailableAfterRollback(t *testing.T) {
	querier := db.Querier(0, 1)
	defer querier.Close()

	seriesSet, err := readSeriesSet(querier.Select(labels.NewEqualMatcher("foo", "bar")))
	require.NoError(t, err)
	seriesSet := readSeriesSet(t, querier.Select(labels.NewEqualMatcher("foo", "bar")))

	require.Equal(t, seriesSet, map[string][]sample{})
}

func TestDBAppenderAddRef(t *testing.T) {
	tmpdir, _ := ioutil.TempDir("", "test")
	defer os.RemoveAll(tmpdir)

	db, err := Open(tmpdir, nil, nil, nil)
	require.NoError(t, err)
	defer db.Close()
	db, close := openTestDB(t, nil)
	defer close()

	app1 := db.Appender()

	ref, err := app1.Add(labels.FromStrings("a", "b"), 0, 0)
	ref, err := app1.Add(labels.FromStrings("a", "b"), 123, 0)
	require.NoError(t, err)

	// When a series is first created, refs don't work within that transaction.

@@ -122,35 +123,40 @@ func TestDBAppenderAddRef(t *testing.T) {
	require.NoError(t, err)

	app2 := db.Appender()
	defer app2.Rollback()

	ref, err = app2.Add(labels.FromStrings("a", "b"), 1, 1)
	ref, err = app2.Add(labels.FromStrings("a", "b"), 133, 1)
	require.NoError(t, err)

	// Ref must be prefixed with block ULID of the block we wrote to.
	id := db.blocks[len(db.blocks)-1].Meta().ULID
	require.Equal(t, string(id[:]), ref[:16])

	// Reference must be valid to add another sample.
	err = app2.AddFast(ref, 2, 2)
	err = app2.AddFast(ref, 143, 2)
	require.NoError(t, err)

	// AddFast for the same timestamp must fail if the generation in the reference
	// doesn't add up.
	refb := []byte(ref)
	refb[15] ^= refb[15]
	err = app2.AddFast(string(refb), 1, 1)
	err = app2.AddFast("abc_invalid_xyz", 1, 1)
	require.EqualError(t, errors.Cause(err), ErrNotFound.Error())

	require.NoError(t, app2.Commit())

	q := db.Querier(0, 200)
	res := readSeriesSet(t, q.Select(labels.NewEqualMatcher("a", "b")))

	require.Equal(t, map[string][]sample{
		labels.FromStrings("a", "b").String(): []sample{
			{t: 123, v: 0},
			{t: 133, v: 1},
			{t: 143, v: 2},
		},
	}, res)

	require.NoError(t, q.Close())
}

func TestDeleteSimple(t *testing.T) {
	numSamples := int64(10)

	tmpdir, _ := ioutil.TempDir("", "test")
	defer os.RemoveAll(tmpdir)
	db, close := openTestDB(t, nil)
	defer close()

	db, err := Open(tmpdir, nil, nil, nil)
	require.NoError(t, err)
	app := db.Appender()

	smpls := make([]float64, numSamples)

@@ -216,3 +222,246 @@ Outer:
	}
}
}

func TestAmendDatapointCausesError(t *testing.T) {
	db, close := openTestDB(t, nil)
	defer close()

	app := db.Appender()
	_, err := app.Add(labels.Labels{}, 0, 0)
	require.NoError(t, err, "Failed to add sample")
	require.NoError(t, app.Commit(), "Unexpected error committing appender")

	app = db.Appender()
	_, err = app.Add(labels.Labels{}, 0, 1)
	require.Equal(t, ErrAmendSample, err)
	require.NoError(t, app.Rollback(), "Unexpected error rolling back appender")
}

func TestDuplicateNaNDatapointNoAmendError(t *testing.T) {
	db, close := openTestDB(t, nil)
	defer close()

	app := db.Appender()
	_, err := app.Add(labels.Labels{}, 0, math.NaN())
	require.NoError(t, err, "Failed to add sample")
	require.NoError(t, app.Commit(), "Unexpected error committing appender")

	app = db.Appender()
	_, err = app.Add(labels.Labels{}, 0, math.NaN())
	require.NoError(t, err)
}

func TestNonDuplicateNaNDatapointsCausesAmendError(t *testing.T) {
	db, close := openTestDB(t, nil)
	defer close()

	app := db.Appender()
	_, err := app.Add(labels.Labels{}, 0, math.Float64frombits(0x7ff0000000000001))
	require.NoError(t, err, "Failed to add sample")
	require.NoError(t, app.Commit(), "Unexpected error committing appender")

	app = db.Appender()
	_, err = app.Add(labels.Labels{}, 0, math.Float64frombits(0x7ff0000000000002))
	require.Equal(t, ErrAmendSample, err)
}

func TestSkippingInvalidValuesInSameTxn(t *testing.T) {
	db, close := openTestDB(t, nil)
	defer close()

	// Append AmendedValue.
	app := db.Appender()
	_, err := app.Add(labels.Labels{{"a", "b"}}, 0, 1)
	require.NoError(t, err)
	_, err = app.Add(labels.Labels{{"a", "b"}}, 0, 2)
	require.NoError(t, err)
	require.NoError(t, app.Commit())

	// Make sure the right value is stored.
	q := db.Querier(0, 10)
	ss := q.Select(labels.NewEqualMatcher("a", "b"))
	ssMap := readSeriesSet(t, ss)

	require.Equal(t, map[string][]sample{
		labels.New(labels.Label{"a", "b"}).String(): []sample{{0, 1}},
	}, ssMap)

	require.NoError(t, q.Close())

	// Append Out of Order Value.
	app = db.Appender()
	_, err = app.Add(labels.Labels{{"a", "b"}}, 10, 3)
	require.NoError(t, err)
	_, err = app.Add(labels.Labels{{"a", "b"}}, 7, 5)
	require.NoError(t, err)
	require.NoError(t, app.Commit())

	q = db.Querier(0, 10)
	ss = q.Select(labels.NewEqualMatcher("a", "b"))
	ssMap = readSeriesSet(t, ss)

	require.Equal(t, map[string][]sample{
		labels.New(labels.Label{"a", "b"}).String(): []sample{{0, 1}, {10, 3}},
	}, ssMap)
	require.NoError(t, q.Close())
}

func TestDB_e2e(t *testing.T) {
	const (
		numDatapoints = 1000
		numRanges     = 1000
		timeInterval  = int64(3)
		maxTime       = int64(2 * 1000)
		minTime       = int64(200)
	)
	// Create 8 series with 1000 data-points of different ranges and run queries.
	lbls := [][]labels.Label{
		{
			{"a", "b"},
			{"instance", "localhost:9090"},
			{"job", "prometheus"},
		},
		{
			{"a", "b"},
			{"instance", "127.0.0.1:9090"},
			{"job", "prometheus"},
		},
		{
			{"a", "b"},
			{"instance", "127.0.0.1:9090"},
			{"job", "prom-k8s"},
		},
		{
			{"a", "b"},
			{"instance", "localhost:9090"},
			{"job", "prom-k8s"},
		},
		{
			{"a", "c"},
			{"instance", "localhost:9090"},
			{"job", "prometheus"},
		},
		{
			{"a", "c"},
			{"instance", "127.0.0.1:9090"},
			{"job", "prometheus"},
		},
		{
			{"a", "c"},
			{"instance", "127.0.0.1:9090"},
			{"job", "prom-k8s"},
		},
		{
			{"a", "c"},
			{"instance", "localhost:9090"},
			{"job", "prom-k8s"},
		},
	}

	seriesMap := map[string][]sample{}
	for _, l := range lbls {
		seriesMap[labels.New(l...).String()] = []sample{}
	}

	db, close := openTestDB(t, nil)
	defer close()

	app := db.Appender()

	for _, l := range lbls {
		lset := labels.New(l...)
		series := []sample{}

		ts := rand.Int63n(300)
		for i := 0; i < numDatapoints; i++ {
			v := rand.Float64()

			series = append(series, sample{ts, v})

			_, err := app.Add(lset, ts, v)
			require.NoError(t, err)

			ts += rand.Int63n(timeInterval) + 1
		}

		seriesMap[lset.String()] = series
	}

	require.NoError(t, app.Commit())

	// Query each selector on 1000 random time-ranges.
	queries := []struct {
		ms []labels.Matcher
	}{
		{
			ms: []labels.Matcher{labels.NewEqualMatcher("a", "b")},
		},
		{
			ms: []labels.Matcher{
				labels.NewEqualMatcher("a", "b"),
				labels.NewEqualMatcher("job", "prom-k8s"),
			},
		},
		{
			ms: []labels.Matcher{
				labels.NewEqualMatcher("a", "c"),
				labels.NewEqualMatcher("instance", "localhost:9090"),
				labels.NewEqualMatcher("job", "prometheus"),
			},
		},
		// TODO: Add Regexp Matchers.
	}

	for _, qry := range queries {
		matched := labels.Slice{}
		for _, ls := range lbls {
			s := labels.Selector(qry.ms)
			if s.Matches(ls) {
				matched = append(matched, ls)
			}
		}

		sort.Sort(matched)

		for i := 0; i < numRanges; i++ {
			mint := rand.Int63n(300)
			maxt := mint + rand.Int63n(timeInterval*int64(numDatapoints))

			t.Logf("run query %s, [%d, %d]", qry.ms, mint, maxt)

			expected := map[string][]sample{}

			// Build the mockSeriesSet.
			for _, m := range matched {
				smpls := boundedSamples(seriesMap[m.String()], mint, maxt)
				if len(smpls) > 0 {
					expected[m.String()] = smpls
				}
			}

			q := db.Querier(mint, maxt)
			ss := q.Select(qry.ms...)

			result := map[string][]sample{}

			for ss.Next() {
				x := ss.At()

				smpls, err := expandSeriesIterator(x.Iterator())
				require.NoError(t, err)

				if len(smpls) > 0 {
					result[x.Labels().String()] = smpls
				}
			}

			require.NoError(t, ss.Err())
			require.Equal(t, expected, result)

			q.Close()
		}
	}

	return
}
head_test.go: 934 changed lines
@@ -15,12 +15,8 @@ package tsdb

import (
	"io/ioutil"
	"math"
	"math/rand"
	"os"
	"sort"
	"testing"
	"time"
	"unsafe"

	"github.com/pkg/errors"

@@ -31,23 +27,6 @@ import (
	"github.com/stretchr/testify/require"
)

// createTestHeadBlock creates a new head block with a SegmentWAL.
func createTestHeadBlock(t testing.TB, dir string, mint, maxt int64) *HeadBlock {
	dir, err := TouchHeadBlock(dir, mint, maxt)
	require.NoError(t, err)

	return openTestHeadBlock(t, dir)
}

func openTestHeadBlock(t testing.TB, dir string) *HeadBlock {
	wal, err := OpenSegmentWAL(dir, nil, 5*time.Second)
	require.NoError(t, err)

	h, err := OpenHeadBlock(dir, nil, wal, nil)
	require.NoError(t, err)
	return h
}

func BenchmarkCreateSeries(b *testing.B) {
	lbls, err := readPrometheusLabels("cmd/tsdb/testdata.1m", 1e6)
	require.NoError(b, err)

@@ -57,7 +36,10 @@ func BenchmarkCreateSeries(b *testing.B) {
	require.NoError(b, err)
	defer os.RemoveAll(dir)

	h := createTestHeadBlock(b, dir, 0, 1)
	h, err := NewHead(nil, nil, 10000)
	if err != nil {
		require.NoError(b, err)
	}

	b.ReportAllocs()
	b.ResetTimer()
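The benchmark no longer opens a head block from a directory; a head is constructed directly. A sketch of the same setup with a real WAL attached, assuming `NewHead(logger, wal, chunkRange)` matches the positional arguments used above (the parameter meanings are inferred from this diff, not stated in it):

	wal, err := OpenSegmentWAL(dir, nil, 5*time.Second)
	require.NoError(b, err)

	// nil logger, the opened WAL, and an assumed chunk range of 10000.
	h, err := NewHead(nil, wal, 10000)
	require.NoError(b, err)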
@@ -106,598 +88,321 @@ func readPrometheusLabels(fn string, n int) ([]labels.Labels, error) {
	return mets, nil
}

func TestAmendDatapointCausesError(t *testing.T) {
	dir, _ := ioutil.TempDir("", "test")
	defer os.RemoveAll(dir)

	hb := createTestHeadBlock(t, dir, 0, 1000)

	app := hb.Appender()
	_, err := app.Add(labels.Labels{}, 0, 0)
	require.NoError(t, err, "Failed to add sample")
	require.NoError(t, app.Commit(), "Unexpected error committing appender")

	app = hb.Appender()
	_, err = app.Add(labels.Labels{}, 0, 1)
	require.Equal(t, ErrAmendSample, err)
}

func TestDuplicateNaNDatapointNoAmendError(t *testing.T) {
	dir, _ := ioutil.TempDir("", "test")
	defer os.RemoveAll(dir)

	hb := createTestHeadBlock(t, dir, 0, 1000)

	app := hb.Appender()
	_, err := app.Add(labels.Labels{}, 0, math.NaN())
	require.NoError(t, err, "Failed to add sample")
	require.NoError(t, app.Commit(), "Unexpected error committing appender")

	app = hb.Appender()
	_, err = app.Add(labels.Labels{}, 0, math.NaN())
	require.NoError(t, err)
}

func TestNonDuplicateNaNDatapointsCausesAmendError(t *testing.T) {
	dir, _ := ioutil.TempDir("", "test")
	defer os.RemoveAll(dir)

	hb := createTestHeadBlock(t, dir, 0, 1000)

	app := hb.Appender()
	_, err := app.Add(labels.Labels{}, 0, math.Float64frombits(0x7ff0000000000001))
	require.NoError(t, err, "Failed to add sample")
	require.NoError(t, app.Commit(), "Unexpected error committing appender")

	app = hb.Appender()
	_, err = app.Add(labels.Labels{}, 0, math.Float64frombits(0x7ff0000000000002))
	require.Equal(t, ErrAmendSample, err)
}

func TestSkippingInvalidValuesInSameTxn(t *testing.T) {
	dir, _ := ioutil.TempDir("", "test")
	defer os.RemoveAll(dir)

	hb := createTestHeadBlock(t, dir, 0, 1000)

	// Append AmendedValue.
	app := hb.Appender()
	_, err := app.Add(labels.Labels{{"a", "b"}}, 0, 1)
	require.NoError(t, err)
	_, err = app.Add(labels.Labels{{"a", "b"}}, 0, 2)
	require.NoError(t, err)
	require.NoError(t, app.Commit())
	require.Equal(t, uint64(1), hb.Meta().Stats.NumSamples)

	// Make sure the right value is stored.
	q := hb.Querier(0, 10)
	ss := q.Select(labels.NewEqualMatcher("a", "b"))
	ssMap, err := readSeriesSet(ss)
	require.NoError(t, err)

	require.Equal(t, map[string][]sample{
		labels.New(labels.Label{"a", "b"}).String(): []sample{{0, 1}},
	}, ssMap)

	require.NoError(t, q.Close())

	// Append Out of Order Value.
	app = hb.Appender()
	_, err = app.Add(labels.Labels{{"a", "b"}}, 10, 3)
	require.NoError(t, err)
	_, err = app.Add(labels.Labels{{"a", "b"}}, 7, 5)
	require.NoError(t, err)
	require.NoError(t, app.Commit())
	require.Equal(t, uint64(2), hb.Meta().Stats.NumSamples)

	q = hb.Querier(0, 10)
	ss = q.Select(labels.NewEqualMatcher("a", "b"))
	ssMap, err = readSeriesSet(ss)
	require.NoError(t, err)

	require.Equal(t, map[string][]sample{
		labels.New(labels.Label{"a", "b"}).String(): []sample{{0, 1}, {10, 3}},
	}, ssMap)
	require.NoError(t, q.Close())
}

func TestHeadBlock_e2e(t *testing.T) {
	numDatapoints := 1000
	numRanges := 1000
	timeInterval := int64(3)
	maxTime := int64(2 * 1000)
	minTime := int64(200)
	// Create 8 series with 1000 data-points of different ranges and run queries.
	lbls := [][]labels.Label{
		{
			{"a", "b"},
			{"instance", "localhost:9090"},
			{"job", "prometheus"},
		},
		{
			{"a", "b"},
			{"instance", "127.0.0.1:9090"},
			{"job", "prometheus"},
		},
		{
			{"a", "b"},
			{"instance", "127.0.0.1:9090"},
			{"job", "prom-k8s"},
		},
		{
			{"a", "b"},
			{"instance", "localhost:9090"},
			{"job", "prom-k8s"},
		},
		{
			{"a", "c"},
			{"instance", "localhost:9090"},
			{"job", "prometheus"},
		},
		{
			{"a", "c"},
			{"instance", "127.0.0.1:9090"},
			{"job", "prometheus"},
		},
		{
			{"a", "c"},
			{"instance", "127.0.0.1:9090"},
			{"job", "prom-k8s"},
		},
		{
			{"a", "c"},
			{"instance", "localhost:9090"},
			{"job", "prom-k8s"},
		},
	}

	seriesMap := map[string][]sample{}
	for _, l := range lbls {
		seriesMap[labels.New(l...).String()] = []sample{}
	}

	dir, _ := ioutil.TempDir("", "test")
	defer os.RemoveAll(dir)

	hb := createTestHeadBlock(t, dir, minTime, maxTime)
	app := hb.Appender()

	for _, l := range lbls {
		ls := labels.New(l...)
		series := []sample{}

		ts := rand.Int63n(300)
		for i := 0; i < numDatapoints; i++ {
			v := rand.Float64()
			if ts >= minTime && ts <= maxTime {
				series = append(series, sample{ts, v})
			}

			_, err := app.Add(ls, ts, v)
			if ts >= minTime && ts <= maxTime {
				require.NoError(t, err)
			} else {
				require.EqualError(t, err, ErrOutOfBounds.Error())
			}

			ts += rand.Int63n(timeInterval) + 1
		}

		seriesMap[labels.New(l...).String()] = series
	}

	require.NoError(t, app.Commit())

	// Query each selector on 1000 random time-ranges.
	queries := []struct {
		ms []labels.Matcher
	}{
		{
			ms: []labels.Matcher{labels.NewEqualMatcher("a", "b")},
		},
		{
			ms: []labels.Matcher{
				labels.NewEqualMatcher("a", "b"),
				labels.NewEqualMatcher("job", "prom-k8s"),
			},
		},
		{
			ms: []labels.Matcher{
				labels.NewEqualMatcher("a", "c"),
				labels.NewEqualMatcher("instance", "localhost:9090"),
				labels.NewEqualMatcher("job", "prometheus"),
			},
		},
		// TODO: Add Regexp Matchers.
	}

	for _, qry := range queries {
		matched := labels.Slice{}
		for _, ls := range lbls {
			s := labels.Selector(qry.ms)
			if s.Matches(ls) {
				matched = append(matched, ls)
			}
		}

		sort.Sort(matched)

		for i := 0; i < numRanges; i++ {
			mint := rand.Int63n(300)
			maxt := mint + rand.Int63n(timeInterval*int64(numDatapoints))

			q := hb.Querier(mint, maxt)
			ss := q.Select(qry.ms...)

			// Build the mockSeriesSet.
			matchedSeries := make([]Series, 0, len(matched))
			for _, m := range matched {
				smpls := boundedSamples(seriesMap[m.String()], mint, maxt)

				// Only append those series for which samples exist as mockSeriesSet
				// doesn't skip series with no samples.
				// TODO: But sometimes SeriesSet returns an empty SeriesIterator
				if len(smpls) > 0 {
					matchedSeries = append(matchedSeries, newSeries(
						m.Map(),
						smpls,
					))
				}
			}
			expSs := newListSeriesSet(matchedSeries)

			// Compare both SeriesSets.
			for {
				eok, rok := expSs.Next(), ss.Next()

				// Skip a series if iterator is empty.
				if rok {
					for !ss.At().Iterator().Next() {
						rok = ss.Next()
						if !rok {
							break
						}
					}
				}

				require.Equal(t, eok, rok, "next")

				if !eok {
					break
				}
				sexp := expSs.At()
				sres := ss.At()

				require.Equal(t, sexp.Labels(), sres.Labels(), "labels")

				smplExp, errExp := expandSeriesIterator(sexp.Iterator())
				smplRes, errRes := expandSeriesIterator(sres.Iterator())

				require.Equal(t, errExp, errRes, "samples error")
				require.Equal(t, smplExp, smplRes, "samples")
			}
		}
	}

	return
}

func TestHBDeleteSimple(t *testing.T) {
	numSamples := int64(10)

	dir, _ := ioutil.TempDir("", "test")
	defer os.RemoveAll(dir)

	hb := createTestHeadBlock(t, dir, 0, numSamples)
	app := hb.Appender()

	smpls := make([]float64, numSamples)
	for i := int64(0); i < numSamples; i++ {
		smpls[i] = rand.Float64()
		app.Add(labels.Labels{{"a", "b"}}, i, smpls[i])
	}

	require.NoError(t, app.Commit())
	cases := []struct {
		intervals Intervals
		remaint   []int64
	}{
		{
			intervals: Intervals{{0, 3}},
			remaint:   []int64{4, 5, 6, 7, 8, 9},
		},
		{
			intervals: Intervals{{1, 3}},
			remaint:   []int64{0, 4, 5, 6, 7, 8, 9},
		},
		{
			intervals: Intervals{{1, 3}, {4, 7}},
			remaint:   []int64{0, 8, 9},
		},
		{
			intervals: Intervals{{1, 3}, {4, 700}},
			remaint:   []int64{0},
		},
		{
			intervals: Intervals{{0, 9}},
			remaint:   []int64{},
		},
	}

Outer:
	for _, c := range cases {
		// Reset the tombstones.
		hb.tombstones = newEmptyTombstoneReader()

		// Delete the ranges.
		for _, r := range c.intervals {
			require.NoError(t, hb.Delete(r.Mint, r.Maxt, labels.NewEqualMatcher("a", "b")))
		}

		// Compare the result.
		q := hb.Querier(0, numSamples)
		res := q.Select(labels.NewEqualMatcher("a", "b"))

		expSamples := make([]sample, 0, len(c.remaint))
		for _, ts := range c.remaint {
			expSamples = append(expSamples, sample{ts, smpls[ts]})
		}

		expss := newListSeriesSet([]Series{
			newSeries(map[string]string{"a": "b"}, expSamples),
		})

		if len(expSamples) == 0 {
			require.False(t, res.Next())
			continue
		}

		for {
			eok, rok := expss.Next(), res.Next()
			require.Equal(t, eok, rok, "next")

			if !eok {
				continue Outer
			}
			sexp := expss.At()
			sres := res.At()

			require.Equal(t, sexp.Labels(), sres.Labels(), "labels")

			smplExp, errExp := expandSeriesIterator(sexp.Iterator())
			smplRes, errRes := expandSeriesIterator(sres.Iterator())

			require.Equal(t, errExp, errRes, "samples error")
			require.Equal(t, smplExp, smplRes, "samples")
		}
	}
}

func TestDeleteUntilCurMax(t *testing.T) {
	numSamples := int64(10)

	dir, _ := ioutil.TempDir("", "test")
	defer os.RemoveAll(dir)

	hb := createTestHeadBlock(t, dir, 0, 2*numSamples)
	app := hb.Appender()

	smpls := make([]float64, numSamples)
	for i := int64(0); i < numSamples; i++ {
		smpls[i] = rand.Float64()
		app.Add(labels.Labels{{"a", "b"}}, i, smpls[i])
	}

	require.NoError(t, app.Commit())
	require.NoError(t, hb.Delete(0, 10000, labels.NewEqualMatcher("a", "b")))
	app = hb.Appender()
	_, err := app.Add(labels.Labels{{"a", "b"}}, 11, 1)
	require.NoError(t, err)
	require.NoError(t, app.Commit())

	q := hb.Querier(0, 100000)
	res := q.Select(labels.NewEqualMatcher("a", "b"))

	require.True(t, res.Next())
	exps := res.At()
	it := exps.Iterator()
	ressmpls, err := expandSeriesIterator(it)
	require.NoError(t, err)
	require.Equal(t, []sample{{11, 1}}, ressmpls)
}

func TestDelete_e2e(t *testing.T) {
	numDatapoints := 1000
	numRanges := 1000
	timeInterval := int64(2)
	maxTime := int64(2 * 1000)
	minTime := int64(200)
	// Create 8 series with 1000 data-points of different ranges, delete and run queries.
	lbls := [][]labels.Label{
		{
			{"a", "b"},
			{"instance", "localhost:9090"},
			{"job", "prometheus"},
		},
		{
			{"a", "b"},
			{"instance", "127.0.0.1:9090"},
			{"job", "prometheus"},
		},
		{
			{"a", "b"},
			{"instance", "127.0.0.1:9090"},
			{"job", "prom-k8s"},
		},
		{
			{"a", "b"},
			{"instance", "localhost:9090"},
			{"job", "prom-k8s"},
		},
		{
			{"a", "c"},
			{"instance", "localhost:9090"},
			{"job", "prometheus"},
		},
		{
			{"a", "c"},
			{"instance", "127.0.0.1:9090"},
			{"job", "prometheus"},
		},
		{
			{"a", "c"},
			{"instance", "127.0.0.1:9090"},
			{"job", "prom-k8s"},
		},
		{
			{"a", "c"},
			{"instance", "localhost:9090"},
			{"job", "prom-k8s"},
		},
	}

	seriesMap := map[string][]sample{}
	for _, l := range lbls {
		seriesMap[labels.New(l...).String()] = []sample{}
	}

	dir, _ := ioutil.TempDir("", "test")
	defer os.RemoveAll(dir)

	hb := createTestHeadBlock(t, dir, minTime, maxTime)
	app := hb.Appender()

	for _, l := range lbls {
		ls := labels.New(l...)
		series := []sample{}

		ts := rand.Int63n(300)
		for i := 0; i < numDatapoints; i++ {
			v := rand.Float64()
			if ts >= minTime && ts <= maxTime {
				series = append(series, sample{ts, v})
			}

			_, err := app.Add(ls, ts, v)
			if ts >= minTime && ts <= maxTime {
				require.NoError(t, err)
			} else {
				require.EqualError(t, err, ErrOutOfBounds.Error())
			}

			ts += rand.Int63n(timeInterval) + 1
		}

		seriesMap[labels.New(l...).String()] = series
	}

	require.NoError(t, app.Commit())

	// Delete a time-range from each-selector.
	dels := []struct {
		ms     []labels.Matcher
		drange Intervals
	}{
		{
			ms:     []labels.Matcher{labels.NewEqualMatcher("a", "b")},
			drange: Intervals{{300, 500}, {600, 670}},
		},
		{
			ms: []labels.Matcher{
				labels.NewEqualMatcher("a", "b"),
				labels.NewEqualMatcher("job", "prom-k8s"),
			},
			drange: Intervals{{300, 500}, {100, 670}},
		},
		{
			ms: []labels.Matcher{
				labels.NewEqualMatcher("a", "c"),
				labels.NewEqualMatcher("instance", "localhost:9090"),
				labels.NewEqualMatcher("job", "prometheus"),
			},
			drange: Intervals{{300, 400}, {100, 6700}},
		},
		// TODO: Add Regexp Matchers.
	}

	for _, del := range dels {
		// Reset the deletes everytime.
		writeTombstoneFile(hb.dir, newEmptyTombstoneReader())
		hb.tombstones = newEmptyTombstoneReader()

		for _, r := range del.drange {
			require.NoError(t, hb.Delete(r.Mint, r.Maxt, del.ms...))
		}

		matched := labels.Slice{}
		for _, ls := range lbls {
			s := labels.Selector(del.ms)
			if s.Matches(ls) {
				matched = append(matched, ls)
			}
		}

		sort.Sort(matched)

		for i := 0; i < numRanges; i++ {
			mint := rand.Int63n(200)
			maxt := mint + rand.Int63n(timeInterval*int64(numDatapoints))

			q := hb.Querier(mint, maxt)
			ss := q.Select(del.ms...)

			// Build the mockSeriesSet.
			matchedSeries := make([]Series, 0, len(matched))
			for _, m := range matched {
				smpls := boundedSamples(seriesMap[m.String()], mint, maxt)
				smpls = deletedSamples(smpls, del.drange)

				// Only append those series for which samples exist as mockSeriesSet
				// doesn't skip series with no samples.
				// TODO: But sometimes SeriesSet returns an empty SeriesIterator
				if len(smpls) > 0 {
					matchedSeries = append(matchedSeries, newSeries(
						m.Map(),
						smpls,
					))
				}
			}
			expSs := newListSeriesSet(matchedSeries)

			// Compare both SeriesSets.
			for {
				eok, rok := expSs.Next(), ss.Next()

				// Skip a series if iterator is empty.
				if rok {
					for !ss.At().Iterator().Next() {
						rok = ss.Next()
						if !rok {
							break
						}
					}
				}
				require.Equal(t, eok, rok, "next")

				if !eok {
					break
				}
				sexp := expSs.At()
				sres := ss.At()

				require.Equal(t, sexp.Labels(), sres.Labels(), "labels")

				smplExp, errExp := expandSeriesIterator(sexp.Iterator())
				smplRes, errRes := expandSeriesIterator(sres.Iterator())

				require.Equal(t, errExp, errRes, "samples error")
				require.Equal(t, smplExp, smplRes, "samples")
			}
		}
	}

	return
}
// func TestHBDeleteSimple(t *testing.T) {
// 	numSamples := int64(10)

// 	hb, close := openTestDB(t, nil)
// 	defer close()

// 	app := hb.Appender()

// 	smpls := make([]float64, numSamples)
// 	for i := int64(0); i < numSamples; i++ {
// 		smpls[i] = rand.Float64()
// 		app.Add(labels.Labels{{"a", "b"}}, i, smpls[i])
// 	}

// 	require.NoError(t, app.Commit())
// 	cases := []struct {
// 		intervals Intervals
// 		remaint   []int64
// 	}{
// 		{
// 			intervals: Intervals{{0, 3}},
// 			remaint:   []int64{4, 5, 6, 7, 8, 9},
// 		},
// 		{
// 			intervals: Intervals{{1, 3}},
// 			remaint:   []int64{0, 4, 5, 6, 7, 8, 9},
// 		},
// 		{
// 			intervals: Intervals{{1, 3}, {4, 7}},
// 			remaint:   []int64{0, 8, 9},
// 		},
// 		{
// 			intervals: Intervals{{1, 3}, {4, 700}},
// 			remaint:   []int64{0},
// 		},
// 		{
// 			intervals: Intervals{{0, 9}},
// 			remaint:   []int64{},
// 		},
// 	}

// Outer:
// 	for _, c := range cases {
// 		// Reset the tombstones.
// 		hb.tombstones = newEmptyTombstoneReader()

// 		// Delete the ranges.
// 		for _, r := range c.intervals {
// 			require.NoError(t, hb.Delete(r.Mint, r.Maxt, labels.NewEqualMatcher("a", "b")))
// 		}

// 		// Compare the result.
// 		q := hb.Querier(0, numSamples)
// 		res := q.Select(labels.NewEqualMatcher("a", "b"))

// 		expSamples := make([]sample, 0, len(c.remaint))
// 		for _, ts := range c.remaint {
// 			expSamples = append(expSamples, sample{ts, smpls[ts]})
// 		}

// 		expss := newListSeriesSet([]Series{
// 			newSeries(map[string]string{"a": "b"}, expSamples),
// 		})

// 		if len(expSamples) == 0 {
// 			require.False(t, res.Next())
// 			continue
// 		}

// 		for {
// 			eok, rok := expss.Next(), res.Next()
// 			require.Equal(t, eok, rok, "next")

// 			if !eok {
// 				continue Outer
// 			}
// 			sexp := expss.At()
// 			sres := res.At()

// 			require.Equal(t, sexp.Labels(), sres.Labels(), "labels")

// 			smplExp, errExp := expandSeriesIterator(sexp.Iterator())
// 			smplRes, errRes := expandSeriesIterator(sres.Iterator())

// 			require.Equal(t, errExp, errRes, "samples error")
// 			require.Equal(t, smplExp, smplRes, "samples")
// 		}
// 	}
// }

// func TestDeleteUntilCurMax(t *testing.T) {
// 	numSamples := int64(10)

// 	dir, _ := ioutil.TempDir("", "test")
// 	defer os.RemoveAll(dir)

// 	hb := createTestHead(t, dir, 0, 2*numSamples)
// 	app := hb.Appender()

// 	smpls := make([]float64, numSamples)
// 	for i := int64(0); i < numSamples; i++ {
// 		smpls[i] = rand.Float64()
// 		app.Add(labels.Labels{{"a", "b"}}, i, smpls[i])
// 	}

// 	require.NoError(t, app.Commit())
// 	require.NoError(t, hb.Delete(0, 10000, labels.NewEqualMatcher("a", "b")))
// 	app = hb.Appender()
// 	_, err := app.Add(labels.Labels{{"a", "b"}}, 11, 1)
// 	require.NoError(t, err)
// 	require.NoError(t, app.Commit())

// 	q := hb.Querier(0, 100000)
// 	res := q.Select(labels.NewEqualMatcher("a", "b"))

// 	require.True(t, res.Next())
// 	exps := res.At()
// 	it := exps.Iterator()
// 	ressmpls, err := expandSeriesIterator(it)
// 	require.NoError(t, err)
// 	require.Equal(t, []sample{{11, 1}}, ressmpls)
// }

// func TestDelete_e2e(t *testing.T) {
// 	numDatapoints := 1000
// 	numRanges := 1000
// 	timeInterval := int64(2)
// 	maxTime := int64(2 * 1000)
// 	minTime := int64(200)
// 	// Create 8 series with 1000 data-points of different ranges, delete and run queries.
// 	lbls := [][]labels.Label{
// 		{
// 			{"a", "b"},
// 			{"instance", "localhost:9090"},
// 			{"job", "prometheus"},
// 		},
// 		{
// 			{"a", "b"},
// 			{"instance", "127.0.0.1:9090"},
// 			{"job", "prometheus"},
// 		},
// 		{
// 			{"a", "b"},
// 			{"instance", "127.0.0.1:9090"},
// 			{"job", "prom-k8s"},
// 		},
// 		{
// 			{"a", "b"},
// 			{"instance", "localhost:9090"},
// 			{"job", "prom-k8s"},
// 		},
// 		{
// 			{"a", "c"},
// 			{"instance", "localhost:9090"},
// 			{"job", "prometheus"},
// 		},
// 		{
// 			{"a", "c"},
// 			{"instance", "127.0.0.1:9090"},
// 			{"job", "prometheus"},
// 		},
// 		{
// 			{"a", "c"},
// 			{"instance", "127.0.0.1:9090"},
// 			{"job", "prom-k8s"},
// 		},
// 		{
// 			{"a", "c"},
// 			{"instance", "localhost:9090"},
// 			{"job", "prom-k8s"},
// 		},
// 	}

// 	seriesMap := map[string][]sample{}
// 	for _, l := range lbls {
// 		seriesMap[labels.New(l...).String()] = []sample{}
// 	}

// 	dir, _ := ioutil.TempDir("", "test")
// 	defer os.RemoveAll(dir)

// 	hb := createTestHead(t, dir, minTime, maxTime)
// 	app := hb.Appender()

// 	for _, l := range lbls {
// 		ls := labels.New(l...)
// 		series := []sample{}

// 		ts := rand.Int63n(300)
// 		for i := 0; i < numDatapoints; i++ {
// 			v := rand.Float64()
// 			if ts >= minTime && ts <= maxTime {
// 				series = append(series, sample{ts, v})
// 			}

// 			_, err := app.Add(ls, ts, v)
// 			if ts >= minTime && ts <= maxTime {
// 				require.NoError(t, err)
// 			} else {
// 				require.EqualError(t, err, ErrOutOfBounds.Error())
// 			}

// 			ts += rand.Int63n(timeInterval) + 1
// 		}

// 		seriesMap[labels.New(l...).String()] = series
// 	}

// 	require.NoError(t, app.Commit())

// 	// Delete a time-range from each-selector.
// 	dels := []struct {
// 		ms     []labels.Matcher
// 		drange Intervals
// 	}{
// 		{
// 			ms:     []labels.Matcher{labels.NewEqualMatcher("a", "b")},
// 			drange: Intervals{{300, 500}, {600, 670}},
// 		},
// 		{
// 			ms: []labels.Matcher{
// 				labels.NewEqualMatcher("a", "b"),
// 				labels.NewEqualMatcher("job", "prom-k8s"),
// 			},
// 			drange: Intervals{{300, 500}, {100, 670}},
// 		},
// 		{
// 			ms: []labels.Matcher{
// 				labels.NewEqualMatcher("a", "c"),
// 				labels.NewEqualMatcher("instance", "localhost:9090"),
// 				labels.NewEqualMatcher("job", "prometheus"),
// 			},
// 			drange: Intervals{{300, 400}, {100, 6700}},
// 		},
// 		// TODO: Add Regexp Matchers.
// 	}

// 	for _, del := range dels {
// 		// Reset the deletes everytime.
// 		writeTombstoneFile(hb.dir, newEmptyTombstoneReader())
// 		hb.tombstones = newEmptyTombstoneReader()

// 		for _, r := range del.drange {
// 			require.NoError(t, hb.Delete(r.Mint, r.Maxt, del.ms...))
// 		}

// 		matched := labels.Slice{}
// 		for _, ls := range lbls {
// 			s := labels.Selector(del.ms)
// 			if s.Matches(ls) {
// 				matched = append(matched, ls)
// 			}
// 		}

// 		sort.Sort(matched)

// 		for i := 0; i < numRanges; i++ {
// 			mint := rand.Int63n(200)
// 			maxt := mint + rand.Int63n(timeInterval*int64(numDatapoints))

// 			q := hb.Querier(mint, maxt)
// 			ss := q.Select(del.ms...)

// 			// Build the mockSeriesSet.
// 			matchedSeries := make([]Series, 0, len(matched))
// 			for _, m := range matched {
// 				smpls := boundedSamples(seriesMap[m.String()], mint, maxt)
// 				smpls = deletedSamples(smpls, del.drange)

// 				// Only append those series for which samples exist as mockSeriesSet
// 				// doesn't skip series with no samples.
// 				// TODO: But sometimes SeriesSet returns an empty SeriesIterator
// 				if len(smpls) > 0 {
// 					matchedSeries = append(matchedSeries, newSeries(
// 						m.Map(),
// 						smpls,
// 					))
// 				}
// 			}
// 			expSs := newListSeriesSet(matchedSeries)

// 			// Compare both SeriesSets.
// 			for {
// 				eok, rok := expSs.Next(), ss.Next()

// 				// Skip a series if iterator is empty.
// 				if rok {
// 					for !ss.At().Iterator().Next() {
// 						rok = ss.Next()
// 						if !rok {
// 							break
// 						}
// 					}
// 				}
// 				require.Equal(t, eok, rok, "next")

// 				if !eok {
// 					break
// 				}
// 				sexp := expSs.At()
// 				sres := ss.At()

// 				require.Equal(t, sexp.Labels(), sres.Labels(), "labels")

// 				smplExp, errExp := expandSeriesIterator(sexp.Iterator())
// 				smplRes, errRes := expandSeriesIterator(sres.Iterator())

// 				require.Equal(t, errExp, errRes, "samples error")
// 				require.Equal(t, smplExp, smplRes, "samples")
// 			}
// 		}
// 	}

// 	return
// }

func boundedSamples(full []sample, mint, maxt int64) []sample {
	for len(full) > 0 {

@@ -725,7 +430,6 @@ Outer:
			continue Outer
		}
	}

		ds = append(ds, s)
	}
querier.go: 36 changed lines
@@ -54,26 +54,6 @@ type querier struct {
	blocks []Querier
}

// Querier returns a new querier over the data partition for the given time range.
// A goroutine must not handle more than one open Querier.
func (s *DB) Querier(mint, maxt int64) Querier {
	s.mtx.RLock()

	s.headmtx.RLock()
	blocks := s.blocksForInterval(mint, maxt)
	s.headmtx.RUnlock()

	sq := &querier{
		blocks: make([]Querier, 0, len(blocks)),
		db:     s,
	}
	for _, b := range blocks {
		sq.blocks = append(sq.blocks, b.Querier(mint, maxt))
	}

	return sq
}

func (q *querier) LabelValues(n string) ([]string, error) {
	return q.lvals(q.blocks, n)
}

@@ -700,6 +680,7 @@ type chunkSeriesIterator struct {

func newChunkSeriesIterator(cs []ChunkMeta, dranges Intervals, mint, maxt int64) *chunkSeriesIterator {
	it := cs[0].Chunk.Iterator()

	if len(dranges) > 0 {
		it = &deletedIterator{it: it, intervals: dranges}
	}

@@ -750,19 +731,22 @@ func (it *chunkSeriesIterator) At() (t int64, v float64) {
}

func (it *chunkSeriesIterator) Next() bool {
	for it.cur.Next() {
	if it.cur.Next() {
		t, _ := it.cur.At()
		if t < it.mint {
			return it.Seek(it.mint)
		}

		if t < it.mint {
			if !it.Seek(it.mint) {
				return false
			}
			t, _ = it.At()

			return t <= it.maxt
		}
		if t > it.maxt {
			return false
		}

		return true
	}

	if err := it.cur.Err(); err != nil {
		return false
	}
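To see why the loop had to become a single `if`: take a chunk with samples at t = 1, 5, 7 and a query window of [2, 4]. `Next()` first lands on t = 1, which is below `mint`, so it seeks forward; `Seek(2)` stops at t = 5, which is already past `maxt`. The old code returned `Seek`'s result directly and accepted that sample as valid, while the new code re-reads the post-seek timestamp and rejects it with `t <= it.maxt`. The regression test in querier_test.go below pins exactly this case.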
querier_test.go

@@ -1138,6 +1138,17 @@ func TestChunkSeriesIterator_SeekInCurrentChunk(t *testing.T) {
	require.Equal(t, float64(6), v)
}

// Regression when calling Next() with a time bounded to fit within two samples.
// Seek gets called and advances beyond the max time, which was just accepted as a valid sample.
func TestChunkSeriesIterator_NextWithMinTime(t *testing.T) {
	metas := []ChunkMeta{
		chunkFromSamples([]sample{{1, 6}, {5, 6}, {7, 8}}),
	}

	it := newChunkSeriesIterator(metas, nil, 2, 4)
	require.False(t, it.Next())
}

func TestPopulatedCSReturnsValidChunkSlice(t *testing.T) {
	lbls := []labels.Labels{labels.New(labels.Label{"a", "b"})}
	chunkMetas := [][]ChunkMeta{
wal.go: 17 changed lines
@@ -84,9 +84,19 @@ type WAL interface {
	LogSeries([]labels.Labels) error
	LogSamples([]RefSample) error
	LogDeletes([]Stone) error
	Truncate(maxt int64) error
	Close() error
}

type NopWAL struct{}

func (NopWAL) Read(SeriesCB, SamplesCB, DeletesCB) error { return nil }
func (w NopWAL) Reader() WALReader                       { return w }
func (NopWAL) LogSeries([]labels.Labels) error           { return nil }
func (NopWAL) LogSamples([]RefSample) error              { return nil }
func (NopWAL) LogDeletes([]Stone) error                  { return nil }
func (NopWAL) Close() error                              { return nil }

// WALReader reads entries from a WAL.
type WALReader interface {
	Read(SeriesCB, SamplesCB, DeletesCB) error

@@ -319,6 +329,10 @@ func (w *SegmentWAL) Sync() error {
	return fileutil.Fdatasync(tail)
}

func (w *SegmentWAL) Truncate(maxt int64) error {
	return nil
}

func (w *SegmentWAL) sync() error {
	if err := w.flush(); err != nil {
		return err

@@ -360,9 +374,8 @@ func (w *SegmentWAL) Close() error {
	close(w.stopc)
	<-w.donec

	// Lock mutex and leave it locked so we panic if there's a bug causing
	// the block to be used afterwards.
	w.mtx.Lock()
	defer w.mtx.Unlock()

	if err := w.sync(); err != nil {
		return err
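`NopWAL` gives any caller a WAL that accepts and discards everything, which is useful wherever durability is irrelevant, such as a throwaway head in tests or benchmarks. A sketch of it in use, assuming `NewHead` accepts any `WAL` implementation as in head_test.go above:

	// A head backed by a no-op WAL: appends succeed, nothing is logged,
	// and replay on open sees an empty log.
	h, err := NewHead(nil, NopWAL{}, 10000)
	if err != nil {
		return err
	}
	app := h.Appender()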