Remove multiple heads

This changes the structure to a single WAL backed by a single head
block.
Parts of the head block can be compacted. This relieves us of any head
management and greatly simplifies any consistency and isolation concerns
by just having a single head.
Fabian Reinartz 2017-08-29 00:39:17 +02:00
parent 0fe67df9f2
commit 3901b6e70b
9 changed files with 1455 additions and 1580 deletions
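
The shape of the change shows up in the new signatures below: the head is just another BlockReader over an explicit time range, so persisting part of it becomes one compactor call followed by truncating the WAL. A minimal sketch of that flow, assuming the tsdb package context; the db.compactor, db.head, and db.wal fields are illustrative, only Compactor.Write(dest, BlockReader, mint, maxt) and WAL.Truncate(maxt) are taken from this commit:

// persistHeadRange is a sketch, not code from this commit.
func (db *DB) persistHeadRange(mint, maxt int64) error {
	// The single head implements BlockReader, so a window of it can be
	// written out as a regular persisted block.
	if err := db.compactor.Write(db.dir, db.head, mint, maxt); err != nil {
		return err
	}
	// Once persisted, WAL data up to maxt is no longer needed.
	return db.wal.Truncate(maxt)
}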

block.go

@@ -26,14 +26,21 @@ import (
 	"github.com/prometheus/tsdb/labels"
 )
 
 // DiskBlock handles reads against a Block of time series data.
 type DiskBlock interface {
+	BlockReader
+
 	// Directory where block data is stored.
 	Dir() string
 
 	// Stats returns statistics about the block.
 	Meta() BlockMeta
 
+	Delete(mint, maxt int64, m ...labels.Matcher) error
+
+	Close() error
+}
+
+type BlockReader interface {
 	// Index returns an IndexReader over the block's data.
 	Index() IndexReader
 
@@ -42,31 +49,14 @@ type DiskBlock interface {
 	// Tombstones returns a TombstoneReader over the block's deleted data.
 	Tombstones() TombstoneReader
-
-	// Delete deletes data from the block.
-	Delete(mint, maxt int64, ms ...labels.Matcher) error
-
-	// Close releases all underlying resources of the block.
-	Close() error
 }
 
-// Block is an interface to a DiskBlock that can also be queried.
-type Block interface {
-	DiskBlock
-	Queryable
-	Snapshottable
-}
+// Block is an interface to a DiskBlock that can also be queried.
+// type Block interface {
+// 	DiskBlock
+// 	Queryable
+// 	Snapshottable
+// }
 
-// headBlock is a regular block that can still be appended to.
-type headBlock interface {
-	Block
-	Appendable
-
-	// ActiveWriters returns the number of currently active appenders.
-	ActiveWriters() int
-	// HighTimestamp returns the highest currently inserted timestamp.
-	HighTimestamp() int64
-}
-
 // Snapshottable defines an entity that can be backedup online.
 type Snapshottable interface {
@@ -225,16 +215,6 @@ func (pb *persistedBlock) String() string {
 	return pb.meta.ULID.String()
 }
 
-func (pb *persistedBlock) Querier(mint, maxt int64) Querier {
-	return &blockQuerier{
-		mint:       mint,
-		maxt:       maxt,
-		index:      pb.Index(),
-		chunks:     pb.Chunks(),
-		tombstones: pb.Tombstones(),
-	}
-}
-
 func (pb *persistedBlock) Dir() string           { return pb.dir }
 func (pb *persistedBlock) Index() IndexReader    { return pb.indexr }
 func (pb *persistedBlock) Chunks() ChunkReader   { return pb.chunkr }
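
The split of BlockReader out of DiskBlock means consumers that only read, like the compactor, can accept both persisted blocks and the head. A sketch of such a consumer, assuming the tsdb package context (countSeries is hypothetical; Postings("", "") follows the all-series convention used in populateBlock below):

// countSeries walks all postings of a block using read access only.
func countSeries(b BlockReader) (int, error) {
	all, err := b.Index().Postings("", "")
	if err != nil {
		return 0, err
	}
	n := 0
	for all.Next() {
		n++
	}
	return n, all.Err()
}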

compact.go

@@ -14,7 +14,6 @@
 package tsdb
 
 import (
-	"fmt"
 	"math/rand"
 	"os"
 	"path/filepath"
@@ -51,7 +50,7 @@ type Compactor interface {
 	Plan(dir string) ([]string, error)
 
 	// Write persists a Block into a directory.
-	Write(dest string, b Block) error
+	Write(dest string, b BlockReader, mint, maxt int64) error
 
 	// Compact runs compaction against the provided directories. Must
 	// only be called concurrently with results of Plan().
@@ -124,8 +123,6 @@ type compactionInfo struct {
 	mint, maxt int64
 }
 
-const compactionBlocksLen = 3
-
 type dirMeta struct {
 	dir  string
 	meta *BlockMeta
@@ -258,9 +255,12 @@ func splitByRange(ds []dirMeta, tr int64) [][]dirMeta {
 	return splitDirs
 }
 
-func compactBlockMetas(blocks ...BlockMeta) (res BlockMeta) {
-	res.MinTime = blocks[0].MinTime
-	res.MaxTime = blocks[len(blocks)-1].MaxTime
+func compactBlockMetas(uid ulid.ULID, blocks ...*BlockMeta) *BlockMeta {
+	res := &BlockMeta{
+		ULID:    uid,
+		MinTime: blocks[0].MinTime,
+		MaxTime: blocks[len(blocks)-1].MaxTime,
+	}
 
 	sources := map[ulid.ULID]struct{}{}
@@ -271,10 +271,6 @@ func compactBlockMetas(blocks ...BlockMeta) (res BlockMeta) {
 		for _, s := range b.Compaction.Sources {
 			sources[s] = struct{}{}
 		}
-		// If it's an in memory block, its ULID goes into the sources.
-		if b.Compaction.Level == 0 {
-			sources[b.ULID] = struct{}{}
-		}
 	}
 
 	res.Compaction.Level++
@@ -291,7 +287,8 @@ func compactBlockMetas(blocks ...BlockMeta) (res BlockMeta) {
 // Compact creates a new block in the compactor's directory from the blocks in the
 // provided directories.
 func (c *LeveledCompactor) Compact(dest string, dirs ...string) (err error) {
-	var blocks []Block
+	var blocks []BlockReader
+	var metas []*BlockMeta
 
 	for _, d := range dirs {
 		b, err := newPersistedBlock(d, c.opts.chunkPool)
@@ -300,31 +297,40 @@ func (c *LeveledCompactor) Compact(dest string, dirs ...string) (err error) {
 		}
 		defer b.Close()
 
+		meta, err := readMetaFile(d)
+		if err != nil {
+			return err
+		}
+		metas = append(metas, meta)
+
 		blocks = append(blocks, b)
 	}
 
 	entropy := rand.New(rand.NewSource(time.Now().UnixNano()))
 	uid := ulid.MustNew(ulid.Now(), entropy)
 
-	return c.write(dest, uid, blocks...)
+	return c.write(dest, compactBlockMetas(uid, metas...), blocks...)
 }
 
-func (c *LeveledCompactor) Write(dest string, b Block) error {
-	// Buffering blocks might have been created that often have no data.
-	if b.Meta().Stats.NumSeries == 0 {
-		return nil
-	}
-
+func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64) error {
 	entropy := rand.New(rand.NewSource(time.Now().UnixNano()))
 	uid := ulid.MustNew(ulid.Now(), entropy)
 
-	return c.write(dest, uid, b)
+	meta := &BlockMeta{
+		ULID:    uid,
+		MinTime: mint,
+		MaxTime: maxt,
+	}
+	meta.Compaction.Level = 1
+	meta.Compaction.Sources = []ulid.ULID{uid}
+
+	return c.write(dest, meta, b)
 }
 
 // write creates a new block that is the union of the provided blocks into dir.
 // It cleans up all files of the old blocks after completing successfully.
-func (c *LeveledCompactor) write(dest string, uid ulid.ULID, blocks ...Block) (err error) {
-	c.logger.Log("msg", "compact blocks", "blocks", fmt.Sprintf("%v", blocks))
+func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockReader) (err error) {
+	c.logger.Log("msg", "compact blocks", "count", len(blocks), "mint", meta.MinTime, "maxt", meta.MaxTime)
 
 	defer func(t time.Time) {
 		if err != nil {
@@ -334,7 +340,7 @@ func (c *LeveledCompactor) write(dest string, uid ulid.ULID, blocks ...Block) (e
 		c.metrics.duration.Observe(time.Since(t).Seconds())
 	}(time.Now())
 
-	dir := filepath.Join(dest, uid.String())
+	dir := filepath.Join(dest, meta.ULID.String())
 	tmp := dir + ".tmp"
 
 	if err = os.RemoveAll(tmp); err != nil {
@@ -356,11 +362,9 @@ func (c *LeveledCompactor) write(dest string, uid ulid.ULID, blocks ...Block) (e
 		return errors.Wrap(err, "open index writer")
 	}
 
-	meta, err := c.populateBlock(blocks, indexw, chunkw)
-	if err != nil {
+	if err := c.populateBlock(blocks, meta, indexw, chunkw); err != nil {
 		return errors.Wrap(err, "write compaction")
 	}
-	meta.ULID = uid
 
 	if err = writeMetaFile(tmp, meta); err != nil {
 		return errors.Wrap(err, "write merged meta")
@@ -398,18 +402,16 @@ func (c *LeveledCompactor) write(dest string, uid ulid.ULID, blocks ...Block) (e
 
 // populateBlock fills the index and chunk writers with new data gathered as the union
 // of the provided blocks. It returns meta information for the new block.
-func (c *LeveledCompactor) populateBlock(blocks []Block, indexw IndexWriter, chunkw ChunkWriter) (*BlockMeta, error) {
+func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter) error {
 	var (
 		set        compactionSet
-		metas      []BlockMeta
 		allSymbols = make(map[string]struct{}, 1<<16)
 	)
 	for i, b := range blocks {
-		metas = append(metas, b.Meta())
-
 		symbols, err := b.Index().Symbols()
 		if err != nil {
-			return nil, errors.Wrap(err, "read symbols")
+			return errors.Wrap(err, "read symbols")
 		}
 		for s := range symbols {
 			allSymbols[s] = struct{}{}
@@ -419,7 +421,7 @@ func (c *LeveledCompactor) populateBlock(blocks []Block, indexw IndexWriter, chu
 		all, err := indexr.Postings("", "")
 		if err != nil {
-			return nil, err
+			return err
 		}
 		all = indexr.SortedPostings(all)
@@ -431,7 +433,7 @@ func (c *LeveledCompactor) populateBlock(blocks []Block, indexw IndexWriter, chu
 		}
 		set, err = newCompactionMerger(set, s)
 		if err != nil {
-			return nil, err
+			return err
 		}
 	}
@@ -440,11 +442,10 @@ func (c *LeveledCompactor) populateBlock(blocks []Block, indexw IndexWriter, chu
 		postings = &memPostings{m: make(map[term][]uint32, 512)}
 		values   = map[string]stringset{}
 		i        = uint32(0)
-		meta     = compactBlockMetas(metas...)
 	)
 
 	if err := indexw.AddSymbols(allSymbols); err != nil {
-		return nil, errors.Wrap(err, "add symbols")
+		return errors.Wrap(err, "add symbols")
 	}
 
 	for set.Next() {
@@ -462,7 +463,7 @@ func (c *LeveledCompactor) populateBlock(blocks []Block, indexw IndexWriter, chu
 			newChunk := chunks.NewXORChunk()
 			app, err := newChunk.Appender()
 			if err != nil {
-				return nil, err
+				return err
 			}
 
 			it := &deletedIterator{it: chk.Chunk.Iterator(), intervals: dranges}
@@ -476,11 +477,11 @@ func (c *LeveledCompactor) populateBlock(blocks []Block, indexw IndexWriter, chu
 			}
 		}
 
 		if err := chunkw.WriteChunks(chks...); err != nil {
-			return nil, err
+			return errors.Wrap(err, "write chunks")
 		}
 
 		if err := indexw.AddSeries(i, lset, chks...); err != nil {
-			return nil, errors.Wrapf(err, "add series")
+			return errors.Wrap(err, "add series")
 		}
 
 		meta.Stats.NumChunks += uint64(len(chks))
@@ -508,7 +509,7 @@ func (c *LeveledCompactor) populateBlock(blocks []Block, indexw IndexWriter, chu
 		i++
 	}
 	if set.Err() != nil {
-		return nil, set.Err()
+		return errors.Wrap(set.Err(), "iterate compaction set")
 	}
 
 	s := make([]string, 0, 256)
@@ -519,13 +520,13 @@ func (c *LeveledCompactor) populateBlock(blocks []Block, indexw IndexWriter, chu
 			s = append(s, x)
 		}
 		if err := indexw.WriteLabelIndex([]string{n}, s); err != nil {
-			return nil, err
+			return errors.Wrap(err, "write label index")
 		}
 	}
 
 	for t := range postings.m {
 		if err := indexw.WritePostings(t.name, t.value, postings.get(t)); err != nil {
-			return nil, err
+			return errors.Wrap(err, "write postings")
 		}
 	}
 
 	// Write a postings list containing all series.
@@ -534,10 +535,10 @@ func (c *LeveledCompactor) populateBlock(blocks []Block, indexw IndexWriter, chu
 		all[i] = uint32(i)
 	}
 	if err := indexw.WritePostings("", "", newListPostings(all)); err != nil {
-		return nil, err
+		return errors.Wrap(err, "write 'all' postings")
	}
 
-	return &meta, nil
+	return nil
 }
 
 type compactionSet interface {
@@ -572,9 +573,12 @@ func (c *compactionSeriesSet) Next() bool {
 	if !c.p.Next() {
 		return false
 	}
+	var err error
 
 	c.intervals = c.tombstones.Get(c.p.At())
 
-	if c.err = c.index.Series(c.p.At(), &c.l, &c.c); c.err != nil {
+	if err = c.index.Series(c.p.At(), &c.l, &c.c); err != nil {
+		c.err = errors.Wrapf(err, "get series %d", c.p.At())
 		return false
 	}
@@ -593,8 +597,9 @@ func (c *compactionSeriesSet) Next() bool {
 	for i := range c.c {
 		chk := &c.c[i]
 
-		chk.Chunk, c.err = c.chunks.Chunk(chk.Ref)
-		if c.err != nil {
+		chk.Chunk, err = c.chunks.Chunk(chk.Ref)
+		if err != nil {
+			c.err = errors.Wrapf(err, "chunk %d not found", chk.Ref)
 			return false
 		}
 	}
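
Since the merged meta is now computed up front and passed into write(), the merge rules are worth restating. A condensed sketch of what compactBlockMetas does per the hunks above (exampleMergedMeta is hypothetical; any level bookkeeping beyond the visible Level++ and the sorting of sources are elided):

func exampleMergedMeta(uid ulid.ULID, metas ...*BlockMeta) *BlockMeta {
	res := &BlockMeta{
		ULID:    uid,
		MinTime: metas[0].MinTime,            // inputs are time-ordered
		MaxTime: metas[len(metas)-1].MaxTime,
	}
	sources := map[ulid.ULID]struct{}{}
	for _, m := range metas {
		for _, s := range m.Compaction.Sources {
			sources[s] = struct{}{} // union of all input sources
		}
	}
	res.Compaction.Level++
	for s := range sources {
		res.Compaction.Sources = append(res.Compaction.Sources, s)
	}
	return res
}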

db.go: 765 changes. File diff suppressed because it is too large.

db_test.go

@@ -15,8 +15,10 @@ package tsdb
 
 import (
 	"io/ioutil"
+	"math"
 	"math/rand"
 	"os"
+	"sort"
 	"testing"
 
 	"github.com/pkg/errors"
@@ -24,8 +26,20 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
+func openTestDB(t testing.TB, opts *Options) (db *DB, close func()) {
+	tmpdir, _ := ioutil.TempDir("", "test")
+
+	db, err := Open(tmpdir, nil, nil, opts)
+	require.NoError(t, err)
+
+	// Do not close the test database by default as it will deadlock on test failures.
+	return db, func() {
+		os.RemoveAll(tmpdir)
+	}
+}
+
 // Convert a SeriesSet into a form useable with reflect.DeepEqual.
-func readSeriesSet(ss SeriesSet) (map[string][]sample, error) {
+func readSeriesSet(t testing.TB, ss SeriesSet) map[string][]sample {
 	result := map[string][]sample{}
 
 	for ss.Next() {
@@ -37,31 +51,28 @@ func readSeriesSet(ss SeriesSet) (map[string][]sample, error) {
 			t, v := it.At()
 			samples = append(samples, sample{t: t, v: v})
 		}
+		require.NoError(t, it.Err())
 
 		name := series.Labels().String()
 		result[name] = samples
-
-		if err := ss.Err(); err != nil {
-			return nil, err
-		}
 	}
-	return result, nil
+	require.NoError(t, ss.Err())
+
+	return result
 }
 
 func TestDataAvailableOnlyAfterCommit(t *testing.T) {
-	tmpdir, _ := ioutil.TempDir("", "test")
-	defer os.RemoveAll(tmpdir)
-
-	db, err := Open(tmpdir, nil, nil, nil)
-	require.NoError(t, err)
-	defer db.Close()
+	db, close := openTestDB(t, nil)
+	defer close()
 
 	app := db.Appender()
-	_, err = app.Add(labels.FromStrings("foo", "bar"), 0, 0)
+
+	_, err := app.Add(labels.FromStrings("foo", "bar"), 0, 0)
 	require.NoError(t, err)
 
 	querier := db.Querier(0, 1)
-	seriesSet, err := readSeriesSet(querier.Select(labels.NewEqualMatcher("foo", "bar")))
-	require.NoError(t, err)
+	seriesSet := readSeriesSet(t, querier.Select(labels.NewEqualMatcher("foo", "bar")))
 	require.Equal(t, seriesSet, map[string][]sample{})
 	require.NoError(t, querier.Close())
@@ -71,23 +82,17 @@ func TestDataAvailableOnlyAfterCommit(t *testing.T) {
 	querier = db.Querier(0, 1)
 	defer querier.Close()
 
-	seriesSet, err = readSeriesSet(querier.Select(labels.NewEqualMatcher("foo", "bar")))
-	require.NoError(t, err)
+	seriesSet = readSeriesSet(t, querier.Select(labels.NewEqualMatcher("foo", "bar")))
 	require.Equal(t, seriesSet, map[string][]sample{`{foo="bar"}`: []sample{{t: 0, v: 0}}})
 }
 
 func TestDataNotAvailableAfterRollback(t *testing.T) {
-	tmpdir, _ := ioutil.TempDir("", "test")
-	defer os.RemoveAll(tmpdir)
-
-	db, err := Open(tmpdir, nil, nil, nil)
-	if err != nil {
-		t.Fatalf("Error opening database: %q", err)
-	}
-	defer db.Close()
+	db, close := openTestDB(t, nil)
+	defer close()
 
 	app := db.Appender()
-	_, err = app.Add(labels.FromStrings("foo", "bar"), 0, 0)
+	_, err := app.Add(labels.FromStrings("foo", "bar"), 0, 0)
 	require.NoError(t, err)
 
 	err = app.Rollback()
@@ -96,22 +101,18 @@ func TestDataNotAvailableAfterRollback(t *testing.T) {
 	querier := db.Querier(0, 1)
 	defer querier.Close()
 
-	seriesSet, err := readSeriesSet(querier.Select(labels.NewEqualMatcher("foo", "bar")))
-	require.NoError(t, err)
+	seriesSet := readSeriesSet(t, querier.Select(labels.NewEqualMatcher("foo", "bar")))
 	require.Equal(t, seriesSet, map[string][]sample{})
 }
 
 func TestDBAppenderAddRef(t *testing.T) {
-	tmpdir, _ := ioutil.TempDir("", "test")
-	defer os.RemoveAll(tmpdir)
-
-	db, err := Open(tmpdir, nil, nil, nil)
-	require.NoError(t, err)
-	defer db.Close()
+	db, close := openTestDB(t, nil)
+	defer close()
 
 	app1 := db.Appender()
 
-	ref, err := app1.Add(labels.FromStrings("a", "b"), 0, 0)
+	ref, err := app1.Add(labels.FromStrings("a", "b"), 123, 0)
 	require.NoError(t, err)
 
 	// When a series is first created, refs don't work within that transaction.
@@ -122,35 +123,40 @@ func TestDBAppenderAddRef(t *testing.T) {
 	require.NoError(t, err)
 
 	app2 := db.Appender()
-	defer app2.Rollback()
-
-	ref, err = app2.Add(labels.FromStrings("a", "b"), 1, 1)
+	ref, err = app2.Add(labels.FromStrings("a", "b"), 133, 1)
 	require.NoError(t, err)
 
-	// Ref must be prefixed with block ULID of the block we wrote to.
-	id := db.blocks[len(db.blocks)-1].Meta().ULID
-	require.Equal(t, string(id[:]), ref[:16])
-
 	// Reference must be valid to add another sample.
-	err = app2.AddFast(ref, 2, 2)
+	err = app2.AddFast(ref, 143, 2)
 	require.NoError(t, err)
 
 	// AddFast for the same timestamp must fail if the generation in the reference
 	// doesn't add up.
-	refb := []byte(ref)
-	refb[15] ^= refb[15]
-	err = app2.AddFast(string(refb), 1, 1)
+	err = app2.AddFast("abc_invalid_xyz", 1, 1)
 	require.EqualError(t, errors.Cause(err), ErrNotFound.Error())
+
+	require.NoError(t, app2.Commit())
+
+	q := db.Querier(0, 200)
+	res := readSeriesSet(t, q.Select(labels.NewEqualMatcher("a", "b")))
+
+	require.Equal(t, map[string][]sample{
+		labels.FromStrings("a", "b").String(): []sample{
+			{t: 123, v: 0},
+			{t: 133, v: 1},
+			{t: 143, v: 2},
+		},
+	}, res)
+
+	require.NoError(t, q.Close())
 }
 
 func TestDeleteSimple(t *testing.T) {
 	numSamples := int64(10)
 
-	tmpdir, _ := ioutil.TempDir("", "test")
-	defer os.RemoveAll(tmpdir)
-
-	db, err := Open(tmpdir, nil, nil, nil)
-	require.NoError(t, err)
+	db, close := openTestDB(t, nil)
+	defer close()
 
 	app := db.Appender()
 
 	smpls := make([]float64, numSamples)
@@ -216,3 +222,246 @@ Outer:
 		}
 	}
 }
func TestAmendDatapointCausesError(t *testing.T) {
db, close := openTestDB(t, nil)
defer close()
app := db.Appender()
_, err := app.Add(labels.Labels{}, 0, 0)
require.NoError(t, err, "Failed to add sample")
require.NoError(t, app.Commit(), "Unexpected error committing appender")
app = db.Appender()
_, err = app.Add(labels.Labels{}, 0, 1)
require.Equal(t, ErrAmendSample, err)
require.NoError(t, app.Rollback(), "Unexpected error rolling back appender")
}
func TestDuplicateNaNDatapointNoAmendError(t *testing.T) {
db, close := openTestDB(t, nil)
defer close()
app := db.Appender()
_, err := app.Add(labels.Labels{}, 0, math.NaN())
require.NoError(t, err, "Failed to add sample")
require.NoError(t, app.Commit(), "Unexpected error committing appender")
app = db.Appender()
_, err = app.Add(labels.Labels{}, 0, math.NaN())
require.NoError(t, err)
}
func TestNonDuplicateNaNDatapointsCausesAmendError(t *testing.T) {
db, close := openTestDB(t, nil)
defer close()
app := db.Appender()
_, err := app.Add(labels.Labels{}, 0, math.Float64frombits(0x7ff0000000000001))
require.NoError(t, err, "Failed to add sample")
require.NoError(t, app.Commit(), "Unexpected error committing appender")
app = db.Appender()
_, err = app.Add(labels.Labels{}, 0, math.Float64frombits(0x7ff0000000000002))
require.Equal(t, ErrAmendSample, err)
}
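
Both NaN cases above come down to bit-level comparison: NaN never compares equal to itself with ==, so amend detection has to compare encodings, which is also why two NaNs with different payload bits still trigger ErrAmendSample. A minimal sketch of that comparison, assuming the tsdb package context and the standard math package (sameFloat64Bits is hypothetical):

func sameFloat64Bits(a, b float64) bool {
	// Unlike ==, comparing encodings treats a NaN as equal to itself
	// while still distinguishing 0x7ff0000000000001 from ...0002.
	return math.Float64bits(a) == math.Float64bits(b)
}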
func TestSkippingInvalidValuesInSameTxn(t *testing.T) {
db, close := openTestDB(t, nil)
defer close()
// Append AmendedValue.
app := db.Appender()
_, err := app.Add(labels.Labels{{"a", "b"}}, 0, 1)
require.NoError(t, err)
_, err = app.Add(labels.Labels{{"a", "b"}}, 0, 2)
require.NoError(t, err)
require.NoError(t, app.Commit())
// Make sure the right value is stored.
q := db.Querier(0, 10)
ss := q.Select(labels.NewEqualMatcher("a", "b"))
ssMap := readSeriesSet(t, ss)
require.Equal(t, map[string][]sample{
labels.New(labels.Label{"a", "b"}).String(): []sample{{0, 1}},
}, ssMap)
require.NoError(t, q.Close())
// Append Out of Order Value.
app = db.Appender()
_, err = app.Add(labels.Labels{{"a", "b"}}, 10, 3)
require.NoError(t, err)
_, err = app.Add(labels.Labels{{"a", "b"}}, 7, 5)
require.NoError(t, err)
require.NoError(t, app.Commit())
q = db.Querier(0, 10)
ss = q.Select(labels.NewEqualMatcher("a", "b"))
ssMap = readSeriesSet(t, ss)
require.Equal(t, map[string][]sample{
labels.New(labels.Label{"a", "b"}).String(): []sample{{0, 1}, {10, 3}},
}, ssMap)
require.NoError(t, q.Close())
}
func TestDB_e2e(t *testing.T) {
const (
numDatapoints = 1000
numRanges = 1000
timeInterval = int64(3)
maxTime = int64(2 * 1000)
minTime = int64(200)
)
// Create 8 series with 1000 data-points of different ranges and run queries.
lbls := [][]labels.Label{
{
{"a", "b"},
{"instance", "localhost:9090"},
{"job", "prometheus"},
},
{
{"a", "b"},
{"instance", "127.0.0.1:9090"},
{"job", "prometheus"},
},
{
{"a", "b"},
{"instance", "127.0.0.1:9090"},
{"job", "prom-k8s"},
},
{
{"a", "b"},
{"instance", "localhost:9090"},
{"job", "prom-k8s"},
},
{
{"a", "c"},
{"instance", "localhost:9090"},
{"job", "prometheus"},
},
{
{"a", "c"},
{"instance", "127.0.0.1:9090"},
{"job", "prometheus"},
},
{
{"a", "c"},
{"instance", "127.0.0.1:9090"},
{"job", "prom-k8s"},
},
{
{"a", "c"},
{"instance", "localhost:9090"},
{"job", "prom-k8s"},
},
}
seriesMap := map[string][]sample{}
for _, l := range lbls {
seriesMap[labels.New(l...).String()] = []sample{}
}
db, close := openTestDB(t, nil)
defer close()
app := db.Appender()
for _, l := range lbls {
lset := labels.New(l...)
series := []sample{}
ts := rand.Int63n(300)
for i := 0; i < numDatapoints; i++ {
v := rand.Float64()
series = append(series, sample{ts, v})
_, err := app.Add(lset, ts, v)
require.NoError(t, err)
ts += rand.Int63n(timeInterval) + 1
}
seriesMap[lset.String()] = series
}
require.NoError(t, app.Commit())
// Query each selector on 1000 random time-ranges.
queries := []struct {
ms []labels.Matcher
}{
{
ms: []labels.Matcher{labels.NewEqualMatcher("a", "b")},
},
{
ms: []labels.Matcher{
labels.NewEqualMatcher("a", "b"),
labels.NewEqualMatcher("job", "prom-k8s"),
},
},
{
ms: []labels.Matcher{
labels.NewEqualMatcher("a", "c"),
labels.NewEqualMatcher("instance", "localhost:9090"),
labels.NewEqualMatcher("job", "prometheus"),
},
},
// TODO: Add Regexp Matchers.
}
for _, qry := range queries {
matched := labels.Slice{}
for _, ls := range lbls {
s := labels.Selector(qry.ms)
if s.Matches(ls) {
matched = append(matched, ls)
}
}
sort.Sort(matched)
for i := 0; i < numRanges; i++ {
mint := rand.Int63n(300)
maxt := mint + rand.Int63n(timeInterval*int64(numDatapoints))
t.Logf("run query %s, [%d, %d]", qry.ms, mint, maxt)
expected := map[string][]sample{}
// Build the mockSeriesSet.
for _, m := range matched {
smpls := boundedSamples(seriesMap[m.String()], mint, maxt)
if len(smpls) > 0 {
expected[m.String()] = smpls
}
}
q := db.Querier(mint, maxt)
ss := q.Select(qry.ms...)
result := map[string][]sample{}
for ss.Next() {
x := ss.At()
smpls, err := expandSeriesIterator(x.Iterator())
require.NoError(t, err)
if len(smpls) > 0 {
result[x.Labels().String()] = smpls
}
}
require.NoError(t, ss.Err())
require.Equal(t, expected, result)
q.Close()
}
}
return
}
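
Every test above follows the same appender transaction pattern. A minimal standalone sketch of it, assuming the tsdb package context (appendExample is hypothetical; labels and values are arbitrary):

func appendExample(db *DB) error {
	app := db.Appender()

	ref, err := app.Add(labels.FromStrings("job", "demo"), 100, 1.0)
	if err != nil {
		app.Rollback()
		return err
	}
	// The ref returned by Add allows cheap follow-up appends to the series.
	if err := app.AddFast(ref, 101, 2.0); err != nil {
		app.Rollback()
		return err
	}
	// Data only becomes visible to queriers after Commit.
	return app.Commit()
}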

head.go: 782 changes. File diff suppressed because it is too large.

head_test.go

@@ -15,12 +15,8 @@ package tsdb
 
 import (
 	"io/ioutil"
-	"math"
-	"math/rand"
 	"os"
-	"sort"
 	"testing"
-	"time"
 	"unsafe"
 
 	"github.com/pkg/errors"
@@ -31,23 +27,6 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
-// createTestHeadBlock creates a new head block with a SegmentWAL.
-func createTestHeadBlock(t testing.TB, dir string, mint, maxt int64) *HeadBlock {
-	dir, err := TouchHeadBlock(dir, mint, maxt)
-	require.NoError(t, err)
-
-	return openTestHeadBlock(t, dir)
-}
-
-func openTestHeadBlock(t testing.TB, dir string) *HeadBlock {
-	wal, err := OpenSegmentWAL(dir, nil, 5*time.Second)
-	require.NoError(t, err)
-
-	h, err := OpenHeadBlock(dir, nil, wal, nil)
-	require.NoError(t, err)
-
-	return h
-}
-
 func BenchmarkCreateSeries(b *testing.B) {
 	lbls, err := readPrometheusLabels("cmd/tsdb/testdata.1m", 1e6)
 	require.NoError(b, err)
@@ -57,7 +36,10 @@ func BenchmarkCreateSeries(b *testing.B) {
 	require.NoError(b, err)
 	defer os.RemoveAll(dir)
 
-	h := createTestHeadBlock(b, dir, 0, 1)
+	h, err := NewHead(nil, nil, 10000)
+	if err != nil {
+		require.NoError(b, err)
+	}
 
 	b.ReportAllocs()
 	b.ResetTimer()
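
With separate head blocks gone, the benchmark constructs the in-memory head directly. A hedged reading of the NewHead call above: the two nil arguments are taken to be a logger and a WAL, and 10000 the head's chunk time range; those parameter meanings are inferred, not spelled out in this hunk.

// newBenchHead is a sketch helper: no logger, no WAL (hence no durability),
// chunk range of 10000, mirroring BenchmarkCreateSeries above.
func newBenchHead() (*Head, error) {
	return NewHead(nil, nil, 10000)
}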
@@ -106,598 +88,321 @@ func readPrometheusLabels(fn string, n int) ([]labels.Labels, error) {
 	return mets, nil
 }
 
-func TestAmendDatapointCausesError(t *testing.T) {
-	dir, _ := ioutil.TempDir("", "test")
-	defer os.RemoveAll(dir)
-
-	hb := createTestHeadBlock(t, dir, 0, 1000)
-
-	app := hb.Appender()
-	_, err := app.Add(labels.Labels{}, 0, 0)
-	require.NoError(t, err, "Failed to add sample")
-	require.NoError(t, app.Commit(), "Unexpected error committing appender")
-
-	app = hb.Appender()
-	_, err = app.Add(labels.Labels{}, 0, 1)
-	require.Equal(t, ErrAmendSample, err)
-}
-
-func TestDuplicateNaNDatapointNoAmendError(t *testing.T) {
-	dir, _ := ioutil.TempDir("", "test")
-	defer os.RemoveAll(dir)
-
-	hb := createTestHeadBlock(t, dir, 0, 1000)
-
-	app := hb.Appender()
-	_, err := app.Add(labels.Labels{}, 0, math.NaN())
-	require.NoError(t, err, "Failed to add sample")
-	require.NoError(t, app.Commit(), "Unexpected error committing appender")
-
-	app = hb.Appender()
-	_, err = app.Add(labels.Labels{}, 0, math.NaN())
-	require.NoError(t, err)
-}
-
-func TestNonDuplicateNaNDatapointsCausesAmendError(t *testing.T) {
-	dir, _ := ioutil.TempDir("", "test")
-	defer os.RemoveAll(dir)
-
-	hb := createTestHeadBlock(t, dir, 0, 1000)
-
-	app := hb.Appender()
-	_, err := app.Add(labels.Labels{}, 0, math.Float64frombits(0x7ff0000000000001))
-	require.NoError(t, err, "Failed to add sample")
-	require.NoError(t, app.Commit(), "Unexpected error committing appender")
-
-	app = hb.Appender()
-	_, err = app.Add(labels.Labels{}, 0, math.Float64frombits(0x7ff0000000000002))
-	require.Equal(t, ErrAmendSample, err)
-}
-
-func TestSkippingInvalidValuesInSameTxn(t *testing.T) {
-	dir, _ := ioutil.TempDir("", "test")
-	defer os.RemoveAll(dir)
-
-	hb := createTestHeadBlock(t, dir, 0, 1000)
-
-	// Append AmendedValue.
-	app := hb.Appender()
-	_, err := app.Add(labels.Labels{{"a", "b"}}, 0, 1)
-	require.NoError(t, err)
-	_, err = app.Add(labels.Labels{{"a", "b"}}, 0, 2)
-	require.NoError(t, err)
-	require.NoError(t, app.Commit())
-	require.Equal(t, uint64(1), hb.Meta().Stats.NumSamples)
-
-	// Make sure the right value is stored.
-	q := hb.Querier(0, 10)
-	ss := q.Select(labels.NewEqualMatcher("a", "b"))
-	ssMap, err := readSeriesSet(ss)
-	require.NoError(t, err)
-
-	require.Equal(t, map[string][]sample{
-		labels.New(labels.Label{"a", "b"}).String(): []sample{{0, 1}},
-	}, ssMap)
-	require.NoError(t, q.Close())
-
-	// Append Out of Order Value.
-	app = hb.Appender()
-	_, err = app.Add(labels.Labels{{"a", "b"}}, 10, 3)
-	require.NoError(t, err)
-	_, err = app.Add(labels.Labels{{"a", "b"}}, 7, 5)
-	require.NoError(t, err)
-	require.NoError(t, app.Commit())
-	require.Equal(t, uint64(2), hb.Meta().Stats.NumSamples)
-
-	q = hb.Querier(0, 10)
-	ss = q.Select(labels.NewEqualMatcher("a", "b"))
-	ssMap, err = readSeriesSet(ss)
-	require.NoError(t, err)
-
-	require.Equal(t, map[string][]sample{
-		labels.New(labels.Label{"a", "b"}).String(): []sample{{0, 1}, {10, 3}},
-	}, ssMap)
-	require.NoError(t, q.Close())
-}
-
-func TestHeadBlock_e2e(t *testing.T) {
-	numDatapoints := 1000
-	numRanges := 1000
-	timeInterval := int64(3)
-	maxTime := int64(2 * 1000)
-	minTime := int64(200)
-
-	// Create 8 series with 1000 data-points of different ranges and run queries.
-	lbls := [][]labels.Label{
-		{
-			{"a", "b"},
-			{"instance", "localhost:9090"},
-			{"job", "prometheus"},
-		},
-		{
-			{"a", "b"},
-			{"instance", "127.0.0.1:9090"},
-			{"job", "prometheus"},
-		},
-		{
-			{"a", "b"},
-			{"instance", "127.0.0.1:9090"},
-			{"job", "prom-k8s"},
-		},
-		{
-			{"a", "b"},
-			{"instance", "localhost:9090"},
-			{"job", "prom-k8s"},
-		},
-		{
-			{"a", "c"},
-			{"instance", "localhost:9090"},
-			{"job", "prometheus"},
-		},
-		{
-			{"a", "c"},
-			{"instance", "127.0.0.1:9090"},
-			{"job", "prometheus"},
-		},
-		{
-			{"a", "c"},
-			{"instance", "127.0.0.1:9090"},
-			{"job", "prom-k8s"},
-		},
-		{
-			{"a", "c"},
-			{"instance", "localhost:9090"},
-			{"job", "prom-k8s"},
-		},
-	}
-
-	seriesMap := map[string][]sample{}
-	for _, l := range lbls {
-		seriesMap[labels.New(l...).String()] = []sample{}
-	}
-
-	dir, _ := ioutil.TempDir("", "test")
-	defer os.RemoveAll(dir)
-
-	hb := createTestHeadBlock(t, dir, minTime, maxTime)
-	app := hb.Appender()
-
-	for _, l := range lbls {
-		ls := labels.New(l...)
-		series := []sample{}
-
-		ts := rand.Int63n(300)
-		for i := 0; i < numDatapoints; i++ {
-			v := rand.Float64()
-			if ts >= minTime && ts <= maxTime {
-				series = append(series, sample{ts, v})
-			}
-
-			_, err := app.Add(ls, ts, v)
-			if ts >= minTime && ts <= maxTime {
-				require.NoError(t, err)
-			} else {
-				require.EqualError(t, err, ErrOutOfBounds.Error())
-			}
-
-			ts += rand.Int63n(timeInterval) + 1
-		}
-
-		seriesMap[labels.New(l...).String()] = series
-	}
-
-	require.NoError(t, app.Commit())
-
-	// Query each selector on 1000 random time-ranges.
-	queries := []struct {
-		ms []labels.Matcher
-	}{
-		{
-			ms: []labels.Matcher{labels.NewEqualMatcher("a", "b")},
-		},
-		{
-			ms: []labels.Matcher{
-				labels.NewEqualMatcher("a", "b"),
-				labels.NewEqualMatcher("job", "prom-k8s"),
-			},
-		},
-		{
-			ms: []labels.Matcher{
-				labels.NewEqualMatcher("a", "c"),
-				labels.NewEqualMatcher("instance", "localhost:9090"),
-				labels.NewEqualMatcher("job", "prometheus"),
-			},
-		},
-		// TODO: Add Regexp Matchers.
-	}
-
-	for _, qry := range queries {
-		matched := labels.Slice{}
-		for _, ls := range lbls {
-			s := labels.Selector(qry.ms)
-			if s.Matches(ls) {
-				matched = append(matched, ls)
-			}
-		}
-
-		sort.Sort(matched)
-
-		for i := 0; i < numRanges; i++ {
-			mint := rand.Int63n(300)
-			maxt := mint + rand.Int63n(timeInterval*int64(numDatapoints))
-
-			q := hb.Querier(mint, maxt)
-			ss := q.Select(qry.ms...)
-
-			// Build the mockSeriesSet.
-			matchedSeries := make([]Series, 0, len(matched))
-			for _, m := range matched {
-				smpls := boundedSamples(seriesMap[m.String()], mint, maxt)
-
-				// Only append those series for which samples exist as mockSeriesSet
-				// doesn't skip series with no samples.
-				// TODO: But sometimes SeriesSet returns an empty SeriesIterator
-				if len(smpls) > 0 {
-					matchedSeries = append(matchedSeries, newSeries(
-						m.Map(),
-						smpls,
-					))
-				}
-			}
-			expSs := newListSeriesSet(matchedSeries)
-
-			// Compare both SeriesSets.
-			for {
-				eok, rok := expSs.Next(), ss.Next()
-
-				// Skip a series if iterator is empty.
-				if rok {
-					for !ss.At().Iterator().Next() {
-						rok = ss.Next()
-						if !rok {
-							break
-						}
-					}
-				}
-				require.Equal(t, eok, rok, "next")
-
-				if !eok {
-					break
-				}
-				sexp := expSs.At()
-				sres := ss.At()
-
-				require.Equal(t, sexp.Labels(), sres.Labels(), "labels")
-
-				smplExp, errExp := expandSeriesIterator(sexp.Iterator())
-				smplRes, errRes := expandSeriesIterator(sres.Iterator())
-
-				require.Equal(t, errExp, errRes, "samples error")
-				require.Equal(t, smplExp, smplRes, "samples")
-			}
-		}
-	}
-
-	return
-}
-
-func TestHBDeleteSimple(t *testing.T) {
-	numSamples := int64(10)
-
-	dir, _ := ioutil.TempDir("", "test")
-	defer os.RemoveAll(dir)
-
-	hb := createTestHeadBlock(t, dir, 0, numSamples)
-	app := hb.Appender()
-
-	smpls := make([]float64, numSamples)
-	for i := int64(0); i < numSamples; i++ {
-		smpls[i] = rand.Float64()
-		app.Add(labels.Labels{{"a", "b"}}, i, smpls[i])
-	}
-
-	require.NoError(t, app.Commit())
-	cases := []struct {
-		intervals Intervals
-		remaint   []int64
-	}{
-		{
-			intervals: Intervals{{0, 3}},
-			remaint:   []int64{4, 5, 6, 7, 8, 9},
-		},
-		{
-			intervals: Intervals{{1, 3}},
-			remaint:   []int64{0, 4, 5, 6, 7, 8, 9},
-		},
-		{
-			intervals: Intervals{{1, 3}, {4, 7}},
-			remaint:   []int64{0, 8, 9},
-		},
-		{
-			intervals: Intervals{{1, 3}, {4, 700}},
-			remaint:   []int64{0},
-		},
-		{
-			intervals: Intervals{{0, 9}},
-			remaint:   []int64{},
-		},
-	}
-
-Outer:
-	for _, c := range cases {
-		// Reset the tombstones.
-		hb.tombstones = newEmptyTombstoneReader()
-
-		// Delete the ranges.
-		for _, r := range c.intervals {
-			require.NoError(t, hb.Delete(r.Mint, r.Maxt, labels.NewEqualMatcher("a", "b")))
-		}
-
-		// Compare the result.
-		q := hb.Querier(0, numSamples)
-		res := q.Select(labels.NewEqualMatcher("a", "b"))
-
-		expSamples := make([]sample, 0, len(c.remaint))
-		for _, ts := range c.remaint {
-			expSamples = append(expSamples, sample{ts, smpls[ts]})
-		}
-
-		expss := newListSeriesSet([]Series{
-			newSeries(map[string]string{"a": "b"}, expSamples),
-		})
-
-		if len(expSamples) == 0 {
-			require.False(t, res.Next())
-			continue
-		}
-
-		for {
-			eok, rok := expss.Next(), res.Next()
-			require.Equal(t, eok, rok, "next")
-
-			if !eok {
-				continue Outer
-			}
-			sexp := expss.At()
-			sres := res.At()
-
-			require.Equal(t, sexp.Labels(), sres.Labels(), "labels")
-
-			smplExp, errExp := expandSeriesIterator(sexp.Iterator())
-			smplRes, errRes := expandSeriesIterator(sres.Iterator())
-
-			require.Equal(t, errExp, errRes, "samples error")
-			require.Equal(t, smplExp, smplRes, "samples")
-		}
-	}
-}
-
-func TestDeleteUntilCurMax(t *testing.T) {
-	numSamples := int64(10)
-
-	dir, _ := ioutil.TempDir("", "test")
-	defer os.RemoveAll(dir)
-
-	hb := createTestHeadBlock(t, dir, 0, 2*numSamples)
-	app := hb.Appender()
-
-	smpls := make([]float64, numSamples)
-	for i := int64(0); i < numSamples; i++ {
-		smpls[i] = rand.Float64()
-		app.Add(labels.Labels{{"a", "b"}}, i, smpls[i])
-	}
-
-	require.NoError(t, app.Commit())
-	require.NoError(t, hb.Delete(0, 10000, labels.NewEqualMatcher("a", "b")))
-	app = hb.Appender()
-	_, err := app.Add(labels.Labels{{"a", "b"}}, 11, 1)
-	require.NoError(t, err)
-	require.NoError(t, app.Commit())
-
-	q := hb.Querier(0, 100000)
-	res := q.Select(labels.NewEqualMatcher("a", "b"))
-
-	require.True(t, res.Next())
-	exps := res.At()
-	it := exps.Iterator()
-	ressmpls, err := expandSeriesIterator(it)
-	require.NoError(t, err)
-	require.Equal(t, []sample{{11, 1}}, ressmpls)
-}
-
-func TestDelete_e2e(t *testing.T) {
-	numDatapoints := 1000
-	numRanges := 1000
-	timeInterval := int64(2)
-	maxTime := int64(2 * 1000)
-	minTime := int64(200)
-
-	// Create 8 series with 1000 data-points of different ranges, delete and run queries.
-	lbls := [][]labels.Label{
-		{
-			{"a", "b"},
-			{"instance", "localhost:9090"},
-			{"job", "prometheus"},
-		},
-		{
-			{"a", "b"},
-			{"instance", "127.0.0.1:9090"},
-			{"job", "prometheus"},
-		},
-		{
-			{"a", "b"},
-			{"instance", "127.0.0.1:9090"},
-			{"job", "prom-k8s"},
-		},
-		{
-			{"a", "b"},
-			{"instance", "localhost:9090"},
-			{"job", "prom-k8s"},
-		},
-		{
-			{"a", "c"},
-			{"instance", "localhost:9090"},
-			{"job", "prometheus"},
-		},
-		{
-			{"a", "c"},
-			{"instance", "127.0.0.1:9090"},
-			{"job", "prometheus"},
-		},
-		{
-			{"a", "c"},
-			{"instance", "127.0.0.1:9090"},
-			{"job", "prom-k8s"},
-		},
-		{
-			{"a", "c"},
-			{"instance", "localhost:9090"},
-			{"job", "prom-k8s"},
-		},
-	}
-
-	seriesMap := map[string][]sample{}
-	for _, l := range lbls {
-		seriesMap[labels.New(l...).String()] = []sample{}
-	}
-
-	dir, _ := ioutil.TempDir("", "test")
-	defer os.RemoveAll(dir)
-
-	hb := createTestHeadBlock(t, dir, minTime, maxTime)
-	app := hb.Appender()
-
-	for _, l := range lbls {
-		ls := labels.New(l...)
-		series := []sample{}
-
-		ts := rand.Int63n(300)
-		for i := 0; i < numDatapoints; i++ {
-			v := rand.Float64()
-			if ts >= minTime && ts <= maxTime {
-				series = append(series, sample{ts, v})
-			}
-
-			_, err := app.Add(ls, ts, v)
-			if ts >= minTime && ts <= maxTime {
-				require.NoError(t, err)
-			} else {
-				require.EqualError(t, err, ErrOutOfBounds.Error())
-			}
-
-			ts += rand.Int63n(timeInterval) + 1
-		}
-
-		seriesMap[labels.New(l...).String()] = series
-	}
-
-	require.NoError(t, app.Commit())
-
-	// Delete a time-range from each-selector.
-	dels := []struct {
-		ms     []labels.Matcher
-		drange Intervals
-	}{
-		{
-			ms:     []labels.Matcher{labels.NewEqualMatcher("a", "b")},
-			drange: Intervals{{300, 500}, {600, 670}},
-		},
-		{
-			ms: []labels.Matcher{
-				labels.NewEqualMatcher("a", "b"),
-				labels.NewEqualMatcher("job", "prom-k8s"),
-			},
-			drange: Intervals{{300, 500}, {100, 670}},
-		},
-		{
-			ms: []labels.Matcher{
-				labels.NewEqualMatcher("a", "c"),
-				labels.NewEqualMatcher("instance", "localhost:9090"),
-				labels.NewEqualMatcher("job", "prometheus"),
-			},
-			drange: Intervals{{300, 400}, {100, 6700}},
-		},
-		// TODO: Add Regexp Matchers.
-	}
-
-	for _, del := range dels {
-		// Reset the deletes everytime.
-		writeTombstoneFile(hb.dir, newEmptyTombstoneReader())
-		hb.tombstones = newEmptyTombstoneReader()
-
-		for _, r := range del.drange {
-			require.NoError(t, hb.Delete(r.Mint, r.Maxt, del.ms...))
-		}
-
-		matched := labels.Slice{}
-		for _, ls := range lbls {
-			s := labels.Selector(del.ms)
-			if s.Matches(ls) {
-				matched = append(matched, ls)
-			}
-		}
-
-		sort.Sort(matched)
-
-		for i := 0; i < numRanges; i++ {
-			mint := rand.Int63n(300)
-			maxt := mint + rand.Int63n(timeInterval*int64(numDatapoints))
-
-			q := hb.Querier(mint, maxt)
-			ss := q.Select(del.ms...)
-
-			// Build the mockSeriesSet.
-			matchedSeries := make([]Series, 0, len(matched))
-			for _, m := range matched {
-				smpls := boundedSamples(seriesMap[m.String()], mint, maxt)
-				smpls = deletedSamples(smpls, del.drange)
-
-				// Only append those series for which samples exist as mockSeriesSet
-				// doesn't skip series with no samples.
-				// TODO: But sometimes SeriesSet returns an empty SeriesIterator
-				if len(smpls) > 0 {
-					matchedSeries = append(matchedSeries, newSeries(
-						m.Map(),
-						smpls,
-					))
-				}
-			}
-			expSs := newListSeriesSet(matchedSeries)
-
-			// Compare both SeriesSets.
-			for {
-				eok, rok := expSs.Next(), ss.Next()
-
-				// Skip a series if iterator is empty.
-				if rok {
-					for !ss.At().Iterator().Next() {
-						rok = ss.Next()
-						if !rok {
-							break
-						}
-					}
-				}
-				require.Equal(t, eok, rok, "next")
-
-				if !eok {
-					break
-				}
-				sexp := expSs.At()
-				sres := ss.At()
-
-				require.Equal(t, sexp.Labels(), sres.Labels(), "labels")
-
-				smplExp, errExp := expandSeriesIterator(sexp.Iterator())
-				smplRes, errRes := expandSeriesIterator(sres.Iterator())
-
-				require.Equal(t, errExp, errRes, "samples error")
-				require.Equal(t, smplExp, smplRes, "samples")
-			}
-		}
-	}
-
-	return
-}
+// func TestHBDeleteSimple(t *testing.T) {
+// 	numSamples := int64(10)
+
+// 	hb, close := openTestDB(t, nil)
+// 	defer close()
+// 	app := hb.Appender()
+
+// 	smpls := make([]float64, numSamples)
+// 	for i := int64(0); i < numSamples; i++ {
+// 		smpls[i] = rand.Float64()
+// 		app.Add(labels.Labels{{"a", "b"}}, i, smpls[i])
+// 	}
+
+// 	require.NoError(t, app.Commit())
+// 	cases := []struct {
+// 		intervals Intervals
+// 		remaint   []int64
+// 	}{
+// 		{
+// 			intervals: Intervals{{0, 3}},
+// 			remaint:   []int64{4, 5, 6, 7, 8, 9},
+// 		},
+// 		{
+// 			intervals: Intervals{{1, 3}},
+// 			remaint:   []int64{0, 4, 5, 6, 7, 8, 9},
+// 		},
+// 		{
+// 			intervals: Intervals{{1, 3}, {4, 7}},
+// 			remaint:   []int64{0, 8, 9},
+// 		},
+// 		{
+// 			intervals: Intervals{{1, 3}, {4, 700}},
+// 			remaint:   []int64{0},
+// 		},
+// 		{
+// 			intervals: Intervals{{0, 9}},
+// 			remaint:   []int64{},
+// 		},
+// 	}
+
+// Outer:
+// 	for _, c := range cases {
+// 		// Reset the tombstones.
+// 		hb.tombstones = newEmptyTombstoneReader()
+
+// 		// Delete the ranges.
+// 		for _, r := range c.intervals {
+// 			require.NoError(t, hb.Delete(r.Mint, r.Maxt, labels.NewEqualMatcher("a", "b")))
+// 		}
+
+// 		// Compare the result.
+// 		q := hb.Querier(0, numSamples)
+// 		res := q.Select(labels.NewEqualMatcher("a", "b"))
+
+// 		expSamples := make([]sample, 0, len(c.remaint))
+// 		for _, ts := range c.remaint {
+// 			expSamples = append(expSamples, sample{ts, smpls[ts]})
+// 		}
+
+// 		expss := newListSeriesSet([]Series{
+// 			newSeries(map[string]string{"a": "b"}, expSamples),
+// 		})
+
+// 		if len(expSamples) == 0 {
+// 			require.False(t, res.Next())
+// 			continue
+// 		}
+
+// 		for {
+// 			eok, rok := expss.Next(), res.Next()
+// 			require.Equal(t, eok, rok, "next")
+
+// 			if !eok {
+// 				continue Outer
+// 			}
+// 			sexp := expss.At()
+// 			sres := res.At()
+
+// 			require.Equal(t, sexp.Labels(), sres.Labels(), "labels")
+
+// 			smplExp, errExp := expandSeriesIterator(sexp.Iterator())
+// 			smplRes, errRes := expandSeriesIterator(sres.Iterator())
+
+// 			require.Equal(t, errExp, errRes, "samples error")
+// 			require.Equal(t, smplExp, smplRes, "samples")
+// 		}
+// 	}
+// }
+
+// func TestDeleteUntilCurMax(t *testing.T) {
+// 	numSamples := int64(10)
+
+// 	dir, _ := ioutil.TempDir("", "test")
+// 	defer os.RemoveAll(dir)
+
+// 	hb := createTestHead(t, dir, 0, 2*numSamples)
+// 	app := hb.Appender()
+
+// 	smpls := make([]float64, numSamples)
+// 	for i := int64(0); i < numSamples; i++ {
+// 		smpls[i] = rand.Float64()
+// 		app.Add(labels.Labels{{"a", "b"}}, i, smpls[i])
+// 	}
+
+// 	require.NoError(t, app.Commit())
+// 	require.NoError(t, hb.Delete(0, 10000, labels.NewEqualMatcher("a", "b")))
+// 	app = hb.Appender()
+// 	_, err := app.Add(labels.Labels{{"a", "b"}}, 11, 1)
+// 	require.NoError(t, err)
+// 	require.NoError(t, app.Commit())
+
+// 	q := hb.Querier(0, 100000)
+// 	res := q.Select(labels.NewEqualMatcher("a", "b"))
+
+// 	require.True(t, res.Next())
+// 	exps := res.At()
+// 	it := exps.Iterator()
+// 	ressmpls, err := expandSeriesIterator(it)
+// 	require.NoError(t, err)
+// 	require.Equal(t, []sample{{11, 1}}, ressmpls)
+// }
+
+// func TestDelete_e2e(t *testing.T) {
+// 	numDatapoints := 1000
+// 	numRanges := 1000
+// 	timeInterval := int64(2)
+// 	maxTime := int64(2 * 1000)
+// 	minTime := int64(200)
+// 	// Create 8 series with 1000 data-points of different ranges, delete and run queries.
+// 	lbls := [][]labels.Label{
+// 		{
+// 			{"a", "b"},
+// 			{"instance", "localhost:9090"},
+// 			{"job", "prometheus"},
+// 		},
+// 		{
+// 			{"a", "b"},
+// 			{"instance", "127.0.0.1:9090"},
+// 			{"job", "prometheus"},
+// 		},
+// 		{
+// 			{"a", "b"},
+// 			{"instance", "127.0.0.1:9090"},
+// 			{"job", "prom-k8s"},
+// 		},
+// 		{
+// 			{"a", "b"},
+// 			{"instance", "localhost:9090"},
+// 			{"job", "prom-k8s"},
+// 		},
+// 		{
+// 			{"a", "c"},
+// 			{"instance", "localhost:9090"},
+// 			{"job", "prometheus"},
+// 		},
+// 		{
+// 			{"a", "c"},
+// 			{"instance", "127.0.0.1:9090"},
+// 			{"job", "prometheus"},
+// 		},
+// 		{
+// 			{"a", "c"},
+// 			{"instance", "127.0.0.1:9090"},
+// 			{"job", "prom-k8s"},
+// 		},
+// 		{
+// 			{"a", "c"},
+// 			{"instance", "localhost:9090"},
+// 			{"job", "prom-k8s"},
+// 		},
+// 	}
+
+// 	seriesMap := map[string][]sample{}
+// 	for _, l := range lbls {
+// 		seriesMap[labels.New(l...).String()] = []sample{}
+// 	}
+
+// 	dir, _ := ioutil.TempDir("", "test")
+// 	defer os.RemoveAll(dir)
+
+// 	hb := createTestHead(t, dir, minTime, maxTime)
+// 	app := hb.Appender()
+
+// 	for _, l := range lbls {
+// 		ls := labels.New(l...)
+// 		series := []sample{}
+
+// 		ts := rand.Int63n(300)
+// 		for i := 0; i < numDatapoints; i++ {
+// 			v := rand.Float64()
+// 			if ts >= minTime && ts <= maxTime {
+// 				series = append(series, sample{ts, v})
+// 			}
+
+// 			_, err := app.Add(ls, ts, v)
+// 			if ts >= minTime && ts <= maxTime {
+// 				require.NoError(t, err)
+// 			} else {
+// 				require.EqualError(t, err, ErrOutOfBounds.Error())
+// 			}
+
+// 			ts += rand.Int63n(timeInterval) + 1
+// 		}
+// 		seriesMap[labels.New(l...).String()] = series
+// 	}
+
+// 	require.NoError(t, app.Commit())
+
+// 	// Delete a time-range from each-selector.
+// 	dels := []struct {
+// 		ms     []labels.Matcher
+// 		drange Intervals
+// 	}{
+// 		{
+// 			ms:     []labels.Matcher{labels.NewEqualMatcher("a", "b")},
+// 			drange: Intervals{{300, 500}, {600, 670}},
+// 		},
+// 		{
+// 			ms: []labels.Matcher{
+// 				labels.NewEqualMatcher("a", "b"),
+// 				labels.NewEqualMatcher("job", "prom-k8s"),
+// 			},
+// 			drange: Intervals{{300, 500}, {100, 670}},
+// 		},
+// 		{
+// 			ms: []labels.Matcher{
+// 				labels.NewEqualMatcher("a", "c"),
+// 				labels.NewEqualMatcher("instance", "localhost:9090"),
+// 				labels.NewEqualMatcher("job", "prometheus"),
+// 			},
+// 			drange: Intervals{{300, 400}, {100, 6700}},
+// 		},
+// 		// TODO: Add Regexp Matchers.
+// 	}
+
+// 	for _, del := range dels {
+// 		// Reset the deletes everytime.
+// 		// writeTombstoneFile(hb.dir, newEmptyTombstoneReader())
+// 		hb.tombstones = newEmptyTombstoneReader()
+
+// 		for _, r := range del.drange {
+// 			require.NoError(t, hb.Delete(r.Mint, r.Maxt, del.ms...))
+// 		}
+
+// 		matched := labels.Slice{}
+// 		for _, ls := range lbls {
+// 			s := labels.Selector(del.ms)
+// 			if s.Matches(ls) {
+// 				matched = append(matched, ls)
+// 			}
+// 		}
+
+// 		sort.Sort(matched)
+
+// 		for i := 0; i < numRanges; i++ {
+// 			mint := rand.Int63n(200)
+// 			maxt := mint + rand.Int63n(timeInterval*int64(numDatapoints))
+
+// 			q := hb.Querier(mint, maxt)
+// 			ss := q.Select(del.ms...)
+
+// 			// Build the mockSeriesSet.
+// 			matchedSeries := make([]Series, 0, len(matched))
+// 			for _, m := range matched {
+// 				smpls := boundedSamples(seriesMap[m.String()], mint, maxt)
+// 				smpls = deletedSamples(smpls, del.drange)
+
+// 				// Only append those series for which samples exist as mockSeriesSet
+// 				// doesn't skip series with no samples.
+// 				// TODO: But sometimes SeriesSet returns an empty SeriesIterator
+// 				if len(smpls) > 0 {
+// 					matchedSeries = append(matchedSeries, newSeries(
+// 						m.Map(),
+// 						smpls,
+// 					))
+// 				}
+// 			}
+// 			expSs := newListSeriesSet(matchedSeries)
+
+// 			// Compare both SeriesSets.
+// 			for {
+// 				eok, rok := expSs.Next(), ss.Next()
+// 				// Skip a series if iterator is empty.
+// 				if rok {
+// 					for !ss.At().Iterator().Next() {
+// 						rok = ss.Next()
+// 						if !rok {
+// 							break
+// 						}
+// 					}
+// 				}
+// 				require.Equal(t, eok, rok, "next")
+
+// 				if !eok {
+// 					break
+// 				}
+// 				sexp := expSs.At()
+// 				sres := ss.At()
+
+// 				require.Equal(t, sexp.Labels(), sres.Labels(), "labels")
+
+// 				smplExp, errExp := expandSeriesIterator(sexp.Iterator())
+// 				smplRes, errRes := expandSeriesIterator(sres.Iterator())
+
+// 				require.Equal(t, errExp, errRes, "samples error")
+// 				require.Equal(t, smplExp, smplRes, "samples")
+// 			}
+// 		}
+// 	}
+
+// 	return
+// }
 
 func boundedSamples(full []sample, mint, maxt int64) []sample {
 	for len(full) > 0 {
@@ -725,7 +430,6 @@ Outer:
 			continue Outer
 		}
 	}
-
 	ds = append(ds, s)
 }

querier.go

@@ -54,26 +54,6 @@ type querier struct {
 	blocks []Querier
 }
 
-// Querier returns a new querier over the data partition for the given time range.
-// A goroutine must not handle more than one open Querier.
-func (s *DB) Querier(mint, maxt int64) Querier {
-	s.mtx.RLock()
-
-	s.headmtx.RLock()
-	blocks := s.blocksForInterval(mint, maxt)
-	s.headmtx.RUnlock()
-
-	sq := &querier{
-		blocks: make([]Querier, 0, len(blocks)),
-		db:     s,
-	}
-	for _, b := range blocks {
-		sq.blocks = append(sq.blocks, b.Querier(mint, maxt))
-	}
-
-	return sq
-}
-
 func (q *querier) LabelValues(n string) ([]string, error) {
 	return q.lvals(q.blocks, n)
 }
@@ -700,6 +680,7 @@ type chunkSeriesIterator struct {
 func newChunkSeriesIterator(cs []ChunkMeta, dranges Intervals, mint, maxt int64) *chunkSeriesIterator {
 	it := cs[0].Chunk.Iterator()
+
 	if len(dranges) > 0 {
 		it = &deletedIterator{it: it, intervals: dranges}
 	}
@@ -750,19 +731,22 @@ func (it *chunkSeriesIterator) At() (t int64, v float64) {
 }
 
 func (it *chunkSeriesIterator) Next() bool {
-	for it.cur.Next() {
+	if it.cur.Next() {
 		t, _ := it.cur.At()
-		if t < it.mint {
-			return it.Seek(it.mint)
-		}
+
+		if t < it.mint {
+			if !it.Seek(it.mint) {
+				return false
+			}
+			t, _ = it.At()
+
+			return t <= it.maxt
+		}
+
 		if t > it.maxt {
 			return false
 		}
 		return true
 	}
 	if err := it.cur.Err(); err != nil {
 		return false
 	}
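
The rewritten Next() matters for query windows that fall between two samples: seeking to mint can overshoot maxt, and the overshooting sample used to be accepted. A small demonstration mirroring the regression test added below, assuming the tsdb package context plus an fmt import (exampleNextWithMinTime is hypothetical):

func exampleNextWithMinTime() {
	// Samples at t=1,5,7 but a window of [2,4]: Next() sees t=1 < mint,
	// seeks to mint and lands on t=5 > maxt, which must not be reported.
	it := newChunkSeriesIterator(
		[]ChunkMeta{chunkFromSamples([]sample{{1, 6}, {5, 6}, {7, 8}})},
		nil, 2, 4,
	)
	fmt.Println(it.Next()) // false after the fix; previously t=5 leaked out
}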

querier_test.go

@@ -1138,6 +1138,17 @@ func TestChunkSeriesIterator_SeekInCurrentChunk(t *testing.T) {
 	require.Equal(t, float64(6), v)
 }
 
+// Regression when calling Next() with a time bounded to fit within two samples.
+// Seek gets called and advances beyond the max time, which was just accepted as a valid sample.
+func TestChunkSeriesIterator_NextWithMinTime(t *testing.T) {
+	metas := []ChunkMeta{
+		chunkFromSamples([]sample{{1, 6}, {5, 6}, {7, 8}}),
+	}
+
+	it := newChunkSeriesIterator(metas, nil, 2, 4)
+	require.False(t, it.Next())
+}
+
 func TestPopulatedCSReturnsValidChunkSlice(t *testing.T) {
 	lbls := []labels.Labels{labels.New(labels.Label{"a", "b"})}
 	chunkMetas := [][]ChunkMeta{

wal.go: 17 changes.

@@ -84,9 +84,19 @@ type WAL interface {
 	LogSeries([]labels.Labels) error
 	LogSamples([]RefSample) error
 	LogDeletes([]Stone) error
+	Truncate(maxt int64) error
 	Close() error
 }
 
+type NopWAL struct{}
+
+func (NopWAL) Read(SeriesCB, SamplesCB, DeletesCB) error { return nil }
+func (w NopWAL) Reader() WALReader                       { return w }
+func (NopWAL) LogSeries([]labels.Labels) error           { return nil }
+func (NopWAL) LogSamples([]RefSample) error              { return nil }
+func (NopWAL) LogDeletes([]Stone) error                  { return nil }
+func (NopWAL) Close() error                              { return nil }
+
 // WALReader reads entries from a WAL.
 type WALReader interface {
 	Read(SeriesCB, SamplesCB, DeletesCB) error
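
NopWAL lets components that require a WAL run without durability, e.g. in tests and benchmarks. Note that as added above it does not declare Truncate, which the extended WAL interface now requires; the sketch below supplies a no-op Truncate under the assumption that one is needed for NopWAL to satisfy WAL:

// nopWALWithTruncate is a hypothetical wrapper, not part of this commit.
type nopWALWithTruncate struct{ NopWAL }

func (nopWALWithTruncate) Truncate(int64) error { return nil }

func useNopWAL() error {
	var w WAL = nopWALWithTruncate{}
	// Every log call succeeds without touching disk.
	return w.LogSamples(nil)
}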
@@ -319,6 +329,10 @@ func (w *SegmentWAL) Sync() error {
 	return fileutil.Fdatasync(tail)
 }
 
+func (w *SegmentWAL) Truncate(maxt int64) error {
+	return nil
+}
+
 func (w *SegmentWAL) sync() error {
 	if err := w.flush(); err != nil {
 		return err
@@ -360,9 +374,8 @@ func (w *SegmentWAL) Close() error {
 	close(w.stopc)
 	<-w.donec
 
-	// Lock mutex and leave it locked so we panic if there's a bug causing
-	// the block to be used afterwards.
 	w.mtx.Lock()
+	defer w.mtx.Unlock()
 
 	if err := w.sync(); err != nil {
 		return err